arXiv:2301.00268v1 [math-ph] 31 Dec 2022

AUTOCORRELATIONS OF CHARACTERISTIC POLYNOMIALS FOR THE ALTERNATIVE CIRCULAR UNITARY ENSEMBLE

BRAD RODGERS, HARSHITH SAI VALLABHANENI

Abstract. We find closed formulas for arbitrarily high mixed moments of characteristic polynomials of the Alternative Circular Unitary Ensemble (ACUE), as well as closed formulas for the averages of ratios of characteristic polynomials in this ensemble. A comparison is made to analogous results for the Circular Unitary Ensemble (CUE). Both moments and ratios are studied via symmetric function theory and a general formula of Borodin-Olshanski-Strahov.

1. Introduction

In this short note we examine mixed moments and averages of ratios of characteristic polynomials associated with the Alternative Circular Unitary Ensemble (ACUE). Our main results are a closed formula for arbitrarily high mixed moments in Theorem 2 and a closed formula for averages of ratios in Theorem 6. The ACUE refers to a certain random collection of points on the unit circle of the complex plane whose distribution is meant to mimic the points of the Circular Unitary Ensemble (CUE) of random matrix theory. Let us use the notation

   ∆(x_1, ..., x_N) := ∏_{1≤j<k≤N} (x_k − x_j)

[...]

If K > N or L > N, these formulas show the moments for these models differ, despite having a closely related structure.

In fact it is by specializing the following formula for averages of ratios of characteristic polynomials that we derive Theorem 2.

Theorem 4. For N and J positive integers, and v_1, ..., v_J complex numbers and u_1, ..., u_J complex numbers which are not 2N-th roots of unity,

   E_{ACUE(N)} [ ∏_{j=1}^{J} det(1 + v_j g) / ∏_{j=1}^{J} det(1 + u_j g) ]
      = 1/det( 1/(u_i − v_j) ) · det( e_N(u_i, v_j)/(u_i − v_j) ),        (5)

where the determinants on the right hand side are of J×J matrices, over the indices 1 ≤ i, j ≤ J, and

   e_N(u, v) := (1 − u^N v^N)/(1 − u^{2N}).
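For small N the average in Theorem 4 can be checked directly by brute force. The following Python sketch is an added illustration (not part of the original paper; the helper names are ours). It assumes the ACUE(N) weighting of N-point configurations among the 2N-th roots of unity by |∆|^2 with the normalization 1/(N!(2N)^N) that appears in the proof of Lemma 7 below, and it tests the J = 1 case of (5), which by Proposition 8 reads E[det(1+vg)/det(1+ug)] = (1 − u^N v^N)/(1 − u^{2N}).

    import itertools
    import numpy as np

    def acue_average(N, f):
        """E_{ACUE(N)}[f(omega_1, ..., omega_N)] by exhaustive summation.

        Configurations are N-element subsets of the 2N-th roots of unity,
        weighted by |Vandermonde|^2 / (2N)^N; the factor N! in the
        normalization cancels when summing over unordered configurations."""
        grid = [np.exp(2j * np.pi * t / (2 * N)) for t in range(2 * N)]
        total = 0j
        for omega in itertools.combinations(grid, N):
            vdm = np.prod([omega[k] - omega[j]
                           for j in range(N) for k in range(j + 1, N)])
            total += abs(vdm) ** 2 * f(np.array(omega))
        return total / (2 * N) ** N

    N, u, v = 3, 0.3 + 0.1j, 0.7 - 0.2j
    print(acue_average(N, lambda w: 1.0))                      # sanity check: ~1
    lhs = acue_average(N, lambda w: np.prod(1 + v * w) / np.prod(1 + u * w))
    rhs = (1 - u**N * v**N) / (1 - u**(2 * N))                 # e_N(u, v)
    print(lhs, rhs)                                            # the two should agree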
This formula in turn is a consequence of a general formula introduced by Borodin-Olshanski-Strahov in [5] for computing the average of ratios of characteristic polynomials associated to what they call Giambelli-compatible point processes. We will show the ACUE falls into this class of point processes and then specialize their result; see Theorem 6 below.

Theorem 4 may be compared to an analogous formula for the CUE (see e.g. [24, Thm. 4.2], [13, Thm. 5.4], or [19, (4.35)]):

Theorem 5. For N and J positive integers, and v_1, ..., v_J complex numbers and u_1, ..., u_J complex numbers which do not lie on the unit circle,

   E_{CUE(N)} [ ∏_{j=1}^{J} det(1 + v_j G) / ∏_{j=1}^{J} det(1 + u_j G) ]
      = 1/det( 1/(u_i − v_j) ) · det( e_N(u_i, v_j)/(u_i − v_j) ),

where the determinants on the right hand side are of J×J matrices, over the indices 1 ≤ i, j ≤ J, and

   e_N(u, v) := 1 if |u| < 1, and e_N(u, v) := v^N/u^N if |u| > 1.

From Theorem 4, a possible strategy for proving Theorem 2 is evident: we take appropriately scaled limits, with each u_i tending either to 0 or ∞ in order to recover the average appearing in Theorem 2. Doing so nonetheless involves several nontrivial determinantal manipulations.

There is at least one alternative strategy for proving Theorems 2 and 4, and this is to rely on the theory of orthogonal polynomials. This method has been used to derive similar formulas for moments and averages of ratios of characteristic polynomials in several random matrix ensembles; see for instance [7, 1, 17] for moments and [27, 6] for ratios. One difficulty in the orthogonal polynomial method is that the finitely supported weights which define the ACUE allow for at most a finite collection of monic orthogonal polynomials. It would be interesting to see if this difficulty can be overcome to give alternative proofs of Theorems 2 or 4.

It is perhaps a little surprising that moments of characteristic polynomials from the ACUE have a structure related to those from the CUE even for very large powers. This may ultimately be seen as a consequence of the similarity between Theorems 4 and 5 for ratios; another purpose of this paper is to provide an explanation of how ratio formulas like Theorem 4 can be used to derive moment formulas like Theorem 2. It will be evident that the same method could be used to deduce Theorem 3 from Theorem 5 as well.

We note that formulas for the averages of ratios of characteristic polynomials in the CUE are usually written in a form involving a sum over 'swaps', with a slightly different formalism than Theorem 5 -- see for instance [10, Prop 2.1], [9, Cor. 1.2], or [8, Thm. 3]. By use of the functional equation, these formulas can be deduced from Theorem 5. For instance, the J = 2 case of Theorem 5 entails the following: for complex numbers α, β, γ, δ with |γ|, |δ| < 1,

   E_{CUE(N)} [ det(1 − αG) det(1 − βG) / (det(1 − γG) det(1 − δG)) ]
      = (β^N/δ^N) E_{CUE(N)} [ det(1 − αG) det(1 − β^{−1}G) / (det(1 − γG) det(1 − δ^{−1}G)) ]
      = (1 − βγ)(1 − αδ) / ((1 − δγ)(1 − αβ))
        + (αβ)^N (1 − γα^{−1})(1 − δβ^{−1}) / ((1 − α^{−1}β^{−1})(1 − γδ)).

Note that this formula is valid only for |γ|, |δ| < 1. If instead for instance |γ| < 1 and |δ| > 1, the left hand side would just work out to 1.

By using the functional equation (4) for det(1 + vg) one can derive expressions of this sort for the ACUE as well. For instance, for complex numbers α, β, γ, δ with neither γ nor δ equal to a 2N-th root of unity, Theorem 4 reveals

   E_{ACUE(N)} [ det(1 − αg) det(1 − βg) / (det(1 − γg) det(1 − δg)) ]
      = (β^N/δ^N) E_{ACUE(N)} [ det(1 − αg) det(1 − β^{−1}g) / (det(1 − γg) det(1 − δ^{−1}g)) ]
      = (1 − βγ)(1 − αδ) / ((1 − δγ)(1 − αβ)) · (1 − α^N γ^N)/(1 − γ^{2N}) · (1 − β^N δ^N)/(1 − δ^{2N})
        + (αβ)^N (1 − γα^{−1})(1 − δβ^{−1}) / ((1 − α^{−1}β^{−1})(1 − γδ)) · (1 − β^{−N} γ^N)/(1 − γ^{2N}) · (1 − α^{−N} δ^N)/(1 − δ^{2N}).

Note that in this case there is no need to assume that |γ|, |δ| < 1. Indeed, the right and left hand sides are meromorphic in the variables γ and δ, with singularities only at 2N-th roots of unity.

This procedure can be used to obtain formulas for J > 2 as well. But for mixed ratios of more than two characteristic polynomials, expansions like this for the ACUE seem to become increasingly more complicated than those for the CUE; by contrast the determinantal formula of Theorem 4 remains relatively simple for all J.
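The determinantal right-hand side of Theorem 4 is also easy to evaluate numerically for any J. The sketch below is again an added illustration (reusing acue_average from the previous sketch) comparing both sides at J = 2.

    import numpy as np

    def theorem4_rhs(N, us, vs):
        """det(e_N(u_i, v_j)/(u_i - v_j)) divided by det(1/(u_i - v_j))."""
        us = np.asarray(us, dtype=complex)
        vs = np.asarray(vs, dtype=complex)
        cauchy = 1.0 / (us[:, None] - vs[None, :])
        e = (1 - np.outer(us**N, vs**N)) / (1 - us[:, None]**(2 * N))
        return np.linalg.det(cauchy * e) / np.linalg.det(cauchy)

    N = 3
    us = [0.4 + 0.2j, 1.7 - 0.3j]     # arbitrary points avoiding 2N-th roots of unity
    vs = [0.9j, -0.5 + 0.1j]
    ratio = lambda w: (np.prod([np.prod(1 + v * w) for v in vs])
                       / np.prod([np.prod(1 + u * w) for u in us]))
    print(acue_average(N, ratio), theorem4_rhs(N, us, vs))     # should agree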
It is natural to ask whether Theorems 2 or 6 shed light on any number theoretic phenomena. A typical question in number theory involves moments of the Riemann zeta-function in which powers K and L are fixed or grow slowly. Theorem 1 of Tao is certainly of interest in this regard, but because K and L must be of size at least N before Theorem 2 sees a difference between the CUE and ACUE prediction, it does not seem that the new information in this theorem will shed light on these sorts of questions. On the other hand, uniform estimates for moments can be of some interest in determining extreme values of L-functions (see e.g. [25, Sec. 7]), and Theorem 2 may be of some use in examining alternative possibilities here. Furthermore Theorem 4 suggests a hypothetical 'alternative ratio formula' for the Riemann zeta-function -- a formula which one would like to rule out but cannot at present. This is discussed further in Section 4.

Acknowledgements: We thank David Farmer and Ofir Gorodetsky for very useful references, comments, and corrections. B.R. received partial support from an NSERC grant and US NSF FRG grant 1854398.

2. The ratio formula: Theorem 4

In this section we prove Theorem 4. Our starting point is an application of a general formula of Borodin-Olshanski-Strahov to the ACUE.

Theorem 6 (A Borodin-Olshanski-Strahov Formula for ACUE). For N and J positive integers, v_1, ..., v_J complex numbers, and u_1, ..., u_J complex numbers which are not 2N-th roots of unity,

   E_{ACUE(N)} [ ∏_{j=1}^{J} det(1 + v_j g) / ∏_{j=1}^{J} det(1 + u_j g) ]
      = 1/det( 1/(u_i − v_j) ) · det( (1/(u_i − v_j)) E_{ACUE(N)} [ det(1 + v_j g)/det(1 + u_i g) ] ),   (6)

where the determinants on the right hand side are of J×J matrices, over the indices 1 ≤ i, j ≤ J.

Proof. This requires only minor modifications of formulas in [5]. Claims I and II of that paper show that if α is a measure on C with finite moments and if a point process consisting of N points {z_1, ..., z_N} in C has a joint density given by

   (const.) ∏_{1≤i<j≤N} |z_i − z_j|^2

[...] for |α_1|, ..., |α_J| > 1 (where all power series will converge absolutely) and then meromorphically continuing to all α_1, ..., α_J. Finally, we arrive at (6) simply by setting α_j = −u_j^{−1}, β_j = v_j^{−1} and simplifying the resulting determinants. □

The remainder of this section is therefore devoted to understanding the expectation which occurs on the right hand side of (6), accomplished in Proposition 8 below.

Lemma 7. Consider a hook partition (a, 1^b) with a ≥ 1 and b ≥ 0 of length b + 1 ≤ N. For the Schur polynomial s_{(a,1^b)} associated to this partition in the variables e(ϑ_1), ..., e(ϑ_N) of the ACUE(N), we have

   E_{ACUE(N)} s_{(a,1^b)} = (−1)^b if a + b ≡ 0 (mod 2N), and 0 otherwise.

Proof. Label ω_j = e(ϑ_j) so that for a partition λ of length ℓ(λ) ≤ N,

   s_λ = det( ω_i^{λ_j+N−j} ) / det( ω_i^{N−j} ),

where if ℓ(λ) < N we adopt the convention λ_{ℓ(λ)+1} = · · · = λ_N = 0, and the determinants above are N × N. Note that det( ω_i^{N−j} ) = ∆(ω_1, ..., ω_N). Hence from the definition (1) of the ACUE,

   E_{ACUE(N)} s_{(a,1^b)} = 1/(N! (2N)^N) Σ_{t_1,...,t_N} det( e((λ_j + N − j)t_i) ) · conj( det( e((N − j)t_i) ) ),   (7)

where each index t_i is summed over the set {0, 1/(2N), ..., (2N−1)/(2N)}. Expanding each determinant into a sum over permutations mapping {1, ..., N} to {1, ..., N} one sees

   det( e((λ_j + N − j)t_i) ) · conj( det( e((N − j)t_i) ) )
      = Σ_{σ,π∈S_N} (−1)^σ (−1)^π ∏_{i=1}^{N} e( (λ_{σ(i)} + N − σ(i))t_i − (N − π(i))t_i ).
Thus (7) equals

   (1/N!) Σ_{σ,π∈S_N} (−1)^σ (−1)^π 1[ (λ_{σ(i)} − σ(i)) + π(i) ≡ 0 (mod 2N) for all i ]
      = Σ_{π∈S_N} (−1)^π 1[ λ_i − i + π(i) ≡ 0 (mod 2N) for all i ],        (8)

where 1[·] denotes an indicator function, taking the value 1 or 0 depending on whether the proposition inside is true or false.

In the special case that λ = (a, 1^b) this sum has a simple evaluation. In that case any nonvanishing summand will have π satisfying

   a − 1 + π(1) ≡ 0 (mod 2N)
   and 1 − 2 + π(2) ≡ 0 (mod 2N)
   ...
   1 − (b + 1) + π(b + 1) ≡ 0 (mod 2N)
   and 0 − (b + 2) + π(b + 2) ≡ 0 (mod 2N)
   ...
   0 − N + π(N) ≡ 0 (mod 2N).

Since 1 ≤ π(i) ≤ N, the last N − 1 of these equations force

   π(b + 2) = b + 2, ..., π(N) = N,
   π(2) = 1, ..., π(b + 1) = b.

This forces π(1) = b + 1, and so at most one permutation π makes a nonzero contribution to (8), and that contribution is nonzero if and only if a + b ≡ 0 (mod 2N), since a + b = a − 1 + π(1). Since in cycle notation this permutation is π = (b + 1, b, ..., 2, 1) we have (−1)^π = (−1)^b and this verifies the lemma. □

Proposition 8. For v any complex number and u any complex number which is not a 2N-th root of unity,

   E_{ACUE(N)} [ det(1 + vg)/det(1 + ug) ] = (1 − u^N v^N)/(1 − u^{2N}).

Proof. We first consider |u| < 1. From a series expansion we have

   det(1 + vg)/det(1 + ug) = Σ_{j=0}^{N} Σ_{k=0}^{∞} (−1)^k e_j h_k v^j u^k,        (9)

where e_j and h_k are respectively elementary symmetric polynomials of degree j and complete homogeneous symmetric polynomials of degree k in the variables e(ϑ_1), ..., e(ϑ_N) associated to ACUE(N). Note that e_0 = h_0 = 1, while other terms can be expressed in terms of Schur polynomials in the variables e(ϑ_1), ..., e(ϑ_N):

   e_j h_0 = s_{(1^j)}                          for 1 ≤ j ≤ N,
   e_0 h_k = s_{(k)}                            for k ≥ 1,
   e_j h_k = s_{(k+1,1^{j−1})} + s_{(k,1^j)}    for 1 ≤ j ≤ N − 1, k ≥ 1,
   e_N h_k = s_{(k+1,1^{N−1})}                  for k ≥ 1,

with the first two identities following from the combinatorial definition of Schur functions [26, Sec. 7.10], and the last two from the Pieri rule [26, Thm. 7.15.7]. From Lemma 7 it thus follows

   E_{ACUE(N)} e_j h_k = 1 if j = 0 and k ≡ 0 (mod 2N),
                         (−1)^{N−1} if j = N and k ≡ N (mod 2N),
                         0 otherwise.

Hence from (9),

   E_{ACUE(N)} [ det(1 + vg)/det(1 + ug) ]
      = (1 + u^{2N} + u^{4N} + · · ·) − (v^N u^N + v^N u^{3N} + v^N u^{5N} + · · ·)
      = (1 − u^N v^N)/(1 − u^{2N}),

for |u| < 1 and all v. The result then follows by analytic continuation. □

Thus we have:

Proof of Theorem 4. Apply Proposition 8 to Theorem 6. □
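Lemma 7 can also be sanity-checked numerically for small parameters. The sketch below is an added illustration (reusing acue_average from the first sketch); it evaluates the hook Schur polynomial through the bialternant ratio used in the proof above and compares the ACUE average with (−1)^b · 1[a + b ≡ 0 (mod 2N)].

    import numpy as np

    def schur_hook(a, b, omega):
        """s_{(a,1^b)} at the points omega, via det(w^(lam_j+N-j)) / det(w^(N-j))."""
        N = len(omega)
        lam = [a] + [1] * b + [0] * (N - b - 1)     # hook partition, padded to length N
        num = np.linalg.det(np.array([[w ** (lam[j] + N - j - 1) for j in range(N)]
                                      for w in omega]))
        den = np.linalg.det(np.array([[w ** (N - j - 1) for j in range(N)]
                                      for w in omega]))
        return num / den

    N = 2
    for (a, b) in [(3, 1), (2, 1), (4, 0), (1, 1)]:
        avg = acue_average(N, lambda w: schur_hook(a, b, w))
        expected = (-1) ** b if (a + b) % (2 * N) == 0 else 0
        print((a, b), np.round(avg, 10), expected)   # the two columns should match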
3. The moment formula: Theorem 2

Our technique in proving Theorem 2 will be to condense the determinants in (5) by letting all u_i → 0. We begin with several lemmas that are useful for that purpose. The following is a slight generalization of Lemma 1 of [22].

Lemma 9 (Determinantal Condensation Identity). Take q ≤ J. For f_1, f_2, ..., f_J functions (mapping R to C) that are at least q times continuously differentiable at the point a,

   lim_{u_1,...,u_q→a} (1/∆(u_q, ..., u_1)) det( f_j(u_i) )_{i,j=1}^{J}
      = det[ ( (1/(i−1)!) f_j^{(i−1)}(a) )_{i≤q, j≤J} ;
             ( f_j(u_i) )_{q+1≤i≤J, j≤J} ],                                (10)

where on the left hand side the limit is taken in the order that first u_1 → u_2, then u_2 → u_3, ..., u_{q−1} → u_q, and finally u_q → a.

Proof. We prove this identity by induction, viewing ∆(u_1) = 1 for the q = 1 case, which then becomes trivial. Suppose then that (10) has been proved for a limit in q − 1 variables. This implies for a limit in q variables,

   lim_{u_1,...,u_q→a} (1/∆(u_q, ..., u_1)) det( f_j(u_i) )_{i,j=1}^{J}
      = lim_{u_q→a} lim_{u_{q−1}→u_q} (1/(u_q − u_{q−1})^{q−1})
        det[ ( (1/(i−1)!) f_j^{(i−1)}(u_{q−1}) )_{i≤q−1, j≤J} ;
             ( f_j(u_i) )_{i≥q, j≤J} ].

But Taylor expanding the entries of row q as

   f_j(u_q) = Σ_{i=1}^{q} ( f_j^{(i−1)}(u_{q−1}) / (i−1)! ) (u_q − u_{q−1})^{i−1} + O((u_q − u_{q−1})^q),

and using multilinearity of the determinant to cancel out the first q − 1 terms of the above sum in row q, the claimed result quickly follows. □

Remark 10. It is likely that a result of this sort remains true no matter the path along which a limit is taken (perhaps with further analytic conditions on the functions f_j), but we won't require that in what follows.

Remark 11. It is easy to see by permuting rows of the determinant that this result also implies

   lim_{u_1,...,u_q→a} (1/∆(u_1, ..., u_q)) det( f_j(u_i) )_{i,j=1}^{J}
      = det[ ( (1/(q−i)!) f_j^{(q−i)}(a) )_{i≤q, j≤J} ;
             ( f_j(u_i) )_{i≥q+1, j≤J} ]                                   (11)

and

   lim_{u_{q+1},...,u_K→a} (1/∆(u_K, ..., u_{q+1})) det( f_j(u_i) )_{i,j=1}^{J}
      = det[ ( f_j(u_i) )_{i≤q, j≤J} ;
             ( (1/(i−q−1)!) f_j^{(i−q−1)}(a) )_{i≥q+1, j≤J} ],             (12)

where in this last equation the limit is taken in the order u_{q+1} → u_{q+2}, ..., u_{K−1} → u_K, u_K → a.

In applying this lemma we need the following computation.

Lemma 12. For integers ℓ ≥ 0 and N ≥ 1,

   lim_{u→0} (1/ℓ!) (d^ℓ/du^ℓ) [ (1/(u − v)) · (1 − u^N v^N)/(1 − u^{2N}) ] = −p_{N,ℓ}(v),

for p_{N,ℓ} defined by

   p_{N,ℓ}(v) := 1/v^{ℓ+1} − v^{N−1} H_{N,ℓ}(1/v)
              = 1/v^{ℓ+1} − ( 0 if 0 ≤ [ℓ]_{2N} ≤ N − 1;  v^{2N−1−[ℓ]_{2N}} if N ≤ [ℓ]_{2N} ≤ 2N − 1 ).   (13)

Proof. Note that we have

   (1/(u − v)) · (1 − u^N v^N)/(1 − u^{2N})
      = −(1/v) · 1/(1 − u/v) + ((u^N − v^N)/(u − v)) · u^N/(1 − u^{2N})
      = −(1/v + u/v^2 + u^2/v^3 + · · ·)
        + (v^{N−1} + v^{N−2}u + · · · + u^{N−1})(u^N + u^{3N} + · · ·),

taking a series expansion around u = 0. Since the quantity on the left hand side of the Lemma is exactly the coefficient of u^ℓ in this expansion, the claim follows by inspection. □
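The series manipulation in this proof can be verified symbolically. The following SymPy sketch is an added illustration; it checks that the coefficient of u^ℓ equals −p_{N,ℓ}(v) for a sample N and a range of ℓ covering both branches of (13).

    import sympy as sp

    u, v = sp.symbols('u v')
    N = 2
    F = (1 - u**N * v**N) / ((u - v) * (1 - u**(2 * N)))

    for ell in range(8):
        coeff = sp.series(F, u, 0, ell + 2).removeO().coeff(u, ell)
        r = ell % (2 * N)                                  # the residue [ell]_{2N}
        p = 1 / v**(ell + 1) - (v**(2 * N - 1 - r) if r >= N else 0)
        assert sp.simplify(coeff + p) == 0                 # coefficient is -p_{N,ell}(v)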
Lemma 13 (Cauchy Determinant Formula). For u_1, ..., u_J and v_1, ..., v_J collections of complex numbers with no elements in common,

   det( 1/(u_i − v_j) )_{i,j=1}^{J} = ∆(u_J, ..., u_1) ∆(v_1, ..., v_J) / □(u; v),

where

   □(u; v) := ∏_{i=1}^{J} ∏_{j=1}^{J} (u_i − v_j).

Proof. See for instance [23, Part 7, §1, Ex. 3]. □

We can now give a proof of the moment formula for ACUE.

Proof of Theorem 2. We set u = (u_1, ..., u_K) and u′ = (u′_1, ..., u′_L) as abbreviations for ordered lists, and let u ∪ u′ := (u_1, ..., u_K, u′_1, ..., u′_L) be an (ordered) concatenation of these lists. We abbreviate ∆(u) = ∆(u_1, ..., u_K) and also use the notation ∆̃(u) = ∆(u_K, ..., u_1) = (−1)^{K(K−1)/2} ∆(u).

Our starting point is the identity

   E_{ACUE(N)} [ det(g)^{−K} ∏_{k=1}^{K+L} det(1 + v_k g) ]
      = lim_{u→∞} u_1^N · · · u_K^N lim_{u′→0} E_N(u ∪ u′; v),             (14)

where we define

   E_N(u ∪ u′; v) := E_{ACUE(N)} [ ∏_{k=1}^{K+L} det(1 + v_k g) / ( ∏_{k=1}^{K} det(1 + u_k g) ∏_{ℓ=1}^{L} det(1 + u′_ℓ g) ) ].

The limits u → ∞ and u′ → 0 mean u_1, ..., u_K → ∞ and u′_1, ..., u′_L → 0. In what follows we will take these in the order u′_1 → u′_2, ..., u′_{L−1} → u′_L, u′_L → 0 and u_1 → u_2, ..., u_{K−1} → u_K, u_K → ∞ so that Lemma 9 can easily be applied. For notational reasons we write

   F_N(u, v) := (1/(u − v)) · (1 − u^N v^N)/(1 − u^{2N}).

We use Theorem 4 and Lemma 13 to see

   E_N(u ∪ u′; v) = ( □(u ∪ u′; v) / (∆̃(u ∪ u′) ∆(v)) )
                    det[ ( F_N(u_i, v_j) )_{i≤K, j≤K+L} ;
                         ( F_N(u′_{i−K}, v_j) )_{i≥K+1, j≤K+L} ]
                  = ( □(u; v) □(u′; v) / (∆̃(u) ∆̃(u′) □(u′; u) ∆(v)) )
                    det[ ( F_N(u_i, v_j) )_{i≤K, j≤K+L} ;
                         ( F_N(u′_{i−K}, v_j) )_{i≥K+1, j≤K+L} ].

Taking a limit u′ → 0 and using Lemma 9 -- in particular its consequence (12) -- and Lemma 12,

   lim_{u′→0} E_N(u ∪ u′; v)
      = ( □(u; v) ∏_{k=1}^{K+L} (−v_k)^L / (∆̃(u) ∏_{k=1}^{K} (−u_k)^L ∆(v)) )
        det[ ( F_N(u_i, v_j) )_{i≤K, j≤K+L} ;
             ( −p_{N,i−K−1}(v_j) )_{i≥K+1, j≤K+L} ].

But note the easily verified functional equation

   F_N(u, v) = −F_N(u^{−1}, v^{−1}) v^{N−1} u^{−(N+1)}.

Thus

   u_1^N · · · u_K^N lim_{u′→0} E_N(u ∪ u′; v)
      = (−1)^L ( □(u; v) ∏_{k=1}^{K+L} v_k^L / (∆̃(u) ∆(v)) ) ∏_{k=1}^{K} u_k^{−L−1}
        · det[ ( −v_j^{N−1} F_N(u_i^{−1}, v_j^{−1}) )_{i≤K, j≤K+L} ;
               ( −p_{N,i−K−1}(v_j) )_{i≥K+1, j≤K+L} ].                     (15)

For fixed v, we have as u → ∞,

   ( □(u; v) / ∆̃(u) ) ∏_{k=1}^{K} u_k^{−L−1}
      = □(u; v) ∏_{k=1}^{K} u_k^{−L−1} / ( ∏_{k=1}^{K} u_k^{K−1} ∆(1/u_1, ..., 1/u_K) )
      ∼ 1 / ∆(1/u_1, ..., 1/u_K).

Applying Lemma 9 -- with its consequence (11) this time -- and Lemma 12, the limit of (15) as u → ∞ is

   = (−1)^L ( ∏_{k=1}^{K+L} v_k^L / ∆(v) )
     det[ ( v_j^{N−1} p_{N,K−i}(v_j^{−1}) )_{i≤K, j≤K+L} ;
          ( −p_{N,i−K−1}(v_j) )_{i≥K+1, j≤K+L} ]
   = (1/∆(v)) det[ ( v_j^{N+L−1} p_{N,K−i}(v_j^{−1}) )_{i≤K, j≤K+L} ;
                   ( v_j^L p_{N,i−K−1}(v_j) )_{i≥K+1, j≤K+L} ].

By inspection of matrix entries, the above is

   = det( φ_i(v_j) )_{i,j=1}^{K+L} / ∆(v).

Recalling (14), this is exactly what we sought to prove. □

4. Hypothetical implications for ratios of ζ(s)

Let us briefly and somewhat informally discuss these results in the context of the distribution of the Riemann zeta-function. For the sake of this discussion, suppose the Riemann Hypothesis is true, and label the nontrivial zeros of the zeta-function by {1/2 + iγ_j}_{j∈Z}, so that γ_j ∈ R for all j. What is widely believed about the local distribution of zeros concerns two point processes, the first point process (associated to a large parameter T) given by

   { (log T / 2π) (γ_j − t) }_{j∈Z}                                        (16)

where t ∈ [T, 2T] is chosen randomly and uniformly, and the second point process (associated to a large parameter N) given by

   { Nθ_i }_{i=1,...,N}                                                    (17)

where θ_1, ..., θ_N ∈ [−1/2, 1/2) are identified with the points e(θ_1), ..., e(θ_N) of CUE(N). The widely believed GUE (Gaussian Unitary Ensemble) Hypothesis states that as T → ∞ and N → ∞ both point processes (16) and (17) tend to the same limiting point process. (This means that randomly generated configurations of points from these two processes will look similar near the origin of the real line.)

The ACUE was first investigated as one alternative model of how zeros of the Riemann zeta-function might be spaced. In particular, one considers the point process (associated to a large parameter N) given by

   { Nϑ_i + r/2 }_{i=1,...,N}                                              (18)

where ϑ_1, ..., ϑ_N ∈ {−1/2, (−N+1)/(2N), (−N+2)/(2N), ..., (N−1)/(2N)} are identified with the points e(ϑ_1), ..., e(ϑ_N) of ACUE(N), and r ∈ [0, 1) is chosen independently, and uniformly at random. As N → ∞ the point process (18) tends to a limiting process, called the AH point process in [21]. The AH point process has correlation functions which mimic the limiting process for CUE (see [20] for further discussion), but it also has gaps between points which are always half-integers.
In this way it is one pos- +sible – though likely not a unique – candidate for a limiting distribution of the +zeta-function point process 16 which is compatible with what is currently known +about the local distribution of zeros of the zeta-function and also with the so-called +Alternative Hypothesis, a (widely disbelieved) conjecture that gaps between zeros +always occur close to half-integer multiples of the mean spacing. +For this reason [28] gave the name AGUE (Alternative Gaussian Unitary En- +semble) Hypothesis to the hypothetical claim that as T → ∞ the zeta zero point +process (16) tends to the AH point process. As one would like to rule out the +Alternative Hypothesis, one would like to rule out the stronger AGUE Hypothesis. +More details on the AH point process can be found in the references [21, 28], +while further information on the Alternative Hypothesis in general can be found in +[2]. +A major impetus for studying mixed moments of characteristic polynomials +det(1 + uG) for the CUE came from the work of Keating-Snaith, who used in- +formation about CUE moments to make a conjecture regarding moments of the +Riemann zeta-function [18, Eq. (19)]. As first observed by Tao and as discussed in +the introduction, the consequence of Theorem 1 that for sufficiently large N mixed +moments in the CUE and ACUE agree suggests that even should the zeros of the +Riemann zeta-function be spaced according to the pattern of the ACUE, this could +still be consistent with the Keating-Snaith moment conjecture. +The local spacing of zeros of the Riemann zeta-function is also closely related to +the averages of ratios of shifts of the Riemann zeta function near the critical line. +This perspective was first pursued by Farmer [14, 15] and has subsequently been +investigated by others [9, 11, 12]. In particular note that from Theorem 5, +lim +N→∞ ECUE(N) +� +J +� +j=1 +det(1 − e−νj/NG) +det(1 − e−µj/NG) +� += +1 +det +� +1 +νj−µi +� det +� +1 +νj − µi +e(µi, νj) +� +, +for Re µj ̸= 0 for all j, where +e(µ, ν) := +� +1 +if Re µ > 0 +eµ−ν +if Re µ < 0. +From the results proved in [12, 24] it can be seen that the claim +lim +T →∞ +1 +T +� 2T +T +J +� +j=1 +ζ(1/2 + νj/ log T + it) +ζ(1/2 + µj/ log T + it) dt = +1 +det +� +1 +νj−µi +� det +� +1 +νj − µi +e(µi, νj) +� +(19) +for Re µj ̸= 0 for all j, is equivalent to the GUE Hypothesis. (In fact [24] treats +only real µj, νj, but the method can be adapted to complex values. There is a +notational difference in [24]; the function E used there satisfies E(ν, µ) = e(µ, ν) +for the function e used here.) +A belief in the AGUE Hypothesis would suggest that we replace characteristic +polynomials det(1 − uG) as they appear above by det(1 − uei2πr/2Ng), where r ∈ +[0, 1) is independent of g and uniformly chosen. For the ACUE, from Theorem 4 + +14 +BRAD RODGERS, HARSHITH SAI VALLABHANENI +we have +lim +N→∞ EACUE(N) +� +J +� +j=1 +det(1 − e−νj/Ng) +det(1 − e−µj/Ng) +� += +1 +det +� +1 +νj−µi +� det +� +1 +νj − µi +e(µi, νj) +� +, +for µj /∈ i +2Z for all j, where +e(µ, ν) := 1 − e−µ−ν +1 − e−2µ . +Hence on the assumption of the AGUE Hypothesis, one should instead expect for +Re µj ̸= 0 for all j, +lim +T →∞ +1 +T +� 2T +T +J +� +j=1 +ζ(1/2 + νj/ log T + it) +ζ(1/2 + µj/ log T + it) dt += lim +N→∞ +� 1 +0 +EACUE(N) +� +J +� +j=1 +det(1 − e−νj/Neiπr/Ng) +det(1 − e−µj/Neiπr/Ng) +� +dr += +� 1 +0 +1 +det +� +1 +νj−µi +� det +� +1 +νj − µi +e(µi − iπr, νj − iπr) +� +dr += +1 +det +� +1 +νj−µi +� +� +|z|=1 +det +� +1 +νj − µi +1 − ze−µ−ν +1 − ze−2µ +� dz +z . 
(20) is of course a different expression than (19). Thus for averages of ratios of the Riemann zeta-function, an ACUE spacing would be distinguished from CUE spacing. In fact using the methods of [12, 24] it should be possible to demonstrate rigorously that (20) is equivalent to the AGUE Hypothesis, but we do not pursue this here.

References

[1] J. Baik, P. Deift, E. Strahov. Products and ratios of characteristic polynomials of random Hermitian matrices. Integrability, topological solitons and beyond. J. Math. Phys. 44 (2003), no. 8, 3657–3670.
[2] S.A.C. Baluyot. On the pair correlation conjecture and the alternative hypothesis. J. Number Theory 169 (2016), 183–226.
[3] A. Borodin. Periodic Schur process and cylindric partitions. Duke Math. J. 140 (2007), no. 3, 391–468.
[4] A. Borodin, A. Okounkov, G. Olshanski. Asymptotics of Plancherel measures for symmetric groups. J. Amer. Math. Soc. 13 (2000), no. 3, 481–515.
[5] A. Borodin, G. Olshanski, and E. Strahov. Giambelli compatible point processes. Adv. in Appl. Math. 37 (2006), no. 2, 209–248.
[6] A. Borodin, E. Strahov. Averages of characteristic polynomials in random matrix theory. Comm. Pure Appl. Math. 59 (2006), no. 2, 161–253.
[7] E. Brézin, S. Hikami. Characteristic polynomials of random matrices. Comm. Math. Phys. 214 (2000), no. 1, 111–135.
[8] D. Bump, and A. Gamburd. On the averages of characteristic polynomials from classical groups. Comm. Math. Phys. 265 (2006), no. 1, 227–274.
[9] J.B. Conrey, D.W. Farmer, and M.R. Zirnbauer. Howe pairs, supersymmetry, and ratios of random characteristic polynomials for the unitary groups U(N). Preprint.
[10] J.B. Conrey, P.J. Forrester, and N.C. Snaith. Averages of ratios of characteristic polynomials for the compact classical groups. Int. Math. Res. Not. IMRN (2005), 397–431.
[11] J.B. Conrey, N.C. Snaith. Applications of the L-functions ratios conjectures. Proc. Lond. Math. Soc. (3) 94 (2007), no. 3, 594–646.
[12] J.B. Conrey, N.C. Snaith. Correlations of eigenvalues and Riemann zeros. Commun. Number Theory Phys. 2 (2008), no. 3, 477–536.
[13] R. Chhaibi, J. Najnudel, and A. Nikeghbali. The circular unitary ensemble and the Riemann zeta function: the microscopic landscape and a new approach to ratios. Invent. Math. 207 (2017), 23–113.
[14] D.W. Farmer. Long mollifiers of the Riemann zeta-function. Mathematika 40 (1993), no. 1, 71–87.
[15] D.W. Farmer. Mean values of ζ′/ζ and the GUE hypothesis. Int. Math. Res. Not. (1995), 71–82.
[16] K. Johansson. Non-intersecting paths, random tilings and random matrices. Probab. Theory Related Fields 123 (2002), no. 2, 225–280.
[17] B. Jonnadula, J.P. Keating, and F. Mezzadri. On the moments of characteristic polynomials. Glasg. Math. J. (2022), 1–21.
[18] J.P. Keating, N.C. Snaith. Random matrix theory and ζ(1/2 + it). Comm. Math. Phys. 214 (2000), no. 1, 57–89.
[19] M. Kieburg, and T. Guhr. Derivation of determinantal structures for random matrix ensembles in a new way. J. Phys. A: Math. Theor. 43 (2010), 31pp.
[20] J.C. Lagarias, and B. Rodgers. Band-limited mimicry of point processes by point processes supported on a lattice. Ann. Appl. Probab. 31 (2021), no. 1, 351–376.
[21] J.C. Lagarias, and B. Rodgers. Higher correlations and the alternative hypothesis. Q. J. Math. 71 (2020), no. 1, 257–280.
[22] A. Medjedovic. Exact Formulas for Averages of Secular Coefficients. MSc Thesis. University of Waterloo. Available at http://hdl.handle.net/10012/17591.
[23] G. Pólya, and G. Szegő. Problems and theorems in analysis. II. Theory of functions, zeros, polynomials, determinants, number theory, geometry. Translated from the German by C. E. Billigheimer. Reprint of the 1976 English translation. Classics in Mathematics. Springer-Verlag, Berlin, 1998. xii+392 pp.
[24] B. Rodgers. Tail bounds for counts of zeros and eigenvalues, and an application to ratios. Comment. Math. Helv. 92 (2017), no. 2, 311–347.
[25] K. Soundararajan. The distribution of values of zeta and L-functions. arXiv preprint arXiv:2112.03389.
[26] R.P. Stanley. Enumerative combinatorics. Vol. 2. Cambridge Studies in Advanced Mathematics, 62. Cambridge University Press, Cambridge, 1999.
[27] E. Strahov, Y.V. Fyodorov. Universal results for correlations of characteristic polynomials: Riemann-Hilbert approach. Comm. Math. Phys. 241 (2003), no. 2-3, 343–382.
[28] T. Tao. The alternative hypothesis for unitary matrices, weblog post. Available at https://terrytao.wordpress.com/2019/05/08/the-alternative-hypothesis-for-unitary-matrices/
[29] H. Widom. Random Hermitian matrices and (nonrandom) Toeplitz matrices. Toeplitz operators and related topics (Santa Cruz, CA, 1992), 9–15, Oper. Theory Adv. Appl., 71, Birkhäuser, Basel, 1994.

Department of Mathematics and Statistics, Queen's University, Kingston, Ontario, K7L 3N6, Canada
E-mail address: brad.rodgers@queensu.ca

Indian Institute of Technology Kharagpur, Kharagpur, West Bengal 721302, India
E-mail address: vallabhaneniharshith@gmail.com
[...]

   r(n + 1) := min{ m ∈ N | m > r(n) and a_m ≥ b_{n+1} },

for n ∈ N. For all n ∈ N we have a_{r(n)} ≥ b_n. For the infinitely many n ∈ N with α − b_n < 2^{−n} we obtain

   α − a_{r(n)} ≤ α − b_n < 2^{−n}. □

3. Regainingly Approximable Sets of Natural Numbers

Let us call a total function f : N → N an enumeration of a set A ⊆ N if the following two conditions are satisfied:

(1) A = {n ∈ N | (∃k ∈ N) f(k) = n + 1},
(2) for every n ∈ A there exists exactly one k ∈ N with f(k) = n + 1.

If f(k) = n + 1 then we say that at stage k the function f enumerates the number n into A. Note that here f(k) = 0 encodes that the function f does not enumerate anything into A at stage k. It is clear that a set A ⊆ N is computably enumerable if and only if there exists a computable enumeration of A. If f : N → N is an enumeration of a subset of N then, for t ∈ N, we write

   Enum(f)[t] := {n ∈ N | (∃k ∈ N)(k < t and f(k) = n + 1)}.

Definition 6. Let r : N → N be a nondecreasing, unbounded function.

(1) We call an enumeration f : N → N of a set A ⊆ N r-good if there exist infinitely many n such that {0, ..., n − 1} ∩ A ⊆ Enum(f)[r(n)].
(2) We call a set A ⊆ N regainingly r-approximable if there exists a computable enumeration f : N → N of A that is r-good.

Example 7. Let A ⊆ N be a decidable set. Then the function f : N → N defined by f(n) := n + 1 if n ∈ A, f(n) := 0 if n ∉ A, is a computable and id_N-good enumeration of A. Hence, A is regainingly id_N-approximable.
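These definitions are directly executable. The following Python sketch is an added illustration (the helper names and the sample set are ours); it implements Enum(f)[t] and the inclusion from Definition 6, and confirms that the enumeration of Example 7 satisfies it for every n.

    def enum(f, t):
        """Enum(f)[t]: numbers enumerated by f at stages k < t."""
        return {f(k) - 1 for k in range(t) if f(k) > 0}

    def good_at(f, A, r, n):
        """Does {0,...,n-1} ∩ A ⊆ Enum(f)[r(n)] hold?  r-goodness asks for
        this to hold for infinitely many n."""
        return {m for m in range(n) if m in A} <= enum(f, r(n))

    A = {0, 2, 3, 7}                           # a sample decidable set
    f = lambda n: n + 1 if n in A else 0       # the enumeration of Example 7
    print(all(good_at(f, A, lambda n: n, n) for n in range(50)))   # True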
Definition 8. We call a set A ⊆ N regainingly approximable if there exists a computable, nondecreasing, unbounded function r : N → N such that A is regainingly r-approximable.

The following theorem says that in this definition one can replace the function r by the identity id_N.

Theorem 9. For a set A ⊆ N the following two conditions are equivalent.

(1) There exists a computable, nondecreasing, unbounded function r : N → N such that A is regainingly r-approximable.
(2) A is regainingly id_N-approximable.

The proof of this theorem is not fully effectively uniform, as it contains a noneffective case distinction. Therefore, we first formulate a partial result that does have a uniformly effective proof. It implies, for example, that from an r-good enumeration of a set A, where r : N → N is an arbitrary computable, nondecreasing, unbounded function, one can effectively switch to a 2n-good enumeration of the same set A.

Lemma 10. Given two nondecreasing, unbounded functions r, s : N → N and an r-good enumeration f : N → N of a set A, one can compute an (id_N + s)-good enumeration g : N → N of the same set A. In particular, if r, s : N → N are computable, nondecreasing, unbounded functions and a set A ⊆ N is regainingly r-approximable then it is regainingly (id_N + s)-approximable as well.

Proof. Let r, s : N → N be two nondecreasing, unbounded functions, and let f : N → N be an r-good enumeration of a set A ⊆ N. The function p : N → N defined by

   p(n) := min{m ∈ N | s(m) > n},

for all n ∈ N, is nondecreasing and can be computed from s. It satisfies, for all n ∈ N,

   p(s(n)) = min{m ∈ N | s(m) > s(n)} > n.

We define a function g : N → N recursively as follows. For t ∈ N let

   M[t] := Enum(f)[r(p(t))] \ Enum(g)[t]

and g(t) := 1 + min(M[t]) if M[t] ≠ ∅, and g(t) := 0 if M[t] = ∅.

The function g is an enumeration of A. It is clear that it can be computed from r, s, and f. By assumption, there are infinitely many n such that {0, ..., n − 1} ∩ A ⊆ Enum(f)[r(n)]. Let us consider such a number n. We claim that

   {0, ..., n − 1} ∩ A ⊆ Enum(g)[n + s(n)].

To see this, first note that p(s(n)) > n implies

   {0, ..., n − 1} ∩ A ⊆ Enum(f)[r(n)] ⊆ Enum(f)[r(p(s(n)))].

These are at most n numbers, and those among them which have not yet been enumerated by g in stages strictly before stage s(n) are the smallest elements of M[s(n)]. Thus, because no further number smaller than n can enter M[t] for any t > s(n), they will be enumerated by g in one of the n stages s(n), ..., s(n) + n − 1. Consequently, they are elements of Enum(g)[n + s(n)], as was to be shown. □
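The construction in this proof is effective and short enough to run. The sketch below is an added illustration (function names are ours); it computes the first T stages of g from f, r and s exactly as described.

    from itertools import count

    def lemma10_g(f, r, s, T):
        """First T values of the (id_N + s)-good enumeration g of Lemma 10."""
        def p(n):                                   # p(n) = min{m : s(m) > n}
            return next(m for m in count() if s(m) > n)
        def enum_f(t):
            return {f(k) - 1 for k in range(t) if f(k) > 0}
        g, enum_g = [], set()
        for t in range(T):
            M = enum_f(r(p(t))) - enum_g            # the set M[t] from the proof
            if M:
                n = min(M)
                g.append(n + 1)
                enum_g.add(n)
            else:
                g.append(0)
        return g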
Proof of Theorem 9. We prove the nontrivial direction "1 ⇒ 2". Let us assume that r : N → N is a computable, nondecreasing, unbounded function such that A is regainingly r-approximable. Let f : N → N be a computable r-good enumeration of A. The function s : N → N defined by s(n) := ⌊n/2⌋, for n ∈ N, is computable, nondecreasing, and unbounded. By applying Lemma 10 we obtain a computable (id_N + s)-good enumeration g : N → N of A. So, g is ⌊3n/2⌋-good. We distinguish two cases for A.

First case: For almost all n ∈ N, |{0, ..., n − 1} ∩ A| ≤ ⌊n/2⌋. In this case, we proceed similarly as in the proof of Lemma 10. We define a function h : N → N recursively as follows. For t ∈ N let

   M[t] := Enum(g)[3t] \ Enum(h)[t]

and h(t) := 1 + min(M[t]) if M[t] ≠ ∅, and h(t) := 0 if M[t] = ∅.

The function h is a computable enumeration of A. Let N ∈ N be a number such that, for all n ≥ N, |{0, ..., n − 1} ∩ A| ≤ ⌊n/2⌋. There are infinitely many n ≥ N with

   {0, ..., n − 1} ∩ A ⊆ Enum(g)[⌊3n/2⌋].

Let us consider such a number n. We claim that

   {0, ..., n − 1} ∩ A ⊆ Enum(h)[n].

Indeed, there are at most ⌊n/2⌋ numbers in {0, ..., n − 1} ∩ A, and all these numbers are elements of the set

   Enum(g)[⌊3n/2⌋] ⊆ Enum(g)[3 · ⌈n/2⌉].

Furthermore, all those among these at most ⌊n/2⌋ numbers that have not yet been enumerated by h in stages strictly before stage ⌈n/2⌉ are the smallest elements of M[⌈n/2⌉] and will be enumerated by h in one of the ⌊n/2⌋ stages ⌈n/2⌉, ..., n − 1 (because no further number smaller than n can enter M[t] for any t > ⌈n/2⌉). Thus, they are elements of Enum(h)[n]. That was to be shown.

Second case: There exist infinitely many n ∈ N with |{0, ..., n − 1} ∩ A| > ⌊n/2⌋. In this case we define two increasing and computable sequences (n_i)_i and (t_i)_i of natural numbers as follows. First we compute the smallest natural number t_0 such that there exists a natural number n > 0 with ⌊3n/2⌋ ≤ t_0 and |{0, ..., n − 1} ∩ Enum(g)[t_0]| > ⌊n/2⌋. Then we let n_0 be the smallest number n > 0 with this property. Let us consider i > 0. Once n_{i−1} and t_{i−1} have been determined, we compute the smallest natural number t_i > t_{i−1} such that there exists a natural number n with 2n_{i−1} ≤ n and ⌊3n/2⌋ ≤ t_i and |{0, ..., n − 1} ∩ Enum(g)[t_i]| > ⌊n/2⌋. Then we let n_i be the smallest number n with this property.

Next we recursively define a function h : N → N which will be an enumeration of the infinite set A with h(t) ≠ 0 for all t ∈ N. For any i ∈ N, let m_i be the number of elements of the following set

   M_i := {0, ..., n_i − 1} ∩ Enum(g)[t_i] \ Enum(h)[Σ_{j<i} [...] > 0, 0 ≤ n_{i−1} − Σ_{j<i} [...]

[...] there are infinitely many n > n_0 with

   {0, ..., n − 1} ∩ A ⊆ Enum(g)[⌊3n/2⌋].

Let us consider such a number n. We claim that

   {0, ..., n − 1} ∩ A ⊆ Enum(h)[n].

Let i := min{j ∈ N | n ≤ n_j}. Then i > 0 and n_{i−1} < n ≤ n_i. In the first [...] > s(n)}, for all n ∈ N, can be computed from f, g, r and has the desired properties. □

Corollary 12. Let A ⊆ N be a c.e. set, and let g : N → N be any computable enumeration of A. Then the following conditions are equivalent.

(1) A is regainingly approximable.
(2) There exists a computable, increasing function s : N → N such that g is s-good.

Theorem 13. There exists a c.e. set A ⊆ N that is not regainingly approximable.

Proof. We use the Cantor pairing function ⟨·, ·⟩ : N² → N defined by

   ⟨m, n⟩ := (1/2)(m + n)(m + n + 1) + n,

for all m, n ∈ N, and let π_1 : N → N and π_2 : N → N denote the two components of its inverse function, that is, ⟨π_1(n), π_2(n)⟩ = n for all n ∈ N. Let ϕ_0, ϕ_1, ϕ_2, ... be a standard enumeration of all possibly partial computable functions with domain and range in N. As usual, we write ϕ_e(n)[t] ↓ to express that the e-th Turing machine (which computes ϕ_e) stops after at most t steps on input n.
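The Cantor pairing function and its inverse, used throughout this construction, can be implemented as follows (an added illustration):

    def pair(m, n):
        """<m, n> = (m + n)(m + n + 1)/2 + n."""
        return (m + n) * (m + n + 1) // 2 + n

    def unpair(c):
        """Return (pi1(c), pi2(c)), the inverse of pair."""
        w = 0
        while (w + 1) * (w + 2) // 2 <= c:     # largest w with w(w+1)/2 <= c
            w += 1
        n = c - w * (w + 1) // 2
        return w - n, n

    assert all(unpair(pair(m, n)) == (m, n) for m in range(40) for n in range(40))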
We shall construct a computable enumeration g : N → N of a set A ⊆ N such that the following requirements (R_e) will be satisfied for all e ∈ N:

   (R_e): if ϕ_e is total and increasing then (∃n_e ∈ N)(∀n > n_e)({0, ..., n − 1} ∩ A ⊈ Enum(g)[ϕ_e(n)]).

According to Corollary 12 this is sufficient.

We construct g in stages; in stage t we proceed as follows: Define e := π_1(π_1(t)) and k := π_2(π_1(t)), hence ⟨e, k⟩ = π_1(t). Check whether the following conditions are satisfied:

   (∀n ≤ ⟨e, k + 1⟩) ϕ_e(n)[t] ↓
   and (∀n < ⟨e, k + 1⟩) ϕ_e(n) < ϕ_e(n + 1)
   and t ≥ ϕ_e(⟨e, k + 1⟩)
   and ⟨e, k⟩ ∉ Enum(g)[t].

If they are, set g(t) := 1 + ⟨e, k⟩, otherwise g(t) := 0.

We come to the verification. It is clear that the function g is computable and an enumeration of some c.e. set A ⊆ N. We wish to show that the requirements R_e are satisfied for all e ∈ N. Let us consider a number e such that ϕ_e is a total and increasing function as well as a number n > ⟨e, 0⟩. There exists a unique k ∈ N with ⟨e, k⟩ < n ≤ ⟨e, k + 1⟩. The function g enumerates the number ⟨e, k⟩ into A in some uniquely determined stage t, i.e., there exists exactly one number t with g(t) = 1 + ⟨e, k⟩. Then

   ⟨e, k⟩ ∈ Enum(g)[t + 1] \ Enum(g)[t].

Since n ≤ ⟨e, k + 1⟩, we have ϕ_e(n) ≤ ϕ_e(⟨e, k + 1⟩) ≤ t, and therefore

   ⟨e, k⟩ ∉ Enum(g)[t] ⊇ Enum(g)[ϕ_e(n)].

Thus ⟨e, k⟩ ∈ {0, ..., n − 1} ∩ A witnesses that R_e is satisfied with n_e = ⟨e, 0⟩. □

The following theorem states that any c.e. set C ⊆ N can be split effectively into two disjoint c.e. sets A and B that are regainingly approximable.

Theorem 14. Given an enumeration f_C : N → N of a set C ⊆ N one can compute enumerations f_A : N → N of a set A ⊆ N and f_B : N → N of a set B ⊆ N such that

(1) C is the disjoint union of A and B, and
(2) there exist infinitely many t with A ∩ {0, ..., t − 1} ⊆ Enum(f_A)[t] and infinitely many t with B ∩ {0, ..., t − 1} ⊆ Enum(f_B)[t].

In particular, for any c.e. set C ⊆ N there exist two disjoint, regainingly approximable sets A, B ⊆ N with C = A ∪ B.

Proof. Let an enumeration f_C of a set C ⊆ N be given. The algorithm that defines the desired enumerations f_A and f_B will work in stages −1, 0, 1, 2, ... At the same time, we will also define a function s : N × (N ∪ {−1}) → N and write s_i[t] for s(i, t).

At stage −1 we define s_i[−1] for all i ∈ N by s_i[−1] := i.

At stage t with t ∈ N we proceed as follows:

If f_C(t) = 0 (recall that this means that f_C does not enumerate anything into C at stage t) then we set f_A(t) := 0 and f_B(t) := 0. Furthermore, we set s_i[t] := s_i[t − 1] for all i ∈ N.

If f_C(t) > 0 then the number n := f_C(t) − 1 enumerated by f_C into C at stage t will be enumerated either into the set A or into the set B, as follows. If the number

   k_t := min{j ∈ N | s_j[t − 1] > n}

is even then we set f_A(t) := 0 and f_B(t) := n + 1 (which means that n is enumerated into B); if k_t is odd then we set f_A(t) := n + 1 and f_B(t) := 0 (which means that n is enumerated into A). Furthermore, we define s_i[t] for all i ∈ N by

   s_i[t] := s_i[t − 1] if i ≤ k_t, and s_i[t] := s_i[t − 1] + t if k_t < i.

This ends the description of stage t and of the algorithm; we proceed with the verification.
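The splitting algorithm is likewise executable. The following sketch is an added illustration (names are ours); it runs the construction for T stages, sizing the marker list s in advance, which silently assumes a bound on the numbers f_C enumerates within those stages.

    def split(fC, T):
        """Stages 0..T-1 of the Theorem 14 construction: enumerations fA, fB."""
        vals = [fC(t) for t in range(T)]
        size = max(vals + [T]) + 2
        s = list(range(size))                   # s_i[-1] = i
        fA, fB = [], []
        for t, val in enumerate(vals):
            if val == 0:
                fA.append(0); fB.append(0)
                continue
            n = val - 1
            k = next(j for j in range(size) if s[j] > n)     # k_t
            if k % 2 == 0:
                fA.append(0); fB.append(n + 1)               # n enters B
            else:
                fA.append(n + 1); fB.append(0)               # n enters A
            for i in range(k + 1, size):
                s[i] += t                        # s_i[t] = s_i[t-1] + t for i > k_t
        return fA, fB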
Claim 1: For every t ∈ N ∪ {−1}, the sequence (s_i[t])_i is strictly increasing.

Proof: By induction over t. It is clear for t = −1. Let us consider some t ∈ N and assume that the sequence (s_i[t − 1])_i is strictly increasing. If f_C(t) = 0 then the sequence (s_i[t])_i is identical to the sequence (s_i[t − 1])_i, hence, it is strictly increasing as well. Let us assume that f_C(t) > 0; then k_t is defined by induction hypothesis and we observe that the sequence (s_i[t])_i is strictly increasing:

• For any i < j ≤ k_t we have s_i[t] = s_i[t − 1] < s_j[t − 1] = s_j[t].
• For any i ≤ k_t < j we have s_i[t] = s_i[t − 1] < s_j[t − 1] ≤ s_j[t − 1] + t = s_j[t].
• For any k_t < i < j we have s_i[t] = s_i[t − 1] + t < s_j[t − 1] + t = s_j[t].

This proves Claim 1.

By Claim 1, for every t ∈ N with f_C(t) > 0, the number k_t is well defined. Now, it is clear that the functions f_A and f_B defined by the algorithm are enumerations of two disjoint sets A, B ⊆ N whose union is the set C. We still need to prove the second condition stated in Theorem 14.

Claim 2: For every i, the sequence (s_i[t])_{t≥−1} is nondecreasing and eventually constant.

Proof: It is clear that, for every i, the sequence (s_i[t])_t is nondecreasing. We show by induction over i that the sequence (s_i[t])_t is eventually constant. For all t ∈ N we have 0 ≤ k_t, hence, s_0[t] = s_0[t − 1] = s_0[−1] = 0. We consider any number i > 0. By induction hypothesis there exists a number t_1 such that, for all j < i and for all t ≥ t_1, s_j[t] = s_j[t_1]. Let t_2 be large enough so that t_2 > t_1 and

   C ∩ {0, ..., s_{i−1}[t_1] − 1} ⊆ Enum(f_C)[t_2]

(meaning that f_C does not enumerate any number smaller than s_{i−1}[t_1] into the set C in any stage t ≥ t_2). Then, for every t ≥ t_2 with f_C(t) > 0, we must have i ≤ k_t and consequently s_i[t] = s_i[t − 1]. By induction we obtain s_i[t] = s_i[t_2 − 1], for all t ≥ t_2 − 1. Thus, (s_i[t])_t is eventually constant, and Claim 2 is proven.

Let the sequence (S_i)_i be defined by S_i := lim_{t→∞} s_i[t]. Due to Claim 1, (S_i)_i is strictly increasing.

Claim 3: For every i ∈ N and every t ≥ S_i, s_i[t] = S_i.

Proof: If this were not true then there would be some t > S_i with s_i[t] ≠ s_i[t − 1], hence, with S_i ≥ s_i[t] = s_i[t − 1] + t ≥ t > S_i, a contradiction.

Claim 4: For every even i, A ∩ {0, ..., S_i − 1} ⊆ Enum(f_A)[S_i].

Proof: Consider an even number i as well as some n ∈ A \ Enum(f_A)[S_i]. It is sufficient to show that n ≥ S_i. Let t be the unique number with n ∈ Enum(f_A)[t] \ Enum(f_A)[t − 1]. Then t > S_i and n + 1 = f_A(t) = f_C(t). By construction, the number k_t must be odd. Hence k_t ≠ i. If k_t were smaller than i then we would obtain s_i[t] = s_i[t − 1] + t > s_i[t − 1] = S_i in contradiction to Claim 3. We conclude i < k_t. This implies s_i[t − 1] ≤ n by the definition of k_t. As t > S_i, using Claim 3 again we obtain S_i = s_i[t − 1] ≤ n, which proves Claim 4.

Claim 5: For every odd i, B ∩ {0, ..., S_i − 1} ⊆ Enum(f_B)[S_i].

Proof: The proof is symmetric to that of Claim 4; it is enough to interchange the words "even" and "odd" and to replace "A" by "B". □

Corollary 15. There exists a regainingly approximable set A ⊆ N that is not decidable.

Proof. Let C ⊆ N be a c.e. set that is not decidable. By Theorem 14 there exist two disjoint regainingly approximable sets A, B with C = A ∪ B. Not both of them can be decidable. □

The set of all regainingly approximable sets is not closed under union, according to Theorems 13 and 14. The following limited closure properties do hold, however, and will be useful in the proof of the next theorem.

Lemma 16.

(1) The union of a regainingly approximable set and a decidable set is regainingly approximable.
(2) If A is a regainingly approximable set and f : N → N is a computable, nondecreasing function, then the set f(A) := {n ∈ N | (∃k ∈ A) n = f(k)} is regainingly approximable.

Proof. Let A ⊆ N be a regainingly approximable set. By Lemma 10 there exists a computable 2n-good enumeration g : N → N of A.

For the first assertion, let B ⊆ N be a decidable set. Then the function h : N → N defined by h(2n) := g(n) and h(2n + 1) := n + 1 if n ∈ B, h(2n + 1) := 0 if n ∉ B, is a computable and (4n − 1)-good enumeration of A ∪ B.

For the second assertion, let f : N → N be a computable, nondecreasing function. Then the function h : N → N defined by

   h(n) := 0 if g(n) = 0, and h(n) := 1 + f(g(n) − 1) if g(n) > 0,

for n ∈ N, is a computable enumeration of f(A). If f is bounded then the set f(A) is finite, and the function h is trivially an id_N-good enumeration of f(A). Let us assume that f is unbounded. Then the function r : N → N defined by

   r(n) := max{m ∈ N | f(m) ≤ n},

for n ∈ N, is computable, nondecreasing, and unbounded. We claim that h is a (2r(n))-good enumeration of f(A). By assumption, the set

   B := {n ∈ N | {0, ..., n − 1} ∩ A ⊆ Enum(g)[2n]}

is infinite. So is the set C := f(B). Let us consider a number n ∈ C and a number m ∈ B with f(m) = n. Then m ≤ r(n). We obtain
Theorem 17. There exist two regainingly approximable sets A, B ⊆ N whose intersection A ∩ B is not regainingly approximable.

Proof. For natural numbers a, b and a set D ⊆ N we write (a·D + b) for the set

(a·D + b) := { n ∈ N | (∃d ∈ D) n = a·d + b }

and (a·D) for the set (a·D + 0). By Theorem 13 there exists a c.e. set Ã ⊆ N ... more precisely, a c.e. set C̃ ⊆ N that is not regainingly approximable. By Theorem 14 there exist two disjoint, regainingly approximable sets Ã, B̃ ⊆ N with Ã ∪ B̃ = C̃. By Lemma 16 the sets

A := (2·Ã) ∪ (2·N + 1)  and  B := (2·B̃ + 1) ∪ (2·N)

are regainingly approximable. We claim that their intersection A ∩ B = (2·Ã) ∪ (2·B̃ + 1) is not regainingly approximable. Let the function g : N → N be defined by g(n) = ⌊n/2⌋ for all n ∈ N. We observe C̃ = g(A ∩ B). Thus, if A ∩ B were a regainingly approximable set, then so would be C̃ according to Lemma 16(2), a contradiction. □

To summarize our results: every decidable set is regainingly approximable but the converse does not hold (by Example 7 and Corollary 15); every regainingly approximable set is computably enumerable but the converse does not hold (by Theorem 13); and the set of regainingly approximable sets is neither closed under union nor closed under intersection (by Theorems 13, 14 and 17).

4. Strongly Left-computable Numbers and Regainingly Approximable Numbers

Lemma 18. For a c.e. set A ⊆ N the following two statements are equivalent.
(1) The set A is regainingly approximable.
(2) The real number 2^{-A} is regainingly approximable.

Proof.
(2) ⇒ (1): Let A ⊆ N be a c.e. set such that the number 2^{-A} is regainingly approximable. Let f : N → N be an arbitrary computable enumeration of A. Then the sequence (a_n)_n defined by a_n := 2^{-Enum(f)[n]}, for n ∈ N, is a computable nondecreasing sequence of rational numbers converging to 2^{-A}. By Proposition 5 there exists a computable, increasing function r : N → N such that, for infinitely many n, 2^{-A} − a_{r(n)} < 2^{-n}. We obtain {0, …, n − 1} ∩ A ⊆ Enum(f)[r(n)] for infinitely many n. Hence, A is regainingly r-approximable.

(1) ⇒ (2): Let r : N → N be a computable, nondecreasing, unbounded function such that A is regainingly r-approximable. Let f : N → N be a computable r-good enumeration of A. Then by

a_n := 2^{-Enum(f)[r(n+1)]}

a computable, nondecreasing sequence (a_n)_n of rational numbers converging to 2^{-A} is defined. For infinitely many n we have {0, …, n} ∩ A ⊆ Enum(f)[r(n + 1)]; hence 2^{-A} − a_n ≤ 2^{-(n+1)} < 2^{-n}. This shows that 2^{-A} is regainingly approximable. □

Corollary 19.
(1) There exists a strongly left-computable number that is not regainingly approximable.
(2) There exists a strongly left-computable number that is regainingly approximable but not computable.

Proof. The first assertion follows from Theorem 13 and Lemma 18. The second assertion follows from Corollary 15 and Lemma 18 and from the well-known fact that, for any subset A ⊆ N, the number 2^{-A} is computable if and only if the set A is decidable. □
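The translation used in the (1) ⇒ (2) direction of Lemma 18 is entirely concrete: from a finite prefix of an r-good enumeration one can compute the rational approximations a_n = 2^{-Enum(f)[r(n+1)]} exactly. A minimal sketch with exact rationals follows; the enumeration prefix and the function r are hypothetical toy data.

from fractions import Fraction

def two_to_minus(S):
    # 2^{-S} = sum over a in S of 2^{-(a+1)}
    return sum(Fraction(1, 2**(a + 1)) for a in S)

def a(n, f_values, r):
    # a_n := 2^{-Enum(f)[r(n+1)]}, computed from a finite prefix of f
    seen = {v - 1 for v in f_values[:r(n + 1)] if v > 0}
    return two_to_minus(seen)

f_values = [1, 0, 3, 0, 0, 2, 4]     # enumerates A = {0, 2, 1, 3}
r = lambda n: n                      # toy r; any nondecreasing unbounded function works
print([a(n, f_values, r) for n in range(6)])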
The regainingly approximable numbers are closed downwards with respect to Solovay reducibility. Let ≤_S denote Solovay reducibility [5] between left-computable real numbers.

Proposition 20. Let β be a regainingly approximable number, and let α be a left-computable number with α ≤_S β. Then α is regainingly approximable as well.

Proof. Let f : {q ∈ Q | q < β} → Q be a computable function and c ∈ N be a number such that, for all q ∈ {q ∈ Q | q < β}, f(q) < α and α − f(q) < 2^c · (β − q). By Proposition 4 there exists a computable and increasing sequence (b_n)_n of rational numbers converging to β such that β − b_n < 2^{-n-c} for infinitely many n ∈ N. The sequence (a_n)_n defined by

a_n := max{ f(b_i) | 0 ≤ i ≤ n }

is a nondecreasing, computable sequence of rational numbers converging to α. For the infinitely many n with β − b_n < 2^{-n-c} we obtain α − a_n ≤ α − f(b_n) < 2^c · (β − b_n) < 2^{-n}. □

Corollary 21. Regainingly approximable numbers are not Martin-Löf random.

Proof. We give two proofs. First, Kučera and Slaman [3] showed that every left-computable number is below any Martin-Löf random left-computable number with respect to Solovay reducibility. Thus, if a regainingly approximable number were Martin-Löf random, then all left-computable numbers would be regainingly approximable according to Proposition 20. This contradicts Corollary 19(1).

We also give an alternative direct proof: Let α be regainingly approximable and let (a_n)_n be a computable, nondecreasing sequence of rational numbers converging to α such that α − a_n < 2^{-n} for infinitely many n ∈ N. For every n ∈ N let U_n be the interval (a_n, a_n + 2^{-n}). Then (U_n)_n is a computable sequence of open intervals with rational endpoints such that Σ_{n∈N} λ(U_n) = 2 < ∞, where λ is the Lebesgue measure on R. Since α ∈ U_n for infinitely many n, (U_n)_n is a Solovay test witnessing that α is not Martin-Löf random. □

Corollary 22.
(1) If the sum of two left-computable real numbers is regainingly approximable, then both of them are regainingly approximable.
(2) The sum of a regainingly approximable number and a computable number is again a regainingly approximable number.

Proof. The first assertion follows from Proposition 20 and from the fact that for any two left-computable numbers α, β one has α ≤_S α + β and β ≤_S α + β. Since adding a computable number to a left-computable number does not change its Solovay degree, the second assertion follows from Proposition 20 as well. □

Corollary 23. Every strongly left-computable number can be written as the sum of two strongly left-computable numbers that are regainingly approximable.

Proof. By Theorem 14 and Lemma 18. □

Corollary 24. There exist two strongly left-computable and regainingly approximable numbers whose sum is not regainingly approximable.

Proof. According to Corollary 19(1), there exists a strongly left-computable number γ that is not regainingly approximable. According to Corollary 23, there exist two strongly left-computable and regainingly approximable numbers α, β with α + β = γ. They witness the truth of the assertion. □

Corollary 23 raises the question whether every left-computable number can be written as the sum of two regainingly approximable numbers. The answer is no. This follows from Corollary 21, from the fact that there exist Martin-Löf random left-computable numbers, and from the result of Downey, Hirschfeldt, and Nies [2, Corollary 3.6] that the sum of two left-computable numbers that are not Martin-Löf random is again not Martin-Löf random.
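Corollary 23's decomposition is transparent at the level of sets: if C is the disjoint union of A and B, as produced by Theorem 14, then 2^{-C} = 2^{-A} + 2^{-B} holds exactly. A quick check with exact rationals (the sets below are hypothetical toys):

from fractions import Fraction

def two_to_minus(S):
    # 2^{-S} = sum over a in S of 2^{-(a+1)}
    return sum(Fraction(1, 2**(a + 1)) for a in S)

C = {0, 2, 3, 7, 9}
A, B = {0, 3, 9}, {2, 7}     # a toy splitting of C into disjoint parts
print(two_to_minus(C) == two_to_minus(A) + two_to_minus(B))   # True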
References

[1] R. G. Downey and D. R. Hirschfeldt. Algorithmic Randomness and Complexity. Theory and Applications of Computability. Springer, New York, 2010.
[2] R. G. Downey, D. R. Hirschfeldt, and A. Nies. Randomness, computability, and density. SIAM J. Comput., 31(4):1169–1183, 2002.
[3] A. Kučera and T. A. Slaman. Randomness and recursive enumerability. SIAM J. Comput., 31(1):199–211, 2001.
[4] A. Nies. Computability and Randomness, volume 51 of Oxford Logic Guides. Oxford University Press, Oxford, 2009.
[5] R. M. Solovay. Draft of a paper (or series of papers) on Chaitin's work. Unpublished notes, 1975.
[6] K. Weihrauch. Computable Analysis: An Introduction. Texts in Theoretical Computer Science. An EATCS Series. Springer-Verlag, Berlin, 2000.

diff --git a/2NE1T4oBgHgl3EQflgRB/content/tmp_files/load_file.txt b/2NE1T4oBgHgl3EQflgRB/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f75bdeec692eea92f938a16a4d083fed646d27b
--- /dev/null
+++ b/2NE1T4oBgHgl3EQflgRB/content/tmp_files/load_file.txt
@@ -0,0 +1,591 @@

arXiv:2301.03285v1 [math.LO] 9 Jan 2023

REGAININGLY APPROXIMABLE NUMBERS AND SETS

PETER HERTLING, RUPERT HÖLZL AND PHILIP JANICKI

Fakultät für Informatik, Universität der Bundeswehr München, 85577 Neubiberg, Germany
E-mail address: peter.hertling@unibw.de, rupert.hoelzl@unibw.de, philip.janicki@unibw.de. Date: November 18, 2022.

Abstract. We call a real number α regainingly approximable if there exists a computable nondecreasing sequence (a_n)_n of rational numbers converging to α such that α − a_n < 2^{-n} for infinitely many n ∈ N. We also call a c.e. set A ⊆ N regainingly approximable if the strongly left-computable number 2^{-A} is regainingly approximable. We characterize this property directly in terms of enumerations of A and show that there exists a c.e. set A ⊆ N that is not regainingly approximable. Our main result is a splitting theorem: any c.e. set C ⊆ N can be split effectively into two disjoint c.e. sets A and B that are regainingly approximable. These results imply that the set of regainingly approximable numbers lies properly between the set of computable numbers and the set of left-computable numbers and that it is not closed under addition.

Keywords: left-computable numbers; effective approximation; computably enumerable sets; splitting; Solovay reducibility.
AMS classification: 03D78, 03D25, 03D30

1. Introduction

We call a sequence (a_n)_n of real numbers increasing if, for all n ∈ N, a_n < a_{n+1}, and nondecreasing if, for all n ∈ N, a_n ≤ a_{n+1}. A real number is called left-computable if there exists a computable nondecreasing sequence of rational numbers converging to it; in [1, 4] these real numbers are called left-c.e. A real number α is called computable if there exists a computable sequence (a_n)_n of rational numbers satisfying |α − a_n| < 2^{-n} for all n ∈ N. It is easy to see that any computable real number is left-computable. Computable and left-computable numbers are important both in computable analysis [6] and in the theory of algorithmic randomness [1, 4]. In this article, we study real numbers that are limits of computable, nondecreasing, converging sequences (a_n)_n of rational numbers satisfying the condition |α − a_n| < 2^{-n} not necessarily for all n ∈ N but for infinitely many n ∈ N.

Definition 1. We call a real number α regainingly approximable if there exists a computable nondecreasing sequence of rational numbers (a_n)_n converging to α such that we have α − a_n < 2^{-n} for infinitely many n ∈ N.

Fact 2.
(1) Every computable number is regainingly approximable.
(2) Every regainingly approximable number is left-computable.

Proof. (1) Let α be a computable number, and let (a_n)_n be a computable sequence of rational numbers satisfying |α − a_n| < 2^{-n} for all n ∈ N. Then the sequence (b_n)_n of rational numbers defined by b_n := a_{n+3} − 2^{-(n+1)} is computable and increasing, converges to α as well, and satisfies, for all n ∈ N, α − b_n < 2^{-n}. Hence, α is regainingly approximable.
(2) This is clear from the definitions. □

In Section 2 we begin by showing that Definition 1 is robust under several slight modifications, where the equivalences are effectively uniform.
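Fact 2(1)'s shift-and-subtract trick can be checked mechanically with exact rational arithmetic; in the sketch below the computable number α = 2/3 and its dyadic approximations are a hypothetical toy example.

from fractions import Fraction
import math

alpha = Fraction(2, 3)

def a(n):
    # a toy computable approximation with |alpha - a_n| < 2^{-n}
    return Fraction(math.floor(alpha * 2**n), 2**n)

def b(n):
    # Fact 2(1): b_n := a_{n+3} - 2^{-(n+1)} is increasing with alpha - b_n < 2^{-n}
    return a(n + 3) - Fraction(1, 2**(n + 1))

print(all(b(n) < b(n + 1) and 0 < alpha - b(n) < Fraction(1, 2**n) for n in range(30)))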
In Section 3 we apply the idea of regaining approximability to c.e. sets of natural numbers. In fact, most of our results concerning regainingly approximable numbers involve strongly left-computable real numbers and can be expressed more naturally directly in terms of sets A ⊆ N of natural numbers. A real number x ∈ [0, 1] is called strongly left-computable if there exists a computably enumerable set A ⊆ N with

x = 2^{-A} := Σ_{a∈A} 2^{-(a+1)}.

We define different variations of regaining approximability for c.e. sets, and will again see that they coincide. However, in contrast to the situation for regainingly approximable numbers, not all arguments are fully effectively uniform in this setting. Next we prove that there is a c.e. set that is not regainingly approximable, and we prove a splitting result, namely that there is an effectively uniform procedure that splits every c.e. set C ⊆ N into two disjoint regainingly approximable sets A, B ⊆ N. Note that this implies that there exists a regainingly approximable set A ⊆ N that is not decidable, and that the union and intersection of two regainingly approximable sets need not be regainingly approximable.

In Section 4 we again turn to regainingly approximable numbers. We observe that a set A ⊆ N is regainingly approximable if and only if the strongly left-computable number 2^{-A} is regainingly approximable. Then we show that the set of regainingly approximable numbers is closed downwards under Solovay reduction and that regainingly approximable numbers are not Martin-Löf random. Finally, we observe that the results from the previous section imply that there exists a strongly left-computable number that is not regainingly approximable; that there exists a strongly left-computable and regainingly approximable number that is not computable; and that every strongly left-computable number can be written as the sum of two strongly left-computable numbers that are regainingly approximable. We conclude that the set of regainingly approximable numbers is not closed under addition.

2. Robustness

In this section, we first show that slight changes to the definition of regainingly approximable numbers do not lead to a different notion. The following lemma will be useful; note that no computability assumptions are made.

Lemma 3. Let (a_n)_n be a nondecreasing sequence of real numbers converging to some real number α such that, for infinitely many n ∈ N, α − a_n < 2^{-n}. Then, for every unbounded function f : N → N there exist infinitely many m with α − a_{f(m+1)} < 2^{-f(m)}.

Proof. By assumption, the set A := { n ∈ N | α − a_n < 2^{-n} and f(0) ≤ n } is infinite. We define a function g : A → N by

g(n) := min{ m ∈ N | n < f(m + 1) },

for n ∈ A. The function g is well-defined because the function f is unbounded. For every n ∈ A we have f(g(n)) ≤ n < f(g(n) + 1). The set g(A) := { g(n) | n ∈ A } is infinite. Let us consider a number m ∈ g(A), and let n ∈ A be a number with m = g(n). Then

α − a_{f(m+1)} = α − a_{f(g(n)+1)} ≤ α − a_n < 2^{-n} ≤ 2^{-f(g(n))} = 2^{-f(m)}. □
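The index function g in this proof simply locates each good index n between consecutive values of f. A tiny sketch, where the unbounded function f and the good indices are hypothetical toy data:

def g(n, f):
    # g(n) = min{ m : n < f(m+1) }
    m = 0
    while not n < f(m + 1):
        m += 1
    return m

f = lambda m: m*m            # a toy unbounded function with f(0) = 0
good = [3, 7, 20, 50]        # hypothetical indices n with alpha - a_n < 2^{-n}
for n in good:
    m = g(n, f)
    print(n, m, f(m) <= n < f(m + 1))   # confirms f(g(n)) <= n < f(g(n)+1)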
There are some obvious ways to modify Definition 1 that one could consider. First, instead of computable nondecreasing sequences (a_n)_n of rational numbers converging to the real number α, one might consider only computable increasing sequences. Secondly, one might replace the condition α − a_n < 2^{-n} by the condition α − a_n < 2^{-f(n)}, where f : N → N is an arbitrary computable, unbounded function of one's choice; or one might ask for this to hold only for some computable, nondecreasing, unbounded function f : N → N, a seemingly weaker requirement. However, it will turn out that none of these modifications makes any difference.

Proposition 4. For a real number α ∈ R the following statements are equivalent:
(1) α is a regainingly approximable number.
(2) There exists a computable, increasing sequence of rational numbers (a_n)_n converging to α such that, for infinitely many n ∈ N, α − a_n < 2^{-n}.
(3) For every computable, unbounded function f : N → N there exists a computable increasing sequence of rational numbers (a_n)_n converging to α such that, for infinitely many n ∈ N, α − a_n < 2^{-f(n)}.
(4) There exist a computable, nondecreasing, and unbounded function f : N → N and a computable nondecreasing sequence of rational numbers (a_n)_n converging to α such that, for infinitely many n ∈ N, α − a_n < 2^{-f(n)}.

Note that this implies that it makes no difference whether we use "<" or "≤" in the definition of regaining approximability. We would also like to point out that all implications in the following proof are uniformly effective.

Proof. (2) ⇒ (1): Trivial. (3) ⇒ (2): Trivial.

(1) ⇒ (3): Let α be a regainingly approximable real number. Let (b_n)_n be a computable nondecreasing sequence of rational numbers converging to α with α − b_n < 2^{-n} for infinitely many n ∈ N. Let f : N → N be a computable, unbounded function. Then the function g : N → N defined by

g(n) := 1 + n + max{ f(m) | m ≤ n }

is computable, increasing, and satisfies g(n) ≥ f(n) + 1 for all n ∈ N. In particular, g is unbounded. The sequence (a_n)_n of rational numbers defined by

a_n := b_{g(n+1)} − 2^{-g(n)}

is computable and increasing and converges to α. By Lemma 3 there exist infinitely many n with α − b_{g(n+1)} < 2^{-g(n)}. For all of these numbers n we obtain α − a_n = α − b_{g(n+1)} + 2^{-g(n)} < 2^{-g(n)+1} ≤ 2^{-f(n)}.

(1) ⇒ (4): Trivial.

(4) ⇒ (1): Let us assume that f : N → N is a computable, nondecreasing, and unbounded function and that (b_n)_n is a computable nondecreasing sequence of rational numbers converging to α such that, for infinitely many n ∈ N, α − b_n < 2^{-f(n)}. The function g : N → N defined by g(0) := max{ m ∈ N | f(m) = f(0) } and

g(n + 1) := max{ m ∈ N | f(m) = f(g(n) + 1) },

for n ∈ N, is computable and increasing and satisfies f(g(n)) ≥ n for all n ∈ N. Furthermore, for every k ∈ N there exists exactly one n ∈ N with f(k) = f(g(n)), and it satisfies k ≤ g(n). The sequence (a_n)_n of rational numbers defined by a_n := b_{g(n)}, for all n ∈ N, is computable and nondecreasing and converges to α. By assumption, the set B := { k ∈ N | α − b_k < 2^{-f(k)} } is infinite. Hence, the set A := { n ∈ N | (∃k ∈ B) f(k) = f(g(n)) } is infinite as well. Let us consider a number n ∈ A, and let k ∈ B be a number with f(k) = f(g(n)). Then k ≤ g(n) and

α − a_n = α − b_{g(n)} ≤ α − b_k < 2^{-f(k)} = 2^{-f(g(n))} ≤ 2^{-n}. □
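The (1) ⇒ (3) direction is a concrete speed-up: g dominates f, and subtracting 2^{-g(n)} makes the sequence increasing. A minimal sketch over exact rationals; the slow approximation b and the target schedule f below are hypothetical toy choices.

from fractions import Fraction
import math

alpha = Fraction(2, 3)
b = lambda n: Fraction(math.floor(alpha * 2**n), 2**n)   # slow approximation of alpha
f = lambda n: 2*n                                        # target precision schedule

def g(n):
    # g(n) := 1 + n + max{ f(m) : m <= n }, increasing and dominating f + 1
    return 1 + n + max(f(m) for m in range(n + 1))

def a(n):
    # a_n := b_{g(n+1)} - 2^{-g(n)}
    return b(g(n + 1)) - Fraction(1, 2**g(n))

print(all(a(n) < a(n + 1) and alpha - a(n) < Fraction(1, 2**f(n)) for n in range(15)))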
As the final result in this section, we show that if a left-computable number α is regainingly approximable then this will be apparent no matter which of its effective approximations we look at.

Proposition 5. Let α be a left-computable real number, and let (a_n)_n be a computable, nondecreasing sequence of rational numbers converging to α. Then the following conditions are equivalent.
(1) α is a regainingly approximable number.
(2) There exists a computable, increasing function r : N → N such that, for infinitely many n, α − a_{r(n)} < 2^{-n}.

Note that the proof is effectively uniform in both directions.

Proof. (2) ⇒ (1): Let us assume that there exists a computable, increasing function r : N → N such that we have α − a_{r(n)} < 2^{-n} for infinitely many n. Then the sequence (b_n)_n of rational numbers defined by b_n := a_{r(n)} is computable, nondecreasing, converges to α, and satisfies, for infinitely many n, α − b_n < 2^{-n}. Hence, α is regainingly approximable.

(1) ⇒ (2): Let us assume that α is regainingly approximable. By Proposition 4 there exists a computable, increasing sequence (b_n)_n of rational numbers converging to α such that there exist infinitely many n with α − b_n < 2^{-n}. We define a computable, increasing function r : N → N by r(0) := min{ m ∈ N | a_m ≥ b_0 } and

r(n + 1) := min{ m ∈ N | m > r(n) and a_m ≥ b_{n+1} },

for n ∈ N. For all n ∈ N we have a_{r(n)} ≥ b_n. For the infinitely many n ∈ N with α − b_n < 2^{-n} we obtain α − a_{r(n)} ≤ α − b_n < 2^{-n}. □

3. Regainingly Approximable Sets of Natural Numbers

Let us call a total function f : N → N an enumeration of a set A ⊆ N if the following two conditions are satisfied:
(1) A = { n ∈ N | (∃k ∈ N) f(k) = n + 1 },
(2) for every n ∈ A there exists exactly one k ∈ N with f(k) = n + 1.
If f(k) = n + 1, then we say that at stage k the function f enumerates the number n into A. Note that here f(k) = 0 encodes that the function f does not enumerate anything into A at stage k. It is clear that a set A ⊆ N is computably enumerable if and only if there exists a computable enumeration of A.
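This encoding, together with the finite stage sets Enum(f)[t] defined next, is all the machinery the rest of the section uses. A two-line Python rendering of it, with a hypothetical toy enumeration:

def enum_prefix(f_values, t):
    # Enum(f)[t]: numbers enumerated before stage t; f(k) = n+1 enumerates n, f(k) = 0 skips
    return {v - 1 for v in f_values[:t] if v > 0}

f_values = [0, 4, 0, 1, 7, 0, 3]     # enumerates A = {3, 0, 6, 2}
print(enum_prefix(f_values, 5))      # {0, 3, 6}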
If f : N → N is an enumeration of a subset of N, then for t ∈ N we write

Enum(f)[t] := { n ∈ N | (∃k ∈ N)(k < t and f(k) = n + 1) }.

Definition 6. Let r : N → N be a nondecreasing, unbounded function.
(1) We call an enumeration f : N → N of a set A ⊆ N r-good if there exist infinitely many n such that {0, …, n − 1} ∩ A ⊆ Enum(f)[r(n)].
(2) We call a set A ⊆ N regainingly r-approximable if there exists a computable enumeration f : N → N of A that is r-good.

Example 7. Let A ⊆ N be a decidable set. Then the function f : N → N defined by f(n) := n + 1 if n ∈ A and f(n) := 0 if n ∉ A is a computable and id_N-good enumeration of A. Hence, A is regainingly id_N-approximable.

Definition 8. We call a set A ⊆ N regainingly approximable if there exists a computable, nondecreasing, unbounded function r : N → N such that A is regainingly r-approximable.

The following theorem says that in this definition one can replace the function r by the identity id_N.

Theorem 9. For a set A ⊆ N the following two conditions are equivalent.
(1) There exists a computable, nondecreasing, unbounded function r : N → N such that A is regainingly r-approximable.
(2) A is regainingly id_N-approximable.

The proof of this theorem is not fully effectively uniform, as it contains a noneffective case distinction. Therefore, we first formulate a partial result that does have a uniformly effective proof. It implies, for example, that from an r-good enumeration of a set A, where r : N → N is an arbitrary computable, nondecreasing, unbounded function, one can effectively switch to a 2n-good enumeration of the same set A.

Lemma 10. Given two nondecreasing, unbounded functions r, s : N → N and an r-good enumeration f : N → N of a set A, one can compute an (id_N + s)-good enumeration g : N → N of the same set A. In particular, if r, s : N → N are computable, nondecreasing, unbounded functions and a set A ⊆ N is regainingly r-approximable, then it is regainingly (id_N + s)-approximable as well.

Proof. Let r, s : N → N be two nondecreasing, unbounded functions, and let f : N → N be an r-good enumeration of a set A ⊆ N. The function p : N → N defined by p(n) := min{ m ∈ N | s(m) > n }, for all n ∈ N, is nondecreasing and can be computed from s. It satisfies, for all n ∈ N, p(s(n)) = min{ m ∈ N | s(m) > s(n) } > n. We define a function g : N → N recursively as follows. For t ∈ N let M[t] := Enum(f)[r(p(t))] \ Enum(g)[t] and

$$
g(t) :=
\begin{cases}
1 + \min(M[t]) & \text{if } M[t] \neq \emptyset,\\
0 & \text{if } M[t] = \emptyset.
\end{cases}
$$

The function g is an enumeration of A. It is clear that it can be computed from r, s, and f. By assumption, there are infinitely many n such that {0, …, n − 1} ∩ A ⊆ Enum(f)[r(n)]. Let us consider such a number n. We claim that

{0, …, n − 1} ∩ A ⊆ Enum(g)[n + s(n)].

To see this, first note that p(s(n)) > n implies

{0, …, n − 1} ∩ A ⊆ Enum(f)[r(n)] ⊆ Enum(f)[r(p(s(n)))].

These are at most n numbers, and those among them which have not yet been enumerated by g in stages strictly before stage s(n) are the smallest elements of M[s(n)]. Thus, because no further number smaller than n can enter M[t] for any t > s(n), they will be enumerated by g in one of the n stages s(n), …, s(n) + n − 1. Consequently, they are elements of Enum(g)[n + s(n)], as was to be shown. □
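Lemma 10's construction is a delayed re-enumeration: at stage t, g copies the smallest number from Enum(f)[r(p(t))] that it has not emitted yet. A finite-prefix Python sketch, in which all concrete functions and values are hypothetical toys:

def regood(f_values, r, p, T):
    # g(t) = 1 + min(M[t]) with M[t] = Enum(f)[r(p(t))] \ Enum(g)[t], else 0
    g_values, seen = [], set()
    for t in range(T):
        M = {v - 1 for v in f_values[:r(p(t))] if v > 0} - seen
        if M:
            m = min(M)
            seen.add(m)
            g_values.append(m + 1)
        else:
            g_values.append(0)
    return g_values

f_values = [0, 0, 5, 0, 1, 3, 0, 2]   # toy enumeration of A = {4, 0, 2, 1}
r = lambda n: 2*n                     # pretend f is r-good for this r
p = lambda n: n                       # toy stand-in for p(n) = min{ m : s(m) > n }
print(regood(f_values, r, p, 10))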
Proof of Theorem 9. We prove the nontrivial direction "(1) ⇒ (2)". Let us assume that r : N → N is a computable, nondecreasing, unbounded function such that A is regainingly r-approximable. Let f : N → N be a computable r-good enumeration of A. The function s : N → N defined by s(n) := ⌊n/2⌋, for n ∈ N, is computable, nondecreasing, and unbounded. By applying Lemma 10 we obtain a computable (id_N + s)-good enumeration g : N → N of A. So g is ⌊3n/2⌋-good. We distinguish two cases for A.

First case: For almost all n ∈ N, |{0, …, n − 1} ∩ A| ≤ ⌊n/2⌋. In this case, we proceed similarly as in the proof of Lemma 10. We define a function h : N → N recursively as follows. For t ∈ N let M[t] := Enum(g)[3t] \ Enum(h)[t] and

$$
h(t) :=
\begin{cases}
1 + \min(M[t]) & \text{if } M[t] \neq \emptyset,\\
0 & \text{if } M[t] = \emptyset.
\end{cases}
$$

The function h is a computable enumeration of A. Let N ∈ N be a number such that, for all n ≥ N, |{0, …, n − 1} ∩ A| ≤ ⌊n/2⌋. There are infinitely many n ≥ N with {0, …, n − 1} ∩ A ⊆ Enum(g)[⌊3n/2⌋]. Let us consider such a number n. We claim that {0, …, n − 1} ∩ A ⊆ Enum(h)[n]. Indeed, there are at most ⌊n/2⌋ numbers in {0, …, n − 1} ∩ A, and all these numbers are elements of the set Enum(g)[⌊3n/2⌋] ⊆ Enum(g)[3·⌈n/2⌉]. Furthermore, all those among these at most ⌊n/2⌋ numbers that have not yet been enumerated by h in stages strictly before stage ⌈n/2⌉ are the smallest elements of M[⌈n/2⌉] and will be enumerated by h in one of the ⌊n/2⌋ stages ⌈n/2⌉, …, n − 1 (because no further number smaller than n can enter M[t] for any t > ⌈n/2⌉). Thus, they are elements of Enum(h)[n]. That was to be shown.

Second case: There exist infinitely many n ∈ N with |{0, …, n − 1} ∩ A| > ⌊n/2⌋. In this case we define two increasing and computable sequences (n_i)_i and (t_i)_i of natural numbers as follows. First we compute the smallest natural number t_0 such that there exists a natural number n > 0 with ⌊3n/2⌋ ≤ t_0 and |{0, …, n − 1} ∩ Enum(g)[t_0]| > ⌊n/2⌋. Then we let n_0 be the smallest number n > 0 with this property. Let us consider i > 0. Once n_{i−1} and t_{i−1} have been determined, we compute the smallest natural number t_i > t_{i−1} such that there exists a natural number n with 2n_{i−1} ≤ n and ⌊3n/2⌋ ≤ t_i and |{0, …, n − 1} ∩ Enum(g)[t_i]| > ⌊n/2⌋. Then we let n_i be the smallest number n with this property. Next we recursively define a function h : N → N which will be an enumeration of the infinite set A with h(t) ≠ 0 for all t ∈ N. For any i ∈ N, let m_i be the number of elements of the following set M_i := {0, . . .
It turns out that the identity (1.1) implies the following summation formula:

    M(2a;\, 2b;\, z) = \sum_{n=0}^{\infty} \frac{(a)_n (b)_n (b-a)_n}{(b)_{2n} (2b)_{2n}\, n!}\, (-z^2)^n\, M(a+n;\, b+2n;\, z),    (1.4)

where we use the standard definition of Kummer's confluent hypergeometric function M(a; b; z):

    M(a;\, b;\, z) = \sum_{n=0}^{\infty} \frac{(a)_n}{(b)_n\, n!}\, z^n.    (1.5)

By specifying the indices of (1.4) in a particular manner, one finds the following summation formula for Bessel-J:

    J_{2\nu+1/2}(z) = \frac{\Gamma(\nu+1)}{\Gamma(2\nu+3/2)} \sum_{n=0}^{\infty} \frac{(\nu+1/2)_n}{(2\nu+3/2)_n\, n!} \Big(\frac{z}{2}\Big)^{\nu+1/2+n} J_{\nu+n}(z).    (1.6)

Despite our best efforts, we could not find any of the three identities (1.4), (1.1), and (1.6) in the literature.

2 Integral representation of the Whittaker function of the first kind

Here we show the integral representation (1.1) of the Whittaker function of the first kind. Recall that the Whittaker function of the first kind is a solution to the Whittaker differential equation

    \frac{d^2 y}{dz^2} + \Big(-\frac{1}{4} + \frac{\kappa}{z} + \frac{1/4-\mu^2}{z^2}\Big)\, y = 0,    (2.1)

subject to the boundary condition

    M_{\kappa,\mu}(z) \to z^{\mu+1/2} \quad (z \to 0).    (2.2)

All we need to do is check that the RHS of (1.1) satisfies the differential equation (2.1) and the boundary condition (2.2).

Boundary condition. In the z → 0 limit, the RHS of (1.1) becomes:

    (RHS of (1.1)) \to \frac{\sqrt{\pi}\, z^{\mu+1/2}\, \Gamma(2\mu+1)}{2^{2\mu}\, \Gamma\big(\tfrac{\mu+\kappa+1/2}{2}\big)\, \Gamma\big(\tfrac{\mu-\kappa+1/2}{2}\big)\, \Gamma(\mu+1)} \int_0^1 d\xi\; \xi^{\frac{\mu-\kappa+1/2}{2}-1}(1-\xi)^{\frac{\mu+\kappa+1/2}{2}-1} = z^{\mu+1/2}.    (2.3)

We used the series expansion (1.2) of the Bessel function in the first equality (its leading term contributes the factor 1/Γ(µ+1)). In going to the last equality, we used the following two Gamma function identities:

    \int_0^1 dt\; t^{x-1}(1-t)^{y-1} = \frac{\Gamma(x)\,\Gamma(y)}{\Gamma(x+y)},    (2.4)

    \Gamma(2z) = \frac{2^{2z-1}\,\Gamma(z)\,\Gamma(z+1/2)}{\sqrt{\pi}}.    (2.5)

Differential equation. Let us check that the RHS of (1.1) satisfies the Whittaker differential equation (2.1). After some manipulation, we see

    \Big[\frac{d^2}{dz^2} + \Big(-\frac{1}{4} + \frac{\kappa}{z} + \frac{1/4-\mu^2}{z^2}\Big)\Big] (RHS of (1.1))
    \propto \int_0^1 d\xi\; \xi^{\frac{-\kappa+1/2}{2}-1}(1-\xi)^{\frac{\kappa+1/2}{2}-1}\, e^{(\xi-1/2)z} \Big[\frac{d^2}{dz^2} + \frac{1}{z}\frac{d}{dz} + \xi(1-\xi) - \frac{\mu^2}{z^2} + (2\xi-1)\frac{d}{dz} - 2\xi(1-\xi) + \frac{\kappa+\xi-1/2}{z}\Big] J_\mu\big(\sqrt{\xi(1-\xi)}\,z\big).    (2.6)

Notice that the first four terms in the bracket add up to zero thanks to the Bessel differential equation:

    \Big[\frac{d^2}{dz^2} + \frac{1}{z}\frac{d}{dz} + 1 - \frac{\nu^2}{z^2}\Big] J_\nu(z) = 0.    (2.7)

We now focus on the fifth term in (2.6). Using the identity that follows from (1.2),

    (2\xi-1)\frac{d}{dz}\, J_\mu\big(\sqrt{\xi(1-\xi)}\,z\big) = -\frac{2\xi(1-\xi)}{z}\frac{d}{d\xi}\, J_\mu\big(\sqrt{\xi(1-\xi)}\,z\big),    (2.8)

we perform integration by parts with respect to ξ:

    (fifth term in (2.6))
    = -\frac{2}{z}\int_0^1 d\xi\; \xi^{\frac{-\kappa+1/2}{2}}(1-\xi)^{\frac{\kappa+1/2}{2}}\, e^{(\xi-1/2)z}\, \frac{d}{d\xi} J_\mu\big(\sqrt{\xi(1-\xi)}\,z\big)
    = \frac{2}{z}\int_0^1 d\xi\; J_\mu\big(\sqrt{\xi(1-\xi)}\,z\big)\, \frac{d}{d\xi}\Big[\xi^{\frac{-\kappa+1/2}{2}}(1-\xi)^{\frac{\kappa+1/2}{2}}\, e^{(\xi-1/2)z}\Big]
    = \int_0^1 d\xi\; \xi^{\frac{-\kappa+1/2}{2}-1}(1-\xi)^{\frac{\kappa+1/2}{2}-1}\, e^{(\xi-1/2)z}\, \Big[2\xi(1-\xi) - \frac{\kappa+\xi-1/2}{z}\Big] J_\mu\big(\sqrt{\xi(1-\xi)}\,z\big).    (2.9)

Here we assumed Re(µ ± κ + 1/2) > 0 in order to drop the surface terms. Plugging this into (2.6), one readily sees that all remaining terms cancel, and we conclude that the RHS of (1.1) satisfies the Whittaker differential equation (2.1).
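Since both sides of (1.1) are available numerically, the representation can also be spot-checked by quadrature. The sketch below (ours, not part of the paper) evaluates the right-hand side with mpmath and compares it against mpmath's built-in Whittaker function whitm; the test values of κ, µ, z are arbitrary, subject to Re(µ ± κ + 1/2) > 0.

```python
# Numerical sanity check of the integral representation (1.1), assuming
# mpmath's whitm(k, m, z) implements the Whittaker function M_{k,m}(z).
from mpmath import mp, mpf, pi, sqrt, exp, gamma, besselj, quad, whitm

mp.dps = 30  # working precision (decimal digits)

def rhs_of_1_1(kappa, mu, z):
    """Right-hand side of (1.1); requires Re(mu +- kappa + 1/2) > 0."""
    half = mpf(1) / 2
    pref = (sqrt(pi) * gamma(2 * mu + 1)
            / (2**mu * gamma((mu + kappa + half) / 2)
                     * gamma((mu - kappa + half) / 2)))
    f = lambda xi: (xi**((-kappa + half) / 2 - 1)
                    * (1 - xi)**((kappa + half) / 2 - 1)
                    * exp((xi - half) * z)
                    * besselj(mu, sqrt(xi * (1 - xi)) * z))
    # tanh-sinh quadrature handles the integrable endpoint singularities
    return pref * sqrt(z) * quad(f, [0, 1])

kappa, mu, z = mpf(1) / 3, mpf(3) / 4, mpf(2)
print(rhs_of_1_1(kappa, mu, z))  # the two printed values should agree
print(whitm(kappa, mu, z))
```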
3 Summation formula for confluent hypergeometric functions

We now derive the summation formula (1.4) for confluent hypergeometric functions. Assuming Re(µ ± κ + 1/2) > 0 and Re(µ) > 0, we start by applying the Mellin–Barnes-type representation of Bessel-J [2],

    J_\nu(z) = \int_{-i\infty}^{i\infty} \frac{dt}{2\pi i}\, \frac{\Gamma(-t)}{\Gamma(\nu+t+1)} \Big(\frac{z}{2}\Big)^{\nu+2t}, \qquad \mathrm{Re}(\nu) > 0,    (3.1)

to (1.1). After changing the order of integration, we find:

    (second line of (1.1)) = \sqrt{z}\, e^{-z/2} \int_{-i\infty}^{i\infty} \frac{dt}{2\pi i}\, \frac{\Gamma(-t)}{\Gamma(\mu+t+1)} \Big(\frac{z}{2}\Big)^{\mu+2t} \int_0^1 d\xi\; \xi^{\frac{\mu-\kappa+1/2}{2}+t-1}(1-\xi)^{\frac{\mu+\kappa+1/2}{2}+t-1}\, e^{z\xi}.    (3.2)

Notice that the ξ-integral is nothing but the integral representation of the confluent hypergeometric function:

    M(a;\, b;\, z) = \frac{\Gamma(b)}{\Gamma(a)\,\Gamma(b-a)} \int_0^1 dt\; e^{zt}\, t^{a-1}(1-t)^{b-a-1}.    (3.3)

Using (2.5), we have:

    (second line of (1.1)) = \frac{2^{\mu}\, z^{\mu+1/2}}{\sqrt{\pi}}\, e^{-z/2} \int_{-i\infty}^{i\infty} \frac{dt}{2\pi i}\, \frac{\Gamma\big(\tfrac{\mu-\kappa+1/2}{2}+t\big)\,\Gamma\big(\tfrac{\mu+\kappa+1/2}{2}+t\big)\,\Gamma(\mu+1/2+t)\,\Gamma(-t)}{\Gamma(2\mu+1+2t)\,\Gamma(\mu+1/2+2t)}\; z^{2t}\, M\Big(\frac{\mu-\kappa+1/2}{2}+t;\; \mu+\frac{1}{2}+2t;\; z\Big).    (3.4)

Deforming the integration contour to the right and picking up the residues coming from Γ(−t), we see:

    (second line of (1.1)) = \frac{2^{\mu}\,\Gamma\big(\tfrac{\mu-\kappa+1/2}{2}\big)\,\Gamma\big(\tfrac{\mu+\kappa+1/2}{2}\big)}{\sqrt{\pi}\,\Gamma(2\mu+1)}\, z^{\mu+1/2}\, e^{-z/2} \sum_{n=0}^{\infty} \frac{\big(\tfrac{\mu-\kappa+1/2}{2}\big)_n \big(\tfrac{\mu+\kappa+1/2}{2}\big)_n (\mu+1/2)_n}{(\mu+1/2)_{2n}\,(2\mu+1)_{2n}\, n!}\, (-z^2)^n\, M\Big(\frac{\mu-\kappa+1/2}{2}+n;\; \mu+\frac{1}{2}+2n;\; z\Big).    (3.5)

Substituting this into (1.1), we arrive at (1.4) for Re(a), Re(b − a), Re(b + 1/2) > 0.

We can verify that the identity (1.4) in fact holds for any a, b ∈ C by expanding both sides in powers of z and comparing them order by order with the help of the following formula:

    \frac{(2a)_k}{(2b)_k} = \frac{k!}{(b)_k} \sum_{n=0}^{\lfloor k/2 \rfloor} \frac{(a)_{k-n}\,(b)_n\,(b-a)_n}{(2b)_{2n}\, n!\, (k-2n)!}\, (-1)^n, \qquad k \in \mathbb{Z}_{\ge 0},    (3.6)

where ⌊x⌋ is the floor function, returning the largest integer less than or equal to x. The formula (3.6) can be proven as follows. First, a short calculation leads to¹

    (RHS of (3.6)) = \frac{(a)_k}{(b)_k}\; {}_3F_2\Big(\begin{matrix} b-a,\; -\tfrac{k-1}{2},\; -\tfrac{k}{2} \\ b+\tfrac{1}{2},\; 1-a-k \end{matrix};\; 1\Big),    (3.7)

with ₃F₂ being a generalized hypergeometric function defined by:

    {}_3F_2\Big(\begin{matrix} a,\, b,\, c \\ d,\, e \end{matrix};\; z\Big) = \sum_{n=0}^{\infty} \frac{(a)_n (b)_n (c)_n}{(d)_n (e)_n\, n!}\, z^n.    (3.8)

With the help of Saalschütz's theorem, which asserts [3, equation (1), chapter II]

    {}_3F_2\Big(\begin{matrix} a,\; b,\; -n \\ c,\; 1+a+b-c-n \end{matrix};\; 1\Big) = \frac{(c-a)_n (c-b)_n}{(c)_n (c-a-b)_n}, \qquad n \in \mathbb{Z}_{\ge 0},    (3.9)

we can verify that (3.6) is an identity.

¹ Use the Euler reflection formula Γ(z)Γ(1 − z) = π/sin(πz) and the Legendre duplication formula (2.5).

4 Summation formula for Bessel functions

Lastly, we show the summation formula (1.6) for Bessel functions. From the relation between Kummer's confluent hypergeometric functions and Bessel functions,

    J_\nu(z) = \frac{e^{\mp i z}}{\Gamma(\nu+1)} \Big(\frac{z}{2}\Big)^{\nu} M(\nu+1/2;\; 2\nu+1;\; \pm 2iz),    (4.1)

we have:

    (LHS of (1.6)) = \frac{e^{\mp i z}}{\Gamma(2\nu+3/2)} \Big(\frac{z}{2}\Big)^{2\nu+1/2} M(2\nu+1;\; 4\nu+2;\; \pm 2iz).    (4.2)

Plugging the summation formula (1.4) into this and using (4.1) again, one finds:

    (LHS of (1.6)) = \frac{e^{\mp i z}}{\Gamma(2\nu+3/2)} \Big(\frac{z}{2}\Big)^{2\nu+1/2} \sum_{n=0}^{\infty} \frac{[(\nu+1/2)_n]^2 (2\nu+1)_n}{(2\nu+1)_{2n}\,(4\nu+2)_{2n}\, n!}\, (2z)^{2n}\, M(\nu+1/2+n;\; 2\nu+1+2n;\; \pm 2iz)
    = \frac{\Gamma(\nu+1)}{\Gamma(2\nu+3/2)} \sum_{n=0}^{\infty} \frac{2^{4n}\, [(\nu+1/2)_n]^2 (\nu+1)_n (2\nu+1)_n}{(2\nu+1)_{2n}\,(4\nu+2)_{2n}\, n!} \Big(\frac{z}{2}\Big)^{\nu+1/2+n} J_{\nu+n}(z)
    = (RHS of (1.6)).    (4.3)

We need to use (2.5) several times to reach the last line. When ν = 0, the LHS of (1.6) reduces to a trigonometric function divided by √z,

    J_{1/2}(z) = \sqrt{\frac{2}{\pi z}}\, \sin z,    (4.4)

whereas the RHS remains a sum of Bessel-J's. This reproduces the known expansion of the sine in terms of Bessel functions found in [4, equation (9.4.2.19)]:

    \frac{\sin z}{z} = \sum_{n=0}^{\infty} \frac{1}{(2n+1)\, n!} \Big(\frac{z}{2}\Big)^{n} J_n(z).    (4.5)
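As with (1.1), the summation formula (1.6) and its ν = 0 specialization (4.5) converge quickly and can be spot-checked numerically. The sketch below (ours, not part of the paper) truncates the series at 30 terms and compares against SciPy's jv.

```python
# Illustrative numerical check of (1.6) and its nu = 0 reduction (4.5).
import math
from scipy.special import jv

def poch(a: float, n: int) -> float:
    """Pochhammer symbol (a)_n."""
    out = 1.0
    for k in range(n):
        out *= a + k
    return out

def rhs_of_1_6(nu: float, z: float, terms: int = 30) -> float:
    pref = math.gamma(nu + 1) / math.gamma(2 * nu + 1.5)
    return pref * sum(
        poch(nu + 0.5, n) / (poch(2 * nu + 1.5, n) * math.factorial(n))
        * (z / 2) ** (nu + 0.5 + n) * jv(nu + n, z)
        for n in range(terms))

nu, z = 0.7, 1.8
print(jv(2 * nu + 0.5, z), rhs_of_1_6(nu, z))   # both sides of (1.6)

# nu = 0 reduces (1.6) to (4.5): sin(z)/z as a series of Bessel-J's
series = sum((z / 2) ** n / ((2 * n + 1) * math.factorial(n)) * jv(n, z)
             for n in range(30))
print(math.sin(z) / z, series)                   # both sides of (4.5)
```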
Acknowledgments

We are grateful to T. Nishioka for valuable discussions. The work of Y. O. was supported by the Forefront Physics and Mathematics Program to Drive Transformation (FoPM), a World-leading Innovative Graduate Study (WINGS) Program of the University of Tokyo. The work of Y. O. was also supported by JSPS fellowship for young students No. 21J20750, MEXT, and by a JSR fellowship, the University of Tokyo.

References

[1] E. Whittaker and G. Watson, A Course of Modern Analysis: An Introduction to the General Theory of Infinite Processes and of Analytic Functions, with an Account of the Principal Transcendental Functions. Cambridge University Press, 1996.
[2] G. Watson, A Treatise on the Theory of Bessel Functions, Cambridge Mathematical Library. Cambridge University Press, 1995.
[3] W. Bailey, Generalized Hypergeometric Series, Cambridge Tracts in Mathematics and Mathematical Physics. The University Press, 1935.
[4] Y. L. Luke, The Special Functions and their Approximations, Vol. 2. Academic Press, New York, 1969.
diff --git a/39E3T4oBgHgl3EQfogqI/content/tmp_files/2301.04634v1.pdf.txt b/39E3T4oBgHgl3EQfogqI/content/tmp_files/2301.04634v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c74dd017e5b6630887a963f4820d9cbf5b670f48
--- /dev/null
+++ b/39E3T4oBgHgl3EQfogqI/content/tmp_files/2301.04634v1.pdf.txt
@@ -0,0 +1,1004 @@

Street-View Image Generation from a Bird's-Eye View Layout

Alexander Swerdlow    Runsheng Xu    Bolei Zhou
University of California, Los Angeles
{aswerdlow, rxx3386}@ucla.edu, bolei@cs.ucla.edu

Abstract

Bird's-Eye View (BEV) perception has received increasing attention in recent years as it provides a concise and unified spatial representation across views and benefits a diverse set of downstream driving applications. While the focus has been placed on discriminative tasks such as BEV segmentation, the dual generative task of creating street-view images from a BEV layout has rarely been explored. The ability to generate realistic street-view images that align with a given HD map and traffic layout is critical for visualizing complex traffic scenarios and developing robust perception models for autonomous driving. In this paper, we propose BEVGen, a conditional generative model that synthesizes a set of realistic and spatially consistent surrounding images that match the BEV layout of a traffic scenario. BEVGen incorporates a novel cross-view transformation and spatial attention design which learn the relationship between cameras and map views to ensure their consistency. Our model can accurately render road and lane lines, as well as generate traffic scenes under different weather conditions and times of day. The code will be made publicly available.

1. Introduction

BEV perception for autonomous driving is a fast-growing field whose goal is to learn a cross-view representation that transforms information between the perspective and bird's-eye views. Such a representation can be used in downstream tasks such as path planning and trajectory forecasting [1, 55]. The recent successes in BEV perception, whether for monocular images [9,13,14] or multi-view images [17,50,56], mostly focus on the discriminative side of BEV perception, where the inputs are street-view images and the output is a semantic BEV layout. However, the generative side of BEV perception, which aims at synthesizing realistic street-view images from a given BEV semantic layout, is rarely explored. A BEV layout concisely describes a traffic scenario at the semantic level, so generating its corresponding street-view images can help visualize the scene in a more real-world setting.

[Figure 1 — panels: "BEV Layout" | "Generated Street-View Images". Caption: The proposed BEVGen generates realistic and spatially consistent street-view images from a BEV layout. There are six camera views surrounding the ego vehicle, as indicated by the green rectangle in the BEV layout.]

There are many potential applications for the BEV generation task. For example, we can create synthetic training data for BEV segmentation models.
Whereas most current approaches to synthetic training data involve a complex simulator or 3D reconstructed meshes, it is simpler to adopt a controllable generative model for diverse image generation. Another benefit provided by BEV generation is the ease of visualizing and editing traffic scenes. In the case of self-driving vehicles, we often care about a small set of rare scenarios where an accident is most likely to happen. Human users can intuitively edit a BEV layout and then use a generative model to output the corresponding street-view images for training or testing a driving system.

The fundamental question for BEV generation is: what could be a plausible set of street-view images that correspond to this BEV layout? One could think of numerous scenes with varying vehicle types, backgrounds, and more. For a set of views to be realistic, we need to consider several properties of the images. Similar to the problem of novel view synthesis, images must appear consistent, as if they were taken in the same physical location. For instance, cameras with an overlapping field-of-view (FoV) should have
The model can also render real- +istic scene images from out-of-domain BEV maps, such as +those provided by a driving simulator or edited by a user. +We summarize our contributions as follows: +• We tackle the new task of multi-view image generation +from BEV layout. It is the first attempt to explore the +generative side of BEV perception for driving scenes. +• We develop a novel generative model BEVGen that +can synthesize spatially consistent street-view images +by incorporating spatial embeddings and a pairwise +camera bias. +• The model achieves high-quality synthesis results and +shows promise for applications such as data augmen- +tation and 3D simulation rendering. +2. Related Work +Cross-modal Image Generation. +Cross-modal image +generation has seen a lot of attention in recent years with +work on text-to-image models [12, 32–34, 40], speech-to- +image models [5, 20], and image-to-video models [42]. +Others have focused on using more direct representations +to control generation, including generation from semantic +masks [18,26,52], or worked to convert higher-level repre- +sentations such as text [15,30], scene graphs [7,19,44,51], +and bounding boxes [22] into such a semantic mask. There +have also been several attempts at learning spatially disen- +tangled scene representations by composing latent features +that correspond to specific parts of a scene [10, 27]. Our +task is conceptually similar to image generation from a se- +mantic mask but distinct in that our semantic representation +only provides minimal layout constraints, lacking height in- +formation, direct occlusion representation, and background +information. +Image-to-image Generation. +Direct image-to-image +translation has also taken off in recent years with models +such as pix2pix [18] and cycleGAN [57]. Several works +have focused directly on the task of street-view synthe- +sis from satellite views as a subset of the image-to-image +translation problem [35, 41, 43, 54]. These works attempt +to tackle viewpoint transformation, from a top-down to an +ego-centric view, that is implicitly required for our task, but +our task does not benefit from the rich RGB representation +provided by a satellite view. Furthermore, large portions of +our virtual camera views correspond to areas entirely un- +labeled on our BEV map, requiring largely unconditional +generation for these areas. +Image Outpainting. +Spatial consistency is important for +tasks such as image outpainting, where the goal is to gener- +ate or extend an image of the same scene. Early approaches +for image outpainting used auto-regressive approaches on a +pixel-wise level [6,25,45,46]. However, this approach can +be computationally expensive and thus is limited to gen- +erating low-resolution images. Subsequently, GANs were +introduced to the task [16, 23, 39, 49] which do not suffer +from the same computational limitations as pixel-wise au- +toregressive approaches. More recent works have utilized a +Vector Quantised-Variational Autoencoder (VQ-VAE) [47] +to great success [2,4]. Similar to image outpainting, our task +requires generated images to appear coherent in weather +and location; however, we also seek to generate distinct, +partially overlapping camera views and require that portions +of these views are conditionally generated from a BEV lay- +out. +Novel View Synthesis. 
Novel View Synthesis. The same underlying VQ-VAE architecture has been used for the single-view novel view synthesis (NVS) task, where the goal is to generate a new virtual camera view given a source image. By conditioning an autoregressive transformer on camera translation and rotation, [38] showed that a transformer-based model can learn the 3D relationship between images without the explicit depth maps or warping used in prior attempts at single-view NVS such as [37, 48]. To improve the consistency between frames, [36] suggests a camera-aware bias for self-attention that encodes the similarity between consecutive image frames. Our task requires a similar 3D understanding between different viewpoints as in NVS, but lacks the conditioning information provided by a source view(s) and requires consistency not only between frames but also with an HD map. If we broaden our task to allow for a source view, as we demonstrate in Fig. 6, our task can be thought of as a conditional NVS task.

3. Method

In this section, we introduce the framework of the proposed BEVGen. We have a semantic layout in bird's-eye view (BEV), B ∈ R^{H_b×H_b×c_b}, with the ego vehicle at the center and c_b channels describing the locations of vehicles, roads, lane lines, and more (see Sec. 4.1). Given a set of n virtual camera views to generate, (K_k, R_k, t_k)_{k=1}^{n}, where K_k, R_k, t_k are the intrinsics, extrinsic rotation, and translation of the k-th camera, we generate n images I_k ∈ R^{H_c×W_c×3}.

Fig. 2 illustrates the framework of the proposed BEVGen. BEVGen consists of two autoencoders modeled by VQ-VAE, one for images and one for the BEV representation, which allow the causal transformer to model scenes at a high level. The key novelty lies in how the transformer relates information between modalities and across different views. The cross-view transformation encodes a cross-modal inductive 3D bias, allowing the model to attend to relevant portions of the HD map and nearby image tokens. We explain each part in more detail below.

3.1. Model Structure

Image Encoder. To generate a globally coherent image, we model our distribution in a discrete latent space instead of pixel space. We use the VQ-VAE model introduced by Oord et al. [47] as an alternative generative architecture to GANs.¹ Additionally, we incorporate a perceptual and a patch-wise adversarial loss as in [11]. The VQ-VAE architecture consists of an encoder E_cam, a decoder D_cam, and a codebook Z_c = {z_m}_{m=1}^{M_c} ⊂ R^{n_c}, where M_c is the number of code vectors and n_c is the embedding dimension of each code. Given a source image x_k ∈ R^{H_c×W_c×3}, we encode ẑ_k = E(x_k) ∈ R^{h_c×w_c×n_c}. To obtain a discrete, tokenized representation, we find the nearest codebook vector for each feature vector ẑ_{k,ij} ∈ R^{n_c}, where i, j are the row and column indices in the discrete latent representation of size h_c × w_c:

    z_{k,ij} = \arg\min_{m} \lVert \hat{z}_{k,ij} - z_m \rVert \in R^{h_c×w_c×n_c}.    (1)

This creates a set of tokens z_k ∈ N^{h_c×w_c} that we refer to as our image tokens. To generate an image from a set of tokens, we decode z̃_k ∈ R^{h_c×w_c×n_c} with a convolutional decoder, D_cam(z̃_k) ∈ R^{H_c×W_c×3}, using the same architecture as [11].

¹ Note that switching to the recently developed class of diffusion models can potentially improve the image synthesis quality, but such models require an order of magnitude of additional data and computational resources for training, and thus we leave it for future work.
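As a minimal illustration of the quantization step in Eq. (1), the following sketch (ours; tensor names are illustrative and not taken from the authors' released code) finds the nearest codebook entry for every spatial feature, using the dimensions reported in Sec. 4.2.

```python
# Nearest-codebook quantization as in Eq. (1); an illustrative sketch.
import torch

def quantize(z_hat: torch.Tensor, codebook: torch.Tensor):
    """z_hat: (h_c, w_c, n_c) encoder features; codebook: (M_c, n_c).
    Returns integer token indices (h_c, w_c) and the quantized features."""
    flat = z_hat.reshape(-1, z_hat.shape[-1])        # (h_c*w_c, n_c)
    dists = torch.cdist(flat, codebook)              # pairwise L2 distances
    tokens = dists.argmin(dim=1)                     # arg min over codes, Eq. (1)
    z_q = codebook[tokens].reshape(z_hat.shape)      # look up code vectors
    return tokens.reshape(z_hat.shape[:2]), z_q

codebook = torch.randn(1024, 256)   # |Z_c| = 1024 codes, n_c = 256
z_hat = torch.randn(14, 25, 256)    # h_c x w_c = 14 x 25 as in Sec. 4.2
tokens, z_q = quantize(z_hat, codebook)
print(tokens.shape, z_q.shape)      # (14, 25), (14, 25, 256)
```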
BEV Encoder. To condition our model on a BEV layout, we use the same discrete representation as for camera images, except that we replace the perceptual and adversarial losses with a binary cross-entropy loss for binary channels and an L2 loss for continuous channels. We encode our BEV map b as before with E_bev(b) ∈ R^{h_b×w_b×n_b} and Z_b = {z_m}_{m=1}^{M_b} ⊂ R^{n_b} to obtain a set of tokens z_bev ∈ N^{h_b×w_b}. We discard the decoder stage, D_bev, after training the first stage, as it is not needed for our transformer model or for inference.

Autoregressive Modeling. Given a BEV layout and k sets of camera parameters, we seek to generate k images by learning the prior distribution of a set of discrete tokens z conditioned on z_bev, K, R, t:

    p(z \mid z_{bev}, K, R, t) = \prod_{i=0}^{h \times w \times k} p(z_i \mid z_{<i}, z_{bev}, K, R, t).    (2)

[...]

If r, c > h_b w_b, both positions correspond to image tokens, and thus we have two direction vectors, d_r and d_c, computed as in Eq. (3). As discussed in Sec. 3.1, we have a mapping between the sequence index and the image token (i, j) in camera k. If r > h_b w_b > c, we have a query for some image token and a key/value pair corresponding to a BEV token. Thus, we again construct two direction vectors. In this case our BEV direction vector consists of the 2D world coordinates (in the ego-center frame), and our image direction vector is the same as in Eq. (3) except with the row value set to the center of the image. Given these two direction vectors d_r, d_c, we add their cosine similarity and a learnable parameter θ_rc, as shown in Fig. 3:

    \beta_{rc} = \frac{d_r \cdot d_c}{\lVert d_r \rVert\, \lVert d_c \rVert} + \theta_{rc}.    (8)

3.4. Random Masking

A key problem that arises when generating multiple images in parallel is the quadratic complexity of the self-attention mechanism. One solution to this issue would be to limit the sequence length of our transformer by performing image extrapolation as in [2]. However, this limits the scene context and can cause later images to appear far different from the first image, despite having local image consistency. Instead, we implement a version of sparse attention as in [8, 53]. As opposed to a uniform random attention mask, we instead unmask regions of the image near the token we attend to. Using the same formulation as in Eq. (8), we create a pairwise similarity matrix for image tokens only. As sparse attention groups the input sequence into discrete blocks, we average-pool this matrix down to the resolution of the sparse attention operation and use these values as weights for sampling. Additionally, we have a sliding window in which we always attend to the last r tokens, and we attend to all BEV tokens.
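The pairwise bias of Eq. (8) can be sketched as below (ours, illustrative only). Since Eq. (3), which derives the direction vectors from camera intrinsics and extrinsics, is not reproduced in this extraction, random vectors stand in for them here.

```python
# Sketch of the pairwise attention bias of Eq. (8): cosine similarity of
# direction vectors plus a learnable per-pair offset theta_rc.
import torch

def pairwise_bias(d: torch.Tensor, theta: torch.Tensor) -> torch.Tensor:
    """d: (L, 3) direction vectors for all L sequence positions (stand-ins
    here; the paper computes them from camera geometry via Eq. (3));
    theta: (L, L) learnable offsets. Returns the (L, L) bias beta."""
    d_norm = torch.nn.functional.normalize(d, dim=-1)
    return d_norm @ d_norm.T + theta       # cosine similarity + theta_rc

L = 16 * 16 + 6 * 14 * 25                  # BEV tokens + 6 camera views
d = torch.randn(L, 3)
theta = torch.zeros(L, L, requires_grad=True)
beta = pairwise_bias(d, theta)             # added to the attention logits

# For the sparse variant (Sec. 3.4), one could sample which attention
# blocks to unmask with probability proportional to an average-pooled
# version of this matrix, always keeping the last r = 96 tokens and all
# BEV tokens visible.
```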
+4. Experiments
+4.1. Dataset
+We evaluate the proposed method using the NuScenes dataset [3], one of the popular driving datasets used for BEV segmentation and detection. We chose NuScenes as it is among the only large driving datasets to provide full 360° camera coverage with a consistent camera resolution, but our method can be easily adapted to other datasets with different camera arrangements, as demonstrated in Sec. 4.4. NuScenes consists of 1000 20-second scenes captured in Boston and Singapore. There are a total of 40k annotated instances, labeled at 2 Hz, split into 28k, 6k, and 6k instances for the train, validation, and test sets respectively. Each instance contains ground-truth 3D bounding boxes, 6 camera images covering a 360° FoV, calibrated camera intrinsics and extrinsics, as well as LiDAR and Radar scans. We project these 3D bounding boxes onto a BEV layout following standard practice used in BEV segmentation [17, 29, 56].
+Preprocessing. The BEV layout representation used in training and testing is a 256 × 256 mask representing 80 m × 80 m around the ego center and containing 21 channels. 14 channels are binary masks representing map information (lane lines, dividers, etc.) and actor annotations (cars, trucks, pedestrians, etc.). The remaining 7 channels provide instance information, including the visibility of an annotation within the camera view, the height, width, and orientation of the annotation, and the pixel offset from the center point of the annotation. We resize our cropped camera images to 224 × 400 and appropriately modify the intrinsics passed to our model. To enable our weighted cross-entropy loss, we project the provided 3D annotations onto the camera frame and weight the corresponding tokens in our discrete camera frame representation, zk ∈ N^{hc×wc}.
+4.2. Training Details
+VQ-VAE. We train the 1st stage camera VQ-VAE with aggressive augmentation consisting of flips, rotations, color shifts, and crops. Similarly, we train our 1st stage BEV VQ-VAE with flips and rotations. For the 2nd stage, we add minimal rotations and scaling but perform cropping and modify the corresponding intrinsics that are passed to the model.
+Transformer. We crop all images to H × W = 224 × 400, and our 4 encoder/decoder stages create a discrete latent representation of hc × wc = 14 × 25. Our BEV layout has a discrete latent representation of hb × wb = 16 × 16. Both the BEV and image codebooks have |Zc| = |Zb| = 1024 codes with an embedding dimension nc = nb = 256. Our transformer is GPT-like [31] with 16 heads and 24 layers. We use DeepSpeed to facilitate sparse self-attention and 16-bit training. We clip gradients at 50 to prevent instability during training and use the AdamW optimizer [24] with β1, β2 = 0.9, 0.95 and a learning rate of λ = 5e-7. For our sparse models, we use an attention mask density of 35% with a sliding window length of r = 96. Except as described in Sec. 4.4, our sparse model is derived by fine-tuning our full-attention model for 10 epochs.
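For readers who want the recipe above in code, here is a minimal PyTorch sketch of the optimizer settings. The transformer is stood in by a generic module (the paper's exact model width is not stated here, so d_model = 1024 is an assumption), and "clip gradients at 50" is interpreted as a maximum gradient norm, which is one plausible reading.

```python
import torch

# Optimization settings from Sec. 4.2 applied to a stand-in transformer
# (16 heads, 24 layers; d_model = 1024 is an assumption, not from the paper).
model = torch.nn.TransformerEncoder(
    torch.nn.TransformerEncoderLayer(d_model=1024, nhead=16, batch_first=True),
    num_layers=24,
)
opt = torch.optim.AdamW(model.parameters(), lr=5e-7, betas=(0.9, 0.95))

def training_step(tokens_embedded: torch.Tensor) -> float:
    """One step with a placeholder loss; real training predicts next tokens."""
    loss = model(tokens_embedded).pow(2).mean()
    opt.zero_grad()
    loss.backward()
    # "Clip gradients at 50", read here as a maximum gradient norm.
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=50.0)
    opt.step()
    return float(loss)

print(training_step(torch.randn(2, 16, 1024)))
```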
+4.3. Results
+We show generation results for all six camera views trained on the NuScenes dataset. For all visualizations, we flip the back left and right cameras along the vertical axis to highlight the image consistency of our model. Thus, the side, front, and back cameras meet at their outer edges in all figures. Since our work is, to our knowledge, the first attempt at conditional street-view synthesis from a BEV layout, we find no existing method to directly compare with. Instead, we compare with a baseline model consisting of the same underlying GPT architecture and using the same 1st stage encoders/decoders as our BEVGen model. We use a row-major decoding order and employ only a learnable position embedding, but do not add the spatial embeddings (Sec. 3.2) or camera bias (Sec. 3.3), and use full attention.
+Method           FID↓    Road mIoU↑   Vehicle mIoU↑
+Baseline         43.18   45.80        4.44
+BEVGen           25.54   50.20        5.89
+Sparse BEVGen    28.67   50.92        6.69
+Table 1. Baseline comparison over all 6 views on the NuScenes validation set.
+Qualitative result. Fig. 4 exhibits generation examples from BEVGen. Our model is able to generate a diverse set of scenes including intersections, parking lots, and boulevards. We observe that each camera view not only correctly displays the surroundings of the same location, but also preserves the spatial perspective. BEVGen synthesizes images under various weather conditions, with the same weather apparent in all images, including physical artifacts such as rain. We also demonstrate that our model is capable of generating diverse scenes corresponding to the same BEV layout. We see at the bottom of Fig. 4 the same location rendered in the day and at night by the model.
+We compare the generation quality of BEVGen to our baseline using the same BEV layout in Fig. 5. We see that BEVGen can not only render a more accurate scene with nearby vehicles present in the correct camera views, but its spatial consistency is significantly improved. Our model is able to correctly synthesize a vehicle partially present in multiple camera views. We also see that the background of the scene is consistent between cameras, unlike for the baseline model.
+Additionally, we apply a BEV segmentation model to the synthesized images and analyze the semantic content. As seen in Fig. 5, our images allow the model to correctly infer the road layout, whereas our baseline images do not.
+Quantitative result. We use the Fréchet Inception Distance (FID) to evaluate the quality of our synthesized images against the source images. Unless otherwise noted, all metrics are calculated on a subset of the NuScenes validation set. We sample 4 images from each scene, with 600 instances overall, and synthesize a set of images with no post-generation filtering. For calculating FID scores, we use clean-fid [28].
+To differentiate between the performance of our 1st and 2nd stage, we compare our results to those obtained by feeding the encoded tokens of the source images directly to the decoder, as is done when training the 1st stage. This represents the theoretical upper bound of our model's performance, and it allows us to largely remove the effect of the first stage, which is not the focus of this paper. However, it should be noted that the design of the 1st stage and the properties of the learned codebook can have a significant impact on the 2nd stage [11, 12].
+As seen in Tab. 1, our BEVGen model achieves an FID score of 25.54 compared to the baseline score of 43.18. This is in comparison to our reference upper-bound FID score of 9.37. Our model utilizing the sparse masking design from Sec. 3.4 achieves an FID score of 28.67. This sparse variant is approximately 48% faster during inference and 40% faster in training.
+While FID is a common metric for image synthesis quality, it fails to entirely capture the design goals of our task and cannot reflect the synthesis quality of different semantic categories. Since we seek to generate multi-view images consistent with a BEV layout, we wish to measure our performance on this consistency. To do this, we leverage the BEV segmentation network CVT from [56], trained entirely on source data for a fair comparison. We use the same set of generated images conditioned on a ground-truth BEV layout as before; for each set we apply the CVT to the generated images and then compare the predicted layout with the ground-truth BEV layout. We report both the road and vehicle class mean intersection-over-union (mIoU) scores. As shown in Tab. 1, we beat our baseline by 4.4 and 1.45 mIoU points for the road and vehicle classes respectively. Note that the performance of the BEV segmentation model on the validation set is 66.31 and 27.51 for road and vehicle mIoU respectively. This reveals that though the model can generate road regions in the image in a reasonable manner, it still has a limited capability of generating high-quality individual vehicles that can be recognized correctly by the segmentation network. This is a common problem for scene generation, where it remains challenging to synthesize small objects entirely. Our work is a starting point, and we plan to improve small object synthesis in future work.
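The consistency measurement just described is straightforward to express in code. The sketch below assumes a predict_bev callable standing in for the pre-trained CVT model, mapping a set of camera images to binary road/vehicle BEV masks; only the IoU bookkeeping is shown.

```python
import numpy as np

def iou(pred: np.ndarray, gt: np.ndarray) -> float:
    """IoU of two binary masks; empty-vs-empty counts as a perfect 1.0."""
    inter = np.logical_and(pred, gt).sum()
    union = np.logical_or(pred, gt).sum()
    return float(inter / union) if union else 1.0

def consistency_miou(image_sets, gt_layouts, predict_bev,
                     classes=("road", "vehicle")):
    """Mean per-class IoU between segmentation predictions on generated
    images and the conditioning ground-truth layouts. `predict_bev` is a
    stand-in for the pre-trained model; it returns {class: HxW mask}."""
    scores = {c: [] for c in classes}
    for images, gt in zip(image_sets, gt_layouts):
        pred = predict_bev(images)
        for c in classes:
            scores[c].append(iou(pred[c] > 0.5, gt[c] > 0.5))
    return {c: float(np.mean(s)) for c, s in scores.items()}
```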
+View-conditioned generation. We test the ability of our model to synthesize the other views when provided a view from a single camera, as seen in Fig. 6. Due to the chosen center-out decoding order, not all image tokens are able to attend to the source image; instead, we simply skip inference for the provided camera views. Despite this, we observe that our model is able to generate consistent imagery both in scene content and in time of day.
+4.4. Ablation Study
+To verify the effectiveness of our design choices, we run an ablation study on key features of our model. We run these experiments on the same subset of the NuScenes validation set as in Sec. 4.3, but only consider the 3 front-facing views to reduce training time. The 3 front-facing views have a larger FoV overlap than the rear views and capture more relevant scene features, such as cars and lane lines, when compared to the side-facing rear views, which capture a significant amount of background. This is more relevant to our task, as it allows us to better verify the design objectives of our model.
+Figure 4. Synthesized multi-view images from BEVGen. Image contents are diverse and realistic. The two instances in the bottom row use the same BEV layout for synthesizing the same location in day and night.
+Figure 5. Qualitative comparison to baseline. Left is the instance from the baseline and right is from BEVGen. We also show the predicted layout (only for the road class) from the generated multi-view images.
+We test four variants of our model: one with only center-out decoding, one with our camera bias, one with the camera bias and spatial embeddings, and a final model that we train from scratch using our sparse masking instead of fine-tuning. Tab. 2 shows a steady improvement in FID scores as we add the camera bias and spatial embeddings.
+Method                                          FID↓
+Center-out decoding                             42.32
++ Camera Bias                                   41.20
++ Camera Bias, Spatial Embedding                40.48
++ Camera Bias, Spatial Embedding, Sparse Mask   48.31
+Table 2. Ablation of the key model components.
+5. Applications
+Generating realistic images from a BEV layout has many applications. In this section we explore data augmentation for BEV segmentation and image generation from simulated BEV layouts.
+Data augmentation for BEV segmentation. An important application of our BEV-conditional generative model is generating synthetic data to improve prediction models. Thus, we seek to verify the effectiveness of our model by incorporating our generated images as augmented samples during training of a BEV segmentation model. We use CVT [56] as our model, which is also used in Sec. 4.3, and compare our results to training without any synthetic samples. We generate 6,000 unique instances using the BEV layouts from the train set of NuScenes. These synthetic instances are associated with the ground-truth BEV layout for training, with no relation to the results from Sec. 4.3. To reduce the effect of randomness during training, we set the random seed and disable non-deterministic operations for all training. As seen in Tab. 3, our data improves validation mIoU by 0.6 for both the road category and the vehicle category.
+Method                     Road mIoU   Vehicle mIoU
+CVT (w/o augmentation)     71.3        36.0
+CVT (w/ augmentation)      71.9        36.6
+Table 3. Application of data augmentation. We report segmentation results on the NuScenes validation set for models trained on the original training set and on the set augmented with synthetic data.
+Figure 6. View-conditioned generation. Green box indicates the provided source tokens.
+Figure 7. Generating images based on the BEV layouts provided by the MetaDrive driving simulator.
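A compact sketch of the augmentation protocol just described, under the assumption that synthetic samples are stored alongside the ground-truth layouts they were generated from; the container names and mixing scheme are illustrative, not the authors' code.

```python
import random

def build_augmented_training_set(real_samples, synthetic_samples, seed=0):
    """Mix generated (image set, BEV layout) pairs into segmentation training.

    real_samples / synthetic_samples: lists of (images, bev_layout) tuples.
    Each synthetic entry was generated from a train-set layout and keeps
    that layout as its label. Seeding mirrors the fixed-seed, deterministic
    training protocol described above.
    """
    random.seed(seed)
    mixed = list(real_samples) + list(synthetic_samples)  # e.g. +6,000 items
    random.shuffle(mixed)
    return mixed
```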
+Image generation from simulated BEV. Since one motivation for our task definition lies in the simplicity of the BEV layout, we wish to determine whether this enables our model to generate new scenes from out-of-domain (OOD) HD maps. We use the MetaDrive simulator [21] to generate random traffic scenarios and their associated BEV layouts in simulation, and then feed the BEV layouts into our BEVGen. Generated images are shown in Fig. 7. We can see that our model can turn the simulated scenes into realistic street images using the BEV layout as a bridge. It has the potential to help address the sim2real gap.
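The only interface between a simulator and the generative model is the rasterized layout of Sec. 4.1, so the bridge can be sketched without any simulator-specific API. Below, a generic list of ego-centered vehicle boxes is drawn into a 256 × 256 grid covering 80 m × 80 m; only one occupancy channel is shown, boxes are axis-aligned for brevity, and the remaining channels (map and instance information) would be filled analogously.

```python
import numpy as np

SIZE, EXTENT = 256, 80.0            # grid resolution (px) and coverage (m)

def world_to_px(xy: np.ndarray) -> np.ndarray:
    """Ego-centred metres -> pixel indices (ego at the grid centre)."""
    return np.clip(((xy / EXTENT) + 0.5) * SIZE, 0, SIZE - 1).astype(int)

def rasterize_vehicles(boxes, n_channels=21, vehicle_channel=0):
    """boxes: iterable of (cx, cy, length, width) in metres, axis-aligned."""
    bev = np.zeros((SIZE, SIZE, n_channels), dtype=np.float32)
    for cx, cy, length, width in boxes:
        x0, y0 = world_to_px(np.array([cx - length / 2, cy - width / 2]))
        x1, y1 = world_to_px(np.array([cx + length / 2, cy + width / 2]))
        bev[y0:y1 + 1, x0:x1 + 1, vehicle_channel] = 1.0
    return bev

# Two simulated vehicles, one ahead of and one behind the ego vehicle.
layout = rasterize_vehicles([(5.0, 2.0, 4.5, 2.0), (-10.0, -3.5, 4.5, 2.0)])
print(layout.shape, int(layout[..., 0].sum()))   # (256, 256, 21) <pixels set>
```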
+6. Discussion and Conclusion
+Limitations and future work. Despite the performance achieved with sparse attention, future work may benefit from the use of a bidirectional transformer to allow for parallel decoding, as demonstrated in [4]. We will also explore replacing the encoder with a diffusion model to improve the image synthesis quality. The proposed model still struggles to generate small objects such as pedestrians and some vehicles. We plan to decouple the generation of foreground and background to address this issue in future work.
+In this work we tackle the BEV generation task by developing a generative model called BEVGen. After training on a real-world driving dataset, the proposed model can generate spatially consistent multi-view images from a given BEV layout. We further show its applications to data augmentation and simulated BEV generation.
+References
+[1] Adil Kaan Akan and Fatma Güney. StretchBEV: Stretching Future Instance Prediction Spatially and Temporally. In Shai Avidan, Gabriel J. Brostow, Moustapha Cissé, Giovanni Maria Farinella, and Tal Hassner, editors, Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXVIII, volume 13698 of Lecture Notes in Computer Science, pages 444–460. 1
+[2] Naofumi Akimoto, Yuhi Matsuo, and Yoshimitsu Aoki. Diverse Plausible 360-Degree Image Outpainting for Efficient 3DCG Background Creation. pages 11441–11450. 2, 5
+[3] Holger Caesar, Varun Bankiti, Alex H. Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuScenes: A Multimodal Dataset for Autonomous Driving. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11618–11628. 5
+[4] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T. Freeman. MaskGIT: Masked Generative Image Transformer. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11305–11315. 2, 8
+[5] Lele Chen, Sudhanshu Srivastava, Zhiyao Duan, and Chenliang Xu. Deep Cross-Modal Audio-Visual Generation. In Proceedings of the Thematic Workshops of ACM Multimedia 2017, Thematic Workshops '17, pages 349–357. 2
+[6] Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative Pretraining From Pixels. In Proceedings of the 37th International Conference on Machine Learning, pages 1691–1703. 2
+[7] Helisa Dhamo, Azade Farshad, Iro Laina, Nassir Navab, Gregory D. Hager, Federico Tombari, and Christian Rupprecht. Semantic Image Manipulation Using Scene Graphs. pages 5213–5222. 2
+[8] Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, and Jie Tang. CogView: Mastering Text-to-Image Generation via Transformers. In Advances in Neural Information Processing Systems, volume 34, pages 19822–19835. 5
+[9] Pramit Dutta, Ganesh Sistu, Senthil Kumar Yogamani, Edgar Galván, and John B. McDonald. ViT-BEVSeg: A Hierarchical Transformer Network for Monocular Birds-Eye-View Segmentation. abs/2205.15667. 1
+[10] Dave Epstein, Taesung Park, Richard Zhang, Eli Shechtman, and Alexei A. Efros. BlobGAN: Spatially Disentangled Scene Representations. abs/2205.02837. 2
+[11] Patrick Esser, Robin Rombach, and Björn Ommer. Taming Transformers for High-Resolution Image Synthesis. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 12868–12878. 3, 6
+[12] Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-A-Scene: Scene-Based Text-to-Image Generation with Human Priors. In Shai Avidan, Gabriel J. Brostow, Moustapha Cissé, Giovanni Maria Farinella, and Tal Hassner, editors, Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XV, volume 13675 of Lecture Notes in Computer Science, pages 89–106. 2, 6
+[13] Shi Gong, Xiaoqing Ye, Xiao Tan, Jingdong Wang, Errui Ding, Yu Zhou, and Xiang Bai. GitNet: Geometric Prior-Based Transformation for Birds-Eye-View Segmentation. In Shai Avidan, Gabriel J. Brostow, Moustapha Cissé, Giovanni Maria Farinella, and Tal Hassner, editors, Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part I, volume 13661 of Lecture Notes in Computer Science, pages 396–411. 1
+[14] Nikhil Gosala and Abhinav Valada. Bird's-Eye-View Panoptic Segmentation Using Monocular Frontal View Images. 7(2):1968–1975. 1
+[15] Seunghoon Hong, Dingdong Yang, Jongwook Choi, and Honglak Lee. Inferring Semantic Layout for Hierarchical Text-to-Image Synthesis. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7986–7994. 2
+[16] Basile Van Hoorick. Image Outpainting and Harmonization using Generative Adversarial Networks. abs/1912.10960. 2
+[17] Anthony Hu, Zak Murez, Nikhil Mohan, Sofía Dudas, Jeffrey Hawke, Vijay Badrinarayanan, Roberto Cipolla, and Alex Kendall. FIERY: Future Instance Prediction in Bird's-Eye View From Surround Monocular Cameras. pages 15273–15282. 1, 5
+[18] Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A. Efros. Image-To-Image Translation With Conditional Adversarial Networks. pages 1125–1134. 2
+[19] Justin Johnson, Agrim Gupta, and Li Fei-Fei. Image Generation From Scene Graphs. In 2018 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2018, Salt Lake City, UT, USA, June 18-22, 2018, pages 1219–1228. 2
+[20] Jiguo Li, Xinfeng Zhang, Chuanmin Jia, Jizheng Xu, Li Zhang, Yue Wang, Siwei Ma, and Wen Gao. Direct Speech-to-image Translation. 14(3):517–529. 2
+[21] Quanyi Li, Zhenghao Peng, Lan Feng, Qihang Zhang, Zhenghai Xue, and Bolei Zhou. MetaDrive: Composing Diverse Driving Scenarios for Generalizable Reinforcement Learning. 8
+[22] Zejian Li, Jingyu Wu, Immanuel Koh, Yongchuan Tang, and Lingyun Sun. Image Synthesis from Layout with Locality-Aware Mask Adaption. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 13799–13808. 2
+[23] Han Lin, Maurice Pagnucco, and Yang Song. Edge Guided Progressively Generative Image Outpainting. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 806–815. 2
+[24] Ilya Loshchilov and Frank Hutter. Decoupled Weight Decay Regularization. In International Conference on Learning Representations (ICLR), 2019. 5
+[25] Jacob Menick and Nal Kalchbrenner. Generating High Fidelity Images with Subscale Pixel Networks and Multidimensional Upscaling. In International Conference on Learning Representations. 2
+[26] Sangwoo Mo, Minsu Cho, and Jinwoo Shin. InstaGAN: Instance-aware Image-to-Image Translation. abs/1812.10889. 2
+[27] Thu H Nguyen-Phuoc, Christian Richardt, Long Mai, Yongliang Yang, and Niloy Mitra. BlockGAN: Learning 3D Object-aware Scene Representations from Unlabelled Images. In Advances in Neural Information Processing Systems, volume 33, pages 6767–6778. 2
+[28] Gaurav Parmar, Richard Zhang, and Jun-Yan Zhu. On aliased resizing and surprising subtleties in GAN evaluation. In CVPR. 6
+[29] Jonah Philion and Sanja Fidler. Lift, Splat, Shoot: Encoding Images from Arbitrary Camera Rigs by Implicitly Unprojecting to 3D. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, Computer Vision - ECCV 2020, Lecture Notes in Computer Science, pages 194–210. 5
+[30] Tingting Qiao, Jing Zhang, Duanqing Xu, and Dacheng Tao. Learn, Imagine and Create: Text-to-Image Generation from Prior Knowledge. In Advances in Neural Information Processing Systems, volume 32. 2
+[31] Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. Language models are unsupervised multitask learners. 5
+[32] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical Text-Conditional Image Generation with CLIP Latents. abs/2204.06125. 2
+[33] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-Shot Text-to-Image Generation. In Proceedings of the 38th International Conference on Machine Learning, pages 8821–8831. 2
+[34] Scott Reed, Zeynep Akata, Xinchen Yan, Lajanugen Logeswaran, Bernt Schiele, and Honglak Lee. Generative Adversarial Text to Image Synthesis. In Proceedings of The 33rd International Conference on Machine Learning, pages 1060–1069. 2
+[35] Krishna Regmi and Ali Borji. Cross-View Image Synthesis Using Conditional GANs. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3501–3510. 2
+[36] Xuanchi Ren and Xiaolong Wang. Look Outside the Room: Synthesizing a Consistent Long-Term 3D Scene Video From a Single Image. pages 3563–3573. 3, 4
+[37] Chris Rockwell, David F. Fouhey, and Justin Johnson. PixelSynth: Generating a 3D-Consistent Experience From a Single Image. pages 14104–14113. 3
+[38] Robin Rombach, Patrick Esser, and Björn Ommer. Geometry-Free View Synthesis: Transformers and no 3D Priors. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 14336–14346. 3
+[39] Mark Sabini and Gili Rusak. Painting Outside the Box: Image Outpainting with GANs. abs/1808.08483. 2
+[40] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J. Fleet, and Mohammad Norouzi. Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding. abs/2205.11487. 2
+[41] Yujiao Shi, Dylan Campbell, Xin Yu, and Hongdong Li. Geometry-Guided Street-View Panorama Synthesis From Satellite Imagery. 44(12):10009–10022. 2
+[42] Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, Devi Parikh, Sonal Gupta, and Yaniv Taigman. Make-A-Video: Text-to-Video Generation without Text-Video Data. abs/2209.14792. 2
+[43] Aysim Toker, Qunjie Zhou, Maxim Maximov, and Laura Leal-Taixé. Coming Down to Earth: Satellite-to-Street View Synthesis for Geo-Localization. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 6484–6493. 2
+[44] Subarna Tripathi, Anahita Bhiwandiwalla, Alexei Bastidas, and Hanlin Tang. Using Scene Graph Context to Improve Image Generation. abs/1901.03762. 2
+[45] Aäron van den Oord, Nal Kalchbrenner, Lasse Espeholt, Koray Kavukcuoglu, Oriol Vinyals, and Alex Graves. Conditional Image Generation with PixelCNN Decoders. In Advances in Neural Information Processing Systems, volume 29. 2
+[46] Aäron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel Recurrent Neural Networks. In Proceedings of The 33rd International Conference on Machine Learning, pages 1747–1756. 2
+[47] Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural Discrete Representation Learning. In Advances in Neural Information Processing Systems, volume 30. 2, 3
+[48] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. SynSin: End-to-End View Synthesis From a Single Image. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7465–7475. 3
+[49] Qingguo Xiao, Guangyao Li, and Qiaochuan Chen. Image Outpainting: Hallucinating Beyond the Image. 8:173576–173583. 2
+[50] Runsheng Xu, Zhengzhong Tu, Hao Xiang, Wei Shao, Bolei Zhou, and Jiaqi Ma. CoBEVT: Cooperative Bird's Eye View Semantic Segmentation with Sparse Transformers. In 2022 Conference on Robot Learning (CoRL). 1
+[51] Chiao-An Yang, Cheng-Yo Tan, Wan-Cyuan Fan, Cheng-Fu Yang, Meng-Lin Wu, and Yu-Chiang Frank Wang. Scene Graph Expansion for Semantics-Guided Image Outpainting. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15596–15605. 2
+[52] Yurong You, Xinlei Pan, Ziyan Wang, and Cewu Lu. Virtual to Real Reinforcement Learning for Autonomous Driving. abs/1704.03952. 2
+[53] Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, and Amr Ahmed. Big Bird: Transformers for Longer Sequences. In Advances in Neural Information Processing Systems, volume 33, pages 17283–17297. 
5 +[54] Menghua Zhai, Zachary Bessinger, Scott Workman, and +Nathan Jacobs. Predicting Ground-Level Scene Layout from +Aerial Imagery. In 2017 IEEE Conference on Computer Vi- +sion and Pattern Recognition (CVPR), pages 4132–4140. 2 +[55] Yunpeng Zhang, Zheng Zhu, Wenzhao Zheng, Junjie Huang, +Guan Huang, Jie Zhou, and Jiwen Lu. BEVerse: Unified Per- +ception and Prediction in Birds-Eye-View for Vision-Centric +Autonomous Driving. abs/2205.09743. 1 +[56] Brady Zhou and Philipp Kr¨ahenb¨uhl. +Cross-view Trans- +formers for real-time Map-view Semantic Segmentation. In +2022 IEEE/CVF Conference on Computer Vision and Pat- +tern Recognition (CVPR), pages 13750–13759. 1, 3, 5, 6, +7 +[57] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A. +Efros. Unpaired Image-to-Image Translation Using Cycle- +Consistent Adversarial Networks. +In 2017 IEEE Interna- +tional Conference on Computer Vision (ICCV), pages 2242– +2251. 2 +11 + diff --git a/39E3T4oBgHgl3EQfogqI/content/tmp_files/load_file.txt b/39E3T4oBgHgl3EQfogqI/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..743e962fc1141c88fed1a40d6a39f69f11354ca1 --- /dev/null +++ b/39E3T4oBgHgl3EQfogqI/content/tmp_files/load_file.txt @@ -0,0 +1,552 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf,len=551 +page_content='Street-View Image Generation from a Bird’s-Eye View Layout Alexander Swerdlow Runsheng Xu Bolei Zhou University of California, Los Angeles {aswerdlow, rxx3386}@ucla.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='edu, bolei@cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='ucla.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='edu Abstract Bird’s-Eye View (BEV) Perception has received increas- ing attention in recent years as it provides a concise and unified spatial representation across views and benefits a diverse set of downstream driving applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' While the focus has been placed on discriminative tasks such as BEV segmentation, the dual generative task of creating street- view images from a BEV layout has rarely been explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The ability to generate realistic street-view images that align with a given HD map and traffic layout is critical for visualizing complex traffic scenarios and developing robust perception models for autonomous driving.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' In this paper, we propose BEVGen, a conditional generative model that synthesizes a set of realistic and spatially consistent sur- rounding images that match the BEV layout of a traffic sce- nario.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' BEVGen incorporates a novel cross-view transfor- mation and spatial attention design which learn the rela- tionship between cameras and map views to ensure their consistency.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Our model can accurately render road and lane lines, as well as generate traffic scenes under differ- ent weather conditions and times of day.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The code will be made publicly available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Introduction BEV perception for autonomous driving is a fast- growing field, with the goal of learning a cross-view repre- sentation that transforms information between the perspec- tive and bird’s-eye view.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Such representation can be used in downstream tasks such as path planning and trajectory forecasting [1, 55].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The recent successes in BEV percep- tion, whether for monocular images [9,13,14] or multi-view images [17,50,56], mostly focus on the discriminative side of BEV perception where the inputs are street-view images and the output is a semantic BEV layout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' However, the gen- erative side of BEV perception, which aims at synthesizing realistic street-view images from a given BEV semantic lay- out, is rarely explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' A BEV layout concisely describes a traffic scenario at the semantic level, therefore generating its corresponding street-view images can help visualize the BEV Layout Generated Street-View Images Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The proposed BEVGen generates realistic and spa- tially consistent street-view images from BEV layout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' There are six camera views surrounding the ego vehicle as indicated by the green rectangle in the BEV layout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' scene in a more real-world setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' There are many potential applications for the BEV gen- eration task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' For example, we can create synthetic train- ing data for BEV segmentation models.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Whereas most cur- rent approaches to synthetic training data involve a complex simulator or 3D reconstructed meshes, it is simpler to adopt a controllable generative model for diverse image genera- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Another benefit provided by the BEV generation is the ease of visualizing and editing traffic scenes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' In the case of self-driving vehicles, we often care about a small set of rare scenarios where an accident is most likely to happen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Human users can intuitively edit a BEV layout and then use a generative model to output the corresponding street-view images for training or testing a driving system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The fundamental question for BEV generation is: what could be a plausible set of street-view images that corre- spond to this BEV layout?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' One could think of numerous scenes with varying vehicle types, backgrounds, and more.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' For a set of views to be realistic, we need to consider several properties of the images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Similar to the problem of novel view synthesis, images must appear consistent, as if they were taken in the same physical location.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' For instance, cam- eras with an overlapping field-of-view (FoV) should have 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='04634v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='CV] 11 Jan 2023 overlapping content, and objects partially visible in one frame should appear in a rotated frame.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The visual styling of the scene also needs to be consistent such that all virtual views appear to be created in the same geographical area (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=', urban vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' rural), time of day, with the same weather conditions, and so on.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' In addition to image consistency, the images must correspond to the HD map, faithfully re- producing the specified road layout, lane lines, and vehicle locations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Unlike image-to-image translation with a seman- tic mask, the BEV generation model must infer the image layout to account for occlusions between objects and the relative heights of objects in a scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' These two main chal- lenges, image consistency and correspondence, are critical to the task but can be difficult to reconcile.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' If we only desire image consistency, similar to the case of image outpainting, the model is free to generate any consistent image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' How- ever, if we also wish to maintain correspondence between the virtual views and the HD map, portions of the virtual views are constrained to represent certain elements (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=', vehicles).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' On the other hand, if we only care about image correspondence, the model only needs the context of part of the HD map in its FoV and does not need to account for previously generated images or issues such as wraparound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' In this work, we tackle the new task of generating street- view images from a BEV layout and propose a generative model called BEVGen to address the underlying challenges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We develop an autoregressive model called BEVGen that generates a set of n realistic and spatially consistent images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 1 shows generation examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' BEVGen has two tech- nical novelties: (i) it incorporates spatial embeddings using camera instrinsics and extrinsics to allow the model to at- tend to relevant portions of the images and HD map, and (ii) it contains a novel attention bias and decoding scheme that maintains both image consistency and correspondence.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Thus the model can generate high-quality scenes with spa- tial awareness and scene consistency across camera views.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Compared to baselines, the proposed model obtains sub- stantial improvement in terms of image synthesis quality and semantic consistency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The model can also render real- istic scene images from out-of-domain BEV maps, such as those provided by a driving simulator or edited by a user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We summarize our contributions as follows: We tackle the new task of multi-view image generation from BEV layout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' It is the first attempt to explore the generative side of BEV perception for driving scenes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We develop a novel generative model BEVGen that can synthesize spatially consistent street-view images by incorporating spatial embeddings and a pairwise camera bias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The model achieves high-quality synthesis results and shows promise for applications such as data augmen- tation and 3D simulation rendering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Related Work Cross-modal Image Generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Cross-modal image generation has seen a lot of attention in recent years with work on text-to-image models [12, 32–34, 40], speech-to- image models [5, 20], and image-to-video models [42].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Others have focused on using more direct representations to control generation, including generation from semantic masks [18,26,52], or worked to convert higher-level repre- sentations such as text [15,30], scene graphs [7,19,44,51], and bounding boxes [22] into such a semantic mask.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' There have also been several attempts at learning spatially disen- tangled scene representations by composing latent features that correspond to specific parts of a scene [10, 27].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Our task is conceptually similar to image generation from a se- mantic mask but distinct in that our semantic representation only provides minimal layout constraints, lacking height in- formation, direct occlusion representation, and background information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Image-to-image Generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Direct image-to-image translation has also taken off in recent years with models such as pix2pix [18] and cycleGAN [57].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Several works have focused directly on the task of street-view synthe- sis from satellite views as a subset of the image-to-image translation problem [35, 41, 43, 54].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' These works attempt to tackle viewpoint transformation, from a top-down to an ego-centric view, that is implicitly required for our task, but our task does not benefit from the rich RGB representation provided by a satellite view.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Furthermore, large portions of our virtual camera views correspond to areas entirely un- labeled on our BEV map, requiring largely unconditional generation for these areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Image Outpainting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Spatial consistency is important for tasks such as image outpainting, where the goal is to gener- ate or extend an image of the same scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Early approaches for image outpainting used auto-regressive approaches on a pixel-wise level [6,25,45,46].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' However, this approach can be computationally expensive and thus is limited to gen- erating low-resolution images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Subsequently, GANs were introduced to the task [16, 23, 39, 49] which do not suffer from the same computational limitations as pixel-wise au- toregressive approaches.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' More recent works have utilized a Vector Quantised-Variational Autoencoder (VQ-VAE) [47] to great success [2,4].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Similar to image outpainting, our task requires generated images to appear coherent in weather and location;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' however, we also seek to generate distinct, partially overlapping camera views and require that portions of these views are conditionally generated from a BEV lay- out.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Novel View Synthesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The same underlying VQ-VAE architecture has been used for the single-view novel view synthesis (NVS) task where the goal is to generate new vir- tual camera view given a source image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' By conditioning 2 an autoregressive transformer with camera translation and rotation, [38] showed that a transformer-based model can learn the 3D relationship between images without explicit depth maps or warping as used in prior attempts for single- view NVS such as in [37, 48].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' To improve the consistency between frames, [36] suggests a camera-aware bias for self- attention that encodes the similarity between consecutive image frames.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Our task requires a similar 3D understand- ing between different viewpoints as in NVS, but lacks the conditioning information provided by a source view(s) and requires consistency not only between frames but also with an HD map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' If we broaden our task to allow for a source view, as we demonstrate in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 6, our task can be thought of as a conditional NVS task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Method In this section, we introduce the framework of the pro- posed BEVGen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We have a semantic layout in Birds-Eye View (BEV), B ∈ RHb×Hb×cb with the ego at the cen- ter and cb channels describing the locations of vehicles, roads, lane lines, and more (see Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='1).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Given a set of n virtual camera views to generate, (Kk, Rk, tk)n k=1, where Kk, Rk, tk are the intrinsics, extrinsic rotation, and trans- lation of the kth camera, we generate n images, Ik ∈ RHc×Wc×3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 2 illustrates the framework of the proposed BEV- Gen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' BEVGen consists of two autoencoders modeled by VQ-VAE, one for images and one for the BEV representa- tion, that allow the causal transformer to model scenes at a high level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The key novelty lies in how the transformer can relate information between modalities and across differ- ent views.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The cross-view transformation encodes a cross- modal inductive 3D bias, allowing the model to attend to relevant portions of the HD map and nearby image tokens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We explain each part in more detail below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Model Structure Image Encoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' To generate a globally coherent image, we model our distribution in a discrete latent space instead of pixel-space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We use the VQ-VAE model introduced by Oord et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' [47] as an alternative generative architecture to GANs1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Additionally, we incorporate a perceptual and a patch-wise adversarial loss as in [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' The VQ-VAE archi- tecture consists of an encoder Ecam, a decoder Dcam, and a codebook Zc = {zm}Mc m=1 ⊂ Rnc where Mc is the number of code vectors and nc is the embedding dimension of each code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Given a source image, xk ∈ RHc×Wc×3 we encode ˆzk = E(xk) ∈ Rhc×wc×nc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' To obtain a discrete, tokenized representation, we find the nearest codebook vector for each 1Note that switching to the recently developed class of diffusion mod- els can potentially improve the image synthesis quality, but such models require an order of magnitude of additional data and computational re- sources for training and thus we leave it for future works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' feature vector ˆzk,ij ∈ Rnc where i, j are the row, column in- dices in the discrete latent representation with size hc × wc: zk,ij = arg min m ∥ˆzk,ij − zm∥ ∈ Rhc×wc×nc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' (1) This creates a set of tokens zk ∈ Nhc×wc that we refer to as our image tokens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' To generate an image from a set of tokens, we decode ˜zk ∈ Rhc×wc×nc with a convolutional decoder, Dcam(˜zk) ∈ RHc×Wc×3 using the same architec- ture as [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' BEV Encoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' To condition our model on a BEV lay- out, we use the same discrete representation as for cam- era images, except we replace the perceptual and adver- sarial losses with a binary cross entropy loss for binary channels and an L2 loss for continuous channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We en- code our BEV map b as before with Ebev(b) ∈ Rhb×wb×nb and Zb = {zm}Mb m=1 ⊂ Rnb to obtain a set of tokens, zbev ∈ Nhb×wb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' We discard the decoder stage, Dbev, after training the 1st stage as it is not needed for our transformer model or inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Autoregressive Modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' Given a BEV layout and k sets of camera parameters, we seek to generate k images by learning the prior distribution of a set of discrete tokens, z conditioned on zbev, K, R, t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/39E3T4oBgHgl3EQfogqI/content/2301.04634v1.pdf'} +page_content=' p(z|zbev, K, R, t) = h×w×k � i=0 p(zi|z 0}. +(3) +The matrix of O↑ +d+1,1(R) representing the inversion sb through the boundary of a sphere b is given by +Sb := Id+2 − 2i(b)T i(b)Qd+2 +(4) +where Id+2 is the identity matrix of size d + 2. +2.2. Polytopal Apollonian packings. A polytopal sphere packing BP in dimension d ≥ 1, is the image, +up to M¨obius transformations, of the ball-arrangement projection β of an edge-scribed (d + 1)-polytope +P on � +Rd. The mapping β sends vertices of P to spheres of BP and the tangency relations are encoded +by the edges of P. 
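Before continuing with the skeleton structure of BP, a quick numerical aside on the inversion matrix of Eq. (4) above. The exact definition of Q_{d+2} falls in a span lost from this copy, so the sketch assumes the Lorentzian form Q = diag(1, ..., 1, -1), a choice consistent with the coordinates tabulated later in Figure 8, together with the normalisation ⟨i(b), i(b)⟩_Q = 1; under those assumptions S_b squares to the identity, as an inversion must.

```python
import numpy as np

# Numerical check of Eq. (4): S_b = I - 2 i(b)^T i(b) Q. Q_{d+2} is assumed
# Lorentzian, diag(1, ..., 1, -1); the document's own definition is lost.
d = 3
Q = np.diag([1.0] * (d + 1) + [-1.0])
i_b = np.array([[0.0, 0.0, 1.0, 1.0, 1.0]])   # i(b) as a row vector
assert np.isclose(i_b @ Q @ i_b.T, 1.0)        # normalisation <i(b), i(b)>_Q = 1

S = np.eye(d + 2) - 2.0 * (i_b.T @ i_b) @ Q

# An inversion is an involution: applying it twice is the identity.
assert np.allclose(S @ S, np.eye(d + 2))
print(np.round(S, 2))
```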
+For every 1 ≤ n ≤ d, there is a natural realization of the n-skeleton of P as a CW-complex contained in BP, which we call the n-skeleton of BP; it is made by realizing the vertices of P as the centers of BP and then, for every face f of P, taking the convex hull of the centers corresponding to the vertices of f. The 1-skeleton of BP corresponds to the natural realization of the tangency graph of BP, usually called the carrier of the packing [Ste05]. Every polytopal sphere packing admits a dual arrangement B*_P induced by the ball-arrangement projection of the polar of P. The Apollonian group A(BP) is the Kleinian group generated by the inversions through the dual spheres of BP, i.e. the spheres of B*_P. If we add the symmetries of BP to the set of generators, then we obtain the symmetrized Apollonian group of BP, denoted by SA(BP). When the interiors of every pair of spheres in P(BP) := A(BP) · BP are disjoint, we obtain an infinite sphere packing that we call a polytopal Apollonian packing. This class of infinite sphere packings can be seen as a particular case of the crystallographic sphere packings introduced in [KN19], where they are called polyhedral packings.
+Remark 1. For d ≥ 2, every polytopal sphere packing and its endowed structures are unique up to Möbius transformations. This can be seen as a consequence of the Mostow Rigidity Theorem [KN19]. In other words, any two edge-scribed realizations of a d-polytope are connected by a Möbius transformation.
+2.3. The hyperoctahedral group. We denote by T^d, O^d and C^d the analogues of the regular tetrahedron, octahedron and cube in dimension d ≥ 2, respectively (we refer to [RR21a; Ras21] for results on polytopal sphere packings arising from these polytopes). We recall that, for every d ≥ 2, O^d and C^d are dual to each other, while T^d is self-dual. Among these families of polytopes, two are of special relevance for this paper: the cube C^3 and the hyperoctahedron O^4, also called the orthoplex. The corresponding polytopal packings induced by these two polytopes are called cubic packings B_{C^3} [Sta15] and orthoplicial packings B_{O^4} [Nak14]. We shall index the elements by an antipodal labelling, where the spheres bi and b¯i correspond to antipodal vertices of the polytope, and we shall use the bar notation ¯i := −i. The vertices of O^d will be labelled by {1, . . . , d, ¯1, . . . , ¯d}, where the facets are the (d − 1)-simplices with vertices {±1, . . . , ±d}. Since facets of O^d correspond to vertices of C^d, we shall label each vertex of C^d by the concatenation of the labels of the vertices of O^d incident to the corresponding facet. The symmetry group of O^d (or equivalently C^d) is called the hyperoctahedral group, which corresponds to the Coxeter group B_d. Under the antipodal labelling, the hyperoctahedral group is generated by the signed permutations r_ij := (ij)(¯i¯j).
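To make the antipodal labelling concrete, here is a small Python check, written under the convention that an element of B_d is a bijection s of {±1, . . . , ±d} with s(−i) = −s(i), so it is determined by where the positive letters go. The enumeration confirms |B_d| = 2^d d! for d = 3, where B_3 is the symmetry group of the cube/octahedron, of order 48.

```python
import math
from itertools import permutations, product

# Elements of B_d in the antipodal labelling: each is a permutation of
# {1, ..., d} together with a sign per letter; s(-i) = -s(i) is implied.
def signed_permutations(d):
    for perm in permutations(range(1, d + 1)):
        for signs in product((1, -1), repeat=d):
            # store only the images of the positive letters
            yield {i + 1: sign * p for i, (p, sign) in enumerate(zip(perm, signs))}

d = 3
assert sum(1 for _ in signed_permutations(d)) == 2**d * math.factorial(d)  # 48

# The generator r_12 = (1 2)(bar1 bar2) swaps letters 1 and 2, signs kept:
r12 = {1: 2, 2: 1, 3: 3}
print(2**d * math.factorial(d), r12)
```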
2.4. Apollonian sections. An Apollonian section of P(B_P) is a subset S(B_P) = Γ · X ⊂ P(B_P), where Γ < SA(B_P) and X ⊂ B_P. Two Apollonian sections S(B_P) = Γ · X and S(B_Q) = Γ′ · X′ of two different Apollonian packings are said to be algebraically equivalent if Γ and Γ′ are isomorphic and there is an equivariant bijection φ : S(B_P) → S(B_Q) with respect to the actions. With this notion in hand, the second author proved in [Ras21] that any orthoplicial Apollonian packing P(B_O4) contains a tetrahedral section S_T3(B_O4), an octahedral section S_O3(B_O4) and a cubic section S_C3(B_O4), i.e. Apollonian sections which are algebraically equivalent to a tetrahedral P(B_T3), an octahedral P(B_O3) and a cubic Apollonian packing P(B_C3), respectively. We shall use a cubic section S_C3(B_O4) as the geometric framework for the constructions introduced in Section 4.

2.5. Algebraic links. A 2-tangle (in short, tangle) is a pair (U, t), where U is a compact set of R³ homeomorphic to a 3-ball and t is a collection {γ1, γ2, …, γm} of m ≥ 2 disjoint arcs contained in U such that γ1 and γ2 are open arcs whose endpoints lie on the boundary of U, while the remaining arcs are closed. Two tangles (U, t) and (U′, t′) are said to be equivalent if there is an isotopy of R³ carrying U to U′, t to t′ and the endpoints of (U, t) to the endpoints of (U′, t′). We denote this equivalence relation by t ≃ t′. Up to equivalence, we may assume that the endpoints of t lie on a common plane H. A tangle diagram of (U, t) is a regular projection of t on H, together with U ∩ H and the crossing information. When no confusion arises, we shall refer to a tangle (U, t) simply by t. We name the endpoints in a tangle diagram by the cardinal points NE, NW, SE and SW. The elementary tangles t_0, t_1 and t_∞ are the tangles illustrated in Figure 2.

Figure 2. The elementary tangles t_0, t_1 and t_∞, with endpoints labelled NE, NW, SW and SE.

For any two tangles t and t′, we have the following operations:
(i) the sum t + t′, obtained by connecting the East endpoints of t to the West endpoints of t′ (Figure 3);
(ii) the mirror −t: the image of t under the reflection on the plane containing the equator;
(iii) the flip F(t): the image of t under the reflection on the plane perpendicular to the equator and passing through the endpoints SW and NE;
(iv) the positive half-twist H_+ : t ↦ t_1 + t;
(v) the negative half-twist H_− : t ↦ −t_1 + t.

Figure 3. Sum of tangles.

Figure 4. Mirror, flip and half-twist operations of tangles.

Rational tangles were introduced by Conway in his work on enumerating and classifying knots and links [Con70]. For a given sequence of integers a1, …, an, all non-zero except possibly a1, we denote by t(a1, …, an) the rational tangle given by the following Conway algorithm [Cro04] (see Figure 5):

    t(a1, …, an) := H^{a1} F ⋯ H^{an} F (t_∞).    (5)

Figure 5. The rational tangle t(2, −2, −3) obtained by Conway's algorithm: t_∞ ↦ H^{−3}F ↦ t(−3) ↦ H^{−2}F ↦ t(−2, −3) ↦ H^{2}F ↦ t(2, −2, −3).

The slope of a rational tangle t(a1, …, an) is the rational number p/q obtained by the continued fraction expansion

    [a1, …, an] := a1 + 1/(a2 + 1/(⋯ + 1/an)) = p/q.    (6)

The name rational tangle comes from the connection, established by Conway's theorem [Con70], between the family of tangles produced by Conway's algorithm and the rational numbers: two rational tangles are equivalent if and only if they have the same slope. We denote by t_{p/q} the class of rational tangles with slope p/q, up to isotopy. The closure of a tangle (U, t) is the link formed by joining the endpoints by two disjoint and unlinked paths in the exterior of U. Up to equivalence, there are two possible closures: the numerator N(t), obtained by joining the northern and the southern endpoints separately, and the denominator D(t), obtained by joining the western and the eastern endpoints (see Figure 6).

Figure 6. The tangle closures N(t) and D(t).

A rational link is the closure of a rational tangle.
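As a quick illustration of equations (5) and (6) — our own sketch, not from the paper — the slope can be tracked through Conway's algorithm with elementary fraction arithmetic. One convenient bookkeeping that reproduces (6) treats the half-twist H^a as adding a to the slope and the flip F as inverting it; starting from t_∞ (slope 1/0) and reading the word right to left, this yields exactly the continued fraction [a1, …, an].

```python
from fractions import Fraction

def slope(coeffs):
    """Continued-fraction slope [a1, ..., an] of t(a1, ..., an), eq. (6)."""
    f = Fraction(coeffs[-1])
    for a in reversed(coeffs[:-1]):
        f = a + 1 / f
    return f

def conway_slope(coeffs):
    """Slope via Conway's algorithm, eq. (5): H^a adds a; F inverts (bookkeeping)."""
    num, den = 1, 0                   # slope of t_infty, written as num/den = 1/0
    for a in reversed(coeffs):
        num, den = den, num           # flip F
        num += a * den                # half-twists H^a
    return Fraction(num, den)

# t(2, -2, -3) has slope 11/7; by Conway's theorem it is equivalent to t(1, 1, 1, 3).
assert conway_slope([2, -2, -3]) == slope([2, -2, -3]) == Fraction(11, 7)
assert conway_slope([1, 1, 1, 3]) == Fraction(11, 7)
```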
Algebraic tangles are those obtained by sums and flips of rational tangles [Ada94]. Links obtained as the closure of algebraic tangles are said to be algebraic, or arborescent [GT86]. Pretzel links P(q1, …, qn) := N(t_{1/q1} + ⋯ + t_{1/qn}) are a particular case of algebraic links; see Figure 7.

Figure 7. The Pretzel knot P(3, −2, 3), which corresponds to the knot 8_19 in the Alexander–Briggs notation.

3. Necklace representations in polytopal Apollonian packings

In this section, we investigate the following question: given a link L and a polytopal Apollonian sphere packing P(B_P4), can we find a necklace representation of L contained in P(B_P4)? We answer this question positively for some 4-polytopes. We first have the following

Theorem 3.1. Let L be a link and let P(B_O4) be an orthoplicial Apollonian sphere packing. There is a necklace representation of L contained in P(B_O4).

Let us first introduce an auxiliary notion. Let P(B_P) be a polytopal Apollonian sphere packing, where P is an edge-scribed (d+1)-polytope. For every edge {i, j} of P, we define the edge-figure section of P(B_P) as the Apollonian section S_ij(B_P) := Γ_ij · B_P, where Γ_ij is the stabilizer subgroup of the Apollonian group of P for {b_i, b_j}. The subgroup Γ_ij is a Euclidean reflection group. Indeed, we may apply an inversion to B_P through a sphere centered at the tangency point of b_i and b_j, mapping these two spheres to two parallel half-spaces tangent at infinity. We then observe that every generator of Γ_ij must be a reflection on a hyperplane orthogonal to b_i and b_j (see Figure 9, left).

Proof of Theorem 3.1. Let B^12_O4 be the orthoplicial packing depicted in Figure 8. The edge-figure section S_12(B^12_O4) := Γ_12 · B^12_O4 is generated by the action of the parabolic subgroup of the orthoplicial Apollonian group Γ_12 := ⟨s_1234, s_123̄4, s_1234̄, s_123̄4̄⟩.

B^12_O4 | κ (δ if κ = 0) | c (n̂ if κ = 0) | i(b)^T
b1  | 0 (1) | (0, 0, 1)    | (0, 0, 1, 1, 1)
b2  | 0 (1) | (0, 0, −1)   | (0, 0, −1, 1, 1)
b3  | 1     | (1, 1, 0)    | (1, 1, 0, 0, 1)
b4  | 1     | (−1, 1, 0)   | (−1, 1, 0, 0, 1)
b1̄ | 2     | (0, 0, −1/2) | (0, 0, −1, −1, 1)
b2̄ | 2     | (0, 0, 1/2)  | (0, 0, 1, −1, 1)
b3̄ | 1     | (−1, −1, 0)  | (−1, −1, 0, 0, 1)
b4̄ | 1     | (1, −1, 0)   | (1, −1, 0, 0, 1)

Figure 8. The orthoplicial packing B^12_O4.

We notice that the 1-skeleton of S_12(B^12_O4) contains an infinite square grid, with two vertices lying on the line orthogonal to each square and connected to every corner (see Figure 9).

Figure 9. (Left) B^12_O4 with the mirrors of the generators of Γ_12, viewed from above; (right) S_12(B^12_O4) with its 1-skeleton.

Alexander's theorem [Ale23] implies that there is a braid γ whose closure is isotopically equivalent to L. We can always draw a diagram of γ in a regular square grid, where the crossings are drawn at the intersections of the diagonals of the squares and the remaining arcs use the edges of the grid, as in Figure 10 (center). This square-grid diagram induces a polygonal closed path in the 1-skeleton of S_12(B^12_O4), as in Figure 10 (right), which gives us a necklace representation N_L ⊂ P(B^12_O4). Since orientation-preserving Möbius transformations are ambient isotopies of R̂³, Remark 1 gives a Möbius transformation µ carrying P(B^12_O4) to P(B_O4) and N_L to a necklace representation of L contained in P(B_O4). □
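The tangency pattern claimed by the table of Figure 8 can be checked mechanically: with Q = diag(1, 1, 1, 1, −1), tangent spheres of the packing have Lorentz product −1, and the only non-tangent pairs of vertices in an orthoplex are the antipodal ones. The following sketch is our own verification, not part of the paper.

```python
import numpy as np

# The i(b)^T column of Figure 8, in the order b1..b4, b1bar..b4bar.
B12 = np.array([
    [ 0,  0,  1,  1, 1],   # b1 (half-space)
    [ 0,  0, -1,  1, 1],   # b2 (half-space)
    [ 1,  1,  0,  0, 1],   # b3
    [-1,  1,  0,  0, 1],   # b4
    [ 0,  0, -1, -1, 1],   # b1bar
    [ 0,  0,  1, -1, 1],   # b2bar
    [-1, -1,  0,  0, 1],   # b3bar
    [ 1, -1,  0,  0, 1],   # b4bar
], dtype=float)

Q = np.diag([1.0, 1, 1, 1, -1])
G = B12 @ Q @ B12.T

assert np.allclose(np.diag(G), 1)          # each sphere has unit Lorentz norm
for i in range(8):
    for j in range(i + 1, 8):
        expected = -3 if j == i + 4 else -1  # antipodes (i, i+4) apart, all pairs tangent
        assert np.isclose(G[i, j], expected)
print("Figure 8 realizes the tangency graph of the orthoplex O^4")
```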
Figure 10. (Left) A diagram of the trefoil obtained as a closed braid; (center) a square-grid diagram of the same closed braid; (right) a necklace representation of the trefoil in S_12(B^12_O4).

We wonder whether Theorem 3.1 can be proved without invoking Alexander's theorem. The construction used in the proof above can also be used to establish the inequality of Conjecture 1 for 2-braid links.

Corollary 3.1. For any 2-braid link L, we have ball(L) ≤ 4 cr(L).

Proof. The necklace representation induced by the square-grid diagram of an alternating 2-braid with n crossings has 4n + 2 spheres (see Figure 11, left). For the closure, we can exchange the last 4 spheres with the two half-spaces of S_12(B^12_O4) (Figure 11, right). □

Figure 11. (Left) A necklace representation of the 2-braid with 4 crossings in the square-grid section, with 18 spheres; (right) a necklace representation of the closure of the 2-braid with 4 crossings, in the square-grid section, with 16 spheres.

The upper bound of Corollary 3.1 cannot be extended to n-braid links when n ≥ 3. The main reason is that the half-spaces of the square-grid section cannot be used to close all the strands of the braid, which may increase the number of spheres to more than 4 times the number of crossings. A strategy similar to the one used in the proof of Theorem 3.1 can be employed to prove that every link admits a necklace representation in other polytopal Apollonian sphere packings P(B_P4). For instance, if P^4 has a regular triangle as edge-figure, then the 1-skeleton of the edge-figure section contains a subgraph topologically equivalent to a triangular grid. In this case, two tangent triangles of the triangular grid make up a rhombus, which can play the same role as the square in the square grid: if there is a chain of spheres connecting the opposite vertices on the great diagonal of the rhombus, then we can use them to construct a crossing. This turns out to be the case for the 4-simplex, the hypercube, the 24-cell and the 120-cell (see Figure 12). Although these triangular constructions produce necklace representations with more spheres than the orthoplicial one, they could be of interest for other problems, such as constructing 4-polytopes containing a given link in their graphs [Epp14].

Figure 12. A necklace representation of the trefoil knot in P(B_T4) (left) and P(B_C4) (right).

4. Orthocubic representations of algebraic links

Let B_C3 and B_O4 be the cubic and the orthoplicial packing given in Figures 13 and 14, respectively. We point out that the labelling of B_O4 has been chosen so that, for every b_i ∈ B_O4, the label i is positive if and only if the third coordinate of the center of b_i is positive. Note that in B_C3 the vertex b_{±1±2±3} has inversive coordinates i(b) = (±1, ±1, ±1, √2).

B_C3    | κ      | c                | i(b)^T
b12̄3̄  | 1 + √2 | (√2 − 1)(1, −1)  | (1, −1, −1, √2)
b1̄23̄  | 1 + √2 | (√2 − 1)(−1, 1)  | (−1, 1, −1, √2)
b1̄2̄3  | √2 − 1 | (√2 + 1)(−1, −1) | (−1, −1, 1, √2)
b123    | √2 − 1 | (√2 + 1)(1, 1)   | (1, 1, 1, √2)
b1̄23   | √2 − 1 | (√2 + 1)(−1, 1)  | (−1, 1, 1, √2)
b12̄3   | √2 − 1 | (√2 + 1)(1, −1)  | (1, −1, 1, √2)
b123̄   | 1 + √2 | (√2 − 1)(1, 1)   | (1, 1, −1, √2)
b1̄2̄3̄ | 1 + √2 | (√2 − 1)(−1, −1) | (−1, −1, −1, √2)

Figure 13. The cubic packing B_C3.
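Since the inversive coordinates in Figure 13 are exactly (ε1, ε2, ε3, √2) over all sign patterns, the cube structure can be read off from Lorentz products: two disks are tangent precisely when their sign patterns differ in one entry. A short check, ours and purely illustrative:

```python
import numpy as np
from itertools import product

SQRT2 = np.sqrt(2.0)
Q = np.diag([1.0, 1, 1, -1])

# i(b) = (e1, e2, e3, sqrt(2)) over all sign patterns, as in the i(b)^T column.
disks = {e: np.array([*e, SQRT2]) for e in product([1, -1], repeat=3)}

for v in disks.values():
    assert np.isclose(v @ Q @ v, 1.0)           # Lorentz-normalized disks
for e in disks:
    for f in disks:
        if e == f:
            continue
        flips = sum(a != b for a, b in zip(e, f))  # Hamming distance on the cube
        expected = {1: -1, 2: -3, 3: -5}[flips]    # tangent only when flips == 1
        assert np.isclose(disks[e] @ Q @ disks[f], expected)
print("B_C3 realizes the tangency graph of the cube")
```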
B_O4 | κ        | c                   | i(b)^T
b1   | 1 + 1/√2 | (√2 − 1)(1, −1, 1)  | (1/√2)(1, −1, 1, −1, √2)
b2   | 1 + 1/√2 | (√2 − 1)(−1, 1, 1)  | (1/√2)(−1, 1, 1, −1, √2)
b3   | 1 − 1/√2 | (√2 + 1)(−1, −1, 1) | (1/√2)(−1, −1, 1, 1, √2)
b4   | 1 − 1/√2 | (√2 + 1)(1, 1, 1)   | (1/√2)(1, 1, 1, 1, √2)
b1̄  | 1 − 1/√2 | (√2 + 1)(−1, 1, −1) | (1/√2)(−1, 1, −1, 1, √2)
b2̄  | 1 − 1/√2 | (√2 + 1)(1, −1, −1) | (1/√2)(1, −1, −1, 1, √2)
b3̄  | 1 + 1/√2 | (√2 − 1)(1, 1, −1)  | (1/√2)(1, 1, −1, −1, √2)
b4̄  | 1 + 1/√2 | (√2 − 1)(−1, −1, −1)| (1/√2)(−1, −1, −1, −1, √2)

Figure 14. The orthoplicial packing B_O4.

Let S_C3(B_O4) := Γ_C3 · B_O4 be a cubic Apollonian section of P(B_O4), where Γ_C3 is generated by six inversions of the form s_{±1±2±3±4}, whose mirrors are shown in Figure 15.

The equivariant bijection φ : P(B_C3) → S_C3(B_O4) is induced by the following isomorphisms (see Figure 15): on the groups, A(B_C3) → Γ_C3 sends each generator s_{±1}, s_{±2}, s_{±3} to the corresponding facet inversion of Γ_C3; on the packings, B_C3 → B_O4 sends b_{±(12̄3̄)} ↦ b_{±1}, b_{±(1̄23̄)} ↦ b_{±2}, b_{±(1̄2̄3)} ↦ b_{±3} and b_{±(123)} ↦ b_{±4}.

Figure 15. (Right) The cubic packing B_C3 with its dual; (left) B_O4 with the mirrors of the generators of the cubic section.

An alternative geometric way to obtain the bijection between the cubic section and the cubic Apollonian packing is to take the intersection of B_O4 and its dual with the XY-plane. The relative position of the centers of the spheres of the cubic section with respect to the XY-plane induces a 2-coloring of the cubic Apollonian packing in which two disks of the same color never intersect (see Figure 16). We call this coloring the z-coloring. Extending the z-coloring to the vertices of the 1-skeleton of P(B_C3) yields a proper 2-coloring of the tangency graph.

Figure 16. (Left) P(B_C3) with the z-coloring; (right) S_C3(B_O4) with the XY-plane.

In the same direction as Theorem 3.1, we present the following result, which allows us to prove the inequality of Conjecture 1 for an infinite family of alternating algebraic links (containing, in particular, the 2-braid links).

Theorem 4.1. For any algebraic link L, there is a necklace representation of L contained in S_C3(B_O4).

4.1. Orthocubic shifts. Let B_O3 be the octahedral packing which is the dual arrangement of the cubic packing B_C3; it can also be obtained by intersecting the dual arrangement of B_O4 with the XY-plane. Let us consider the symmetries r_12, r_13, r_23, r_13, r_23, r_33̄ of B_O3. By duality, these are also symmetries of B_C3. We recall that r_ij denotes the signed permutation (i j)(ī j̄). In the octahedral packing B_O3, r_12 corresponds to the reflection on the line {x = y}, r_±13 is the inversion through the circle centered at (±1, 0) with radius √2, and r_33̄ is the inversion through the unit circle centered at the origin (see Figure 17).

Figure 17. B_C3 with the mirrors of the generators of the cubic shifts.

We define the cubic shifts as the following six elements of the symmetrized Apollonian group of B_C3:

    µ_i := s_i r_i3  for every i ∈ {±1, ±2, −3},  and  µ_3 := s_3 r_33̄.    (7)

Figure 18 shows the action of the cubic shifts on the 1-skeleton of B_C3 with the z-coloring. We notice that µ_{±1} and µ_{±2} preserve the z-coloring, while µ_{±3} reverses it.
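Returning to the data of Figure 14, the i(b)^T column can be recomputed from the curvatures and centers via the inversive-coordinate formula of Section 2.1. The sketch below is our own consistency check (helper names ours):

```python
import numpy as np
from math import sqrt

def inversive(kappa, center):
    """i(b) = (kappa/2)(2c, |c|^2 - 1 - kappa^-2, |c|^2 + 1 - kappa^-2)."""
    c = np.asarray(center, dtype=float)
    n2 = float(c @ c)
    return 0.5 * kappa * np.array([*(2 * c), n2 - 1 - kappa**-2, n2 + 1 - kappa**-2])

s = sqrt(2.0)
# (kappa, center) rows of Figure 14, in the order b1..b4, b1bar..b4bar.
data = [
    (1 + 1/s, (s - 1) * np.array([ 1, -1,  1])),
    (1 + 1/s, (s - 1) * np.array([-1,  1,  1])),
    (1 - 1/s, (1 + s) * np.array([-1, -1,  1])),
    (1 - 1/s, (1 + s) * np.array([ 1,  1,  1])),
    (1 - 1/s, (1 + s) * np.array([-1,  1, -1])),
    (1 - 1/s, (1 + s) * np.array([ 1, -1, -1])),
    (1 + 1/s, (s - 1) * np.array([ 1,  1, -1])),
    (1 + 1/s, (s - 1) * np.array([-1, -1, -1])),
]
signs = [( 1, -1,  1, -1), (-1,  1,  1, -1), (-1, -1,  1,  1), ( 1,  1,  1,  1),
         (-1,  1, -1,  1), ( 1, -1, -1,  1), ( 1,  1, -1, -1), (-1, -1, -1, -1)]

for (kappa, c), e in zip(data, signs):
    expected = np.array([*e, s]) / s      # the tabulated i(b)^T = (1/sqrt(2))(e, sqrt(2))
    assert np.allclose(inversive(kappa, c), expected)
print("Figure 14 is consistent with the inversive-coordinate formula")
```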
The bijection φ : B_C3 → B_O4 induces the following morphisms:

φ : Sym(B_C3) → Sym(B_O4):  r_12 ↦ r_12,  r_13 ↦ r_13,  r_23 ↦ r_23,  r_13 ↦ r_24,  r_23 ↦ r_14,  r_33 ↦ r_12 r_34;
φ : SA(B_C3) → SA(B_O4):  µ_1 ↦ s_1234 r_13,  µ_−1 ↦ s_1234 r_24,  µ_2 ↦ s_1234 r_23,  µ_−2 ↦ s_1234 r_14,  µ_3 ↦ s_1234 r_12 r_34,  µ_−3 ↦ s_1234 r_12 r_34.

For every i = ±1, ±2, ±3, we call the elements φ(µ_i) ∈ SA(B_O4) the orthocubic shifts.

Figure 18. The action of the cubic shifts µ_{±1}, µ_{±2}, µ_{±3} on the 1-skeleton of B_C3 with the z-coloring.

4.2. Orthocubic coordinates. The cubic Apollonian packing P(B_C3) can be seen as a Coxeter system (W, S) with W = A(B_C3) and system of generators S = {s_{±1}, s_{±2}, s_{±3}}. Its Coxeter graph is the graph of the cube with the label ∞ at each edge. Therefore, the reduced words of (W, S) are the words without consecutive repeated letters. For each b ∈ S_C3(B_O4), there is a reduced word w = s_{j1} ⋯ s_{jn} and an element b_i ∈ B_C3 such that b = w · b_i. The depth of b is the minimal length of such a w in terms of the generators. By combining the reduced words of (W, S) with the bijection φ : P(B_C3) → S_C3(B_O4), we can give a coordinate system to the spheres in the cubic section. We define the orthocubic coordinates of every b ∈ S_C3(B_O4) as the label

    i_{j1⋯jn} := φ(s_{j1}) ⋯ φ(s_{jn}) · b_i = b    (8)

where i ∈ {±1, ±2, ±3, ±4} and j_l ∈ {±1, ±2, ±3}. Figure 19 shows the orthocubic coordinates of the elements of S_C3(B_O4) of depth ≤ 1.

Figure 19. The orthocubic coordinates of the elements of S_C3(B_O4) of depth ≤ 1.

4.3. Orthocubic representations. We define an orthocubic path γ as a polygonal curve in the 1-skeleton of S_C3(B_O4). A cubic diagram of γ is its orthogonal projection on the XY-plane. The orthogonal projection of the 1-skeleton of S_C3(B_O4) on the XY-plane is the 1-skeleton of P(B_C3) together with the diagonal edges of each square face, each of which joins two vertices of the same color under the z-coloring. The crossings of any cubic diagram arise as the intersection of the two diagonal edges of a common square face. With the information given by the z-coloring, the over/under crossing information can be deduced from the colors of the vertices of the diagonal edges (black = over, white = under). We define an orthocubic representation of a link L as a collection of disjoint closed orthocubic paths isotopically equivalent to L. Every orthocubic representation induces a necklace representation in S_C3(B_O4). Figure 20 shows an orthocubic representation of the trefoil knot together with its cubic diagram.

Figure 20. (Left) An orthocubic representation of the trefoil knot and (right) its corresponding cubic diagram.

An orthocubic path γ will be encoded by the sequence ⟨i_{w1}, …, i_{wn}⟩ of the orthocubic coordinates of its elements, in the linear order induced by γ. Since we consider unoriented paths, and the concatenation of two paths gives another path, the vectors encoding orthocubic paths must be quotiented by the following relations:
(i) (Symmetry) ⟨i_{w1}, …, i_{wn}⟩ = ⟨i_{wn}, …, i_{w1}⟩.
(ii) (Concatenation) {⟨i_{w1}, …, i_{wn}⟩, ⟨i_{wn}, …, i_{wm}⟩} = {⟨i_{w1}, …, i_{wn}, …, i_{wm}⟩}.

4.4. Orthocubic tangles. Let T be the tetrahedron in the 3-skeleton of B_O4 with vertices {1, 2, 3, 4}. We define an orthocubic tangle as a tangle (T, t), where t is a collection {γ1, γ2, …
, γm} of m ≥ 2 disjoint orthocubic paths contained in T such that the endpoints of γ1 and γ2 lie on the corners of T, and the remaining orthocubic paths are closed. In what follows, we construct the respective analogues of the elementary tangles, sum, mirror, flip and half-twists for orthocubic tangles, using elements of the symmetrized Apollonian group of B_O4.

(i) The orthocubic elementary tangles: t_0 := {⟨1, 4⟩, ⟨3, 2⟩}, t_1 := {⟨1, 2⟩, ⟨3, 4⟩} and t_∞ := {⟨1, 3⟩, ⟨2, 4⟩}.

Figure 21. The elementary orthocubic tangles.

(ii) The orthocubic flip F_O t := r_12 t, where r_12 ∈ Sym(B_O4) acts as the reflection on the plane {y = x} of R³.

(iii) The orthocubic mirror −t := φ(µ_−3) t ∪ {⟨1, 2⟩, ⟨1, 2⟩, ⟨3, 4⟩, ⟨3, 4⟩}.

Figure 22. The orthocubic flip and mirror.

(iv) The orthocubic sum t′ + t := φ(µ_−1) t′ ∪ {⟨1, 4⟩, ⟨2, 3⟩} ∪ φ(µ_1) t.

(v) The orthocubic half-twists H^+_O t := t_1 + t and H^−_O t := −t_1 + t.

Figure 23. The orthocubic sum and half-twists.

We define the orthocubic tangle closures by:

(vi) The orthocubic numerator N_O t := t ∪ {⟨1, 2_3, 3_3, 4⟩, ⟨2, 1_3, 4_3, 3⟩};
(vii) The orthocubic denominator D_O t := t ∪ {⟨1, 2_3, 4_3, 3⟩, ⟨2, 1_3, 3_3, 4⟩}.

Figure 24. The orthocubic tangle closures.

Since the orthocubic elementary tangles, operations and closures are isotopically equivalent to their homonyms in the classical framework of tangles, we can mimic Conway's method to define an orthocubic rational tangle t_O[a1, …, an] ≃ t(a1, …, an) by

    t_O[a1, …, an] := H^{a1}_O F_O ⋯ H^{an}_O F_O t_∞.    (9)

We now have all the elements to prove Theorem 4.1.

Proof of Theorem 4.1. Every rational tangle admits a necklace representation in S_C3(B_O4) via the orthocubic version of Conway's construction. Combining the latter with the orthocubic tangle operations, we obtain that any algebraic link admits an orthocubic representation. □

4.5. Improvement of the upper bound on the ball number. The orthocubic Conway algorithm can be slightly adapted in order to improve the upper bound on the ball number. For every a1 ≥ 0 and a2, …, an > 0, we define the reduced orthocubic Conway algorithm t̃_O[a1, …, an] by

    t̃_O[a1, …, an] := H^{a1}_O F_O ⋯ H^{an − 1}_O t_1.    (10)

Clearly, for every a1 ≥ 0 and a2, …, an > 0, we have t_O[a1, …, an] ≃ t̃_O[a1, …, an].

Theorem 4.2. Let L be an algebraic link obtained by the closure of the algebraic tangle t_{p1/q1} + ⋯ + t_{pm/qm}, where all the p_i/q_i have the same sign. Then ball(L) ≤ 4 cr(L).

Proof. Let L be an algebraic link given by the closure N(t), where t is the algebraic tangle t_{p1/q1} + ⋯ + t_{pm/qm}. The condition that all p_i/q_i have the same sign implies that the closure of t induces an alternating diagram of L, and thus, by the Tait conjecture on the crossing number of alternating diagrams [Kau87; Thi87; Mur87], the crossing number of L equals the sum of the crossing numbers of the tangles t_{pi/qi}. Without loss of generality, we may assume that all p_i/q_i are positive. For every p_i/q_i with positive continued fraction [a1, …, an], let t̂_{pi/qi} := t̃_O[a1, …, an].
Since F_O does not change the necklace length and H^+_O increases the necklace length by 4, we have

    |t̂_{pi/qi}| = 4(a1 + ⋯ + an − 1) + |t_1| = 4(a1 + ⋯ + an) = 4 cr(t_{pi/qi}).

Let t̂ be the orthocubic tangle given by the orthocubic sums t̂_{p1/q1} + ⋯ + t̂_{pm/qm}. By the equivalence between the orthocubic and the classical tangle operations, we have t̂ ≃ t. Since the necklace length is additive under the sum,

    |t̂| = |t̂_{p1/q1}| + ⋯ + |t̂_{pm/qm}| = 4 cr(t_{p1/q1}) + ⋯ + 4 cr(t_{pm/qm}) = 4 cr(L).

Finally, we notice that the exterior orthocubic paths ⟨1, 4⟩ and ⟨2, 3⟩ are not included in any orthocubic tangle obtained after applying an orthocubic sum. Therefore, we can use the exterior paths to close t̂, and in this way obtain a necklace representation of L with 4 cr(L) spheres. □

4.6. No tightness for non-alternating links. The family of algebraic links considered in Theorem 4.2 contains all the rational links as well as other well-known families, such as the Montesinos links with positive coefficients. These are the links obtained by the closure of t_{p1/q1} + ⋯ + t_{pn/qn} + t_r with p_i/q_i > 0 and r ≥ 0. If r = 0 and every p_i = 1, we obtain the Pretzel link P(q1, …, qn).

In the non-alternating case, it is possible to construct orthocubic algebraic tangles with necklace length strictly less than 4 times the crossing number. The first non-trivial example that we have found with this property is the Pretzel knot P(3, −2, 3), which corresponds to the knot 8_19 in the Alexander–Briggs–Rolfsen notation. This knot is not alternating [Cro04], and it admits an orthocubic necklace representation with 28 spheres, strictly fewer than 4 cr(8_19) = 32 (see Figure 25). However, it is trickier to establish a relation with the crossing number in the non-alternating case since, in general, the crossing number does not correspond to the sum of the crossings of the rational factors.

Figure 25. An orthocubic representation of the knot 8_19 with 28 spheres (left) and its cubic diagram (right).
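The sphere counts in the proof of Theorem 4.2 are easy to script. The sketch below is our own bookkeeping (function names are illustrative): it expands each slope into a positive continued fraction and charges 4 spheres per crossing, assuming, as in the proof, that lengths add under orthocubic sums and that the closure reuses the exterior paths.

```python
from fractions import Fraction

def positive_cf(p, q):
    """Positive continued fraction [a1, ..., an] of p/q > 0 (a1 >= 0, ai > 0)."""
    coeffs = []
    while q:
        a, r = divmod(p, q)
        coeffs.append(a)
        p, q = q, r
    return coeffs

def necklace_length(slopes):
    """Sphere count of the reduced orthocubic construction for N(t_{p1/q1} + ...)."""
    return sum(4 * sum(positive_cf(f.numerator, f.denominator)) for f in slopes)

# Figure-eight knot 4_1 = N(t_{5/2}): [2, 2] gives 4 crossings, hence 16 spheres,
# consistent with the 16-sphere representation shown in Figure 1.
assert positive_cf(5, 2) == [2, 2]
assert necklace_length([Fraction(5, 2)]) == 16

# Alternating Pretzel link P(3, 3, 3) = N(t_{1/3} + t_{1/3} + t_{1/3}):
# 9 crossings, hence 36 = 4 cr spheres.
assert necklace_length([Fraction(1, 3)] * 3) == 36
```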
5. A new visualization of the slope of rational tangles

The slope p/q of a rational tangle t_{p/q} can be identified with the slope of the meridian of a solid torus that is the branched double covering of a rational tangle [Cro04]. We present a new geometric interpretation of the correspondence between rational tangles and rational numbers, relating the slope of a tangle to the slope of the line passing through the origin and the last tangency point in the orthocubic Conway construction. Remarkably, this approach turns out to be helpful for finding infinitely many primitive solutions of the Diophantine equation x⁴ + y⁴ + z⁴ = 2t².

Let p/q be a positive fraction with positive continued fraction expansion [a1, …, an]. We define the orthocubic point η_{p/q} of the rational tangle t_{p/q} as the tangency point of the two disks in the cubic diagram of t_O[a1, …, an] corresponding to the last edge of the orthocubic tangle; by last edge we mean the edge arriving at the disk in the upper-right corner (see Figure 26). We point out that the disk in the upper-right corner corresponds to the sphere b123 ∈ B_C3, which remains fixed under the orthocubic Conway algorithm. The notion of orthocubic point extends naturally to tangles with negative fractions by applying the reflection through the plane {x = 0} to the whole setting.

Theorem 5.1. For every p/q ∈ Q± ∪ {∞}, the point η_{p/q} is the first intersection of the line through the origin with slope ±(p/q)^{−2} with the boundary of the disk b_{±123} ∈ B_C3.

Proof. It is enough to prove the positive case. Let p ≥ 0 and q ≥ 1 be two coprime integers. We claim that

    i(η_{p/q}) = (p², q², (p − q)², √2(p² − pq + q²))^T.    (11)

This would imply that the Cartesian coordinates of η_{p/q} are

    (1 / (√2 pq − (1 − √2)(p − q)²)) (p², q²),

which is exactly the first intersection point of the line {p² y = q² x} with the circle centered at (1 + √2, 1 + √2) of radius 1 + √2, i.e. the boundary of b123 ∈ B_C3.

Let us prove equality (11). Since p and q are positive, we can find a positive continued fraction expansion [a1, …, an] = p/q with a1 ≥ 0 and a_i ≥ 1 for every 1 < i ≤ n. Let t_{p/q} be the orthocubic tangle t_O[a1, …, an], and let η_{p/q} and η_∞ be the orthocubic points of t_{p/q} and t_∞, respectively. By the definitions of the orthocubic operations H_O and F_O, the isomorphism φ : SA(B_C3) → SA(B_O4) and the definition of orthocubic rational tangles given in (9), we have

    t_{p/q} = H^{a1}_O F_O ⋯ H^{an}_O F_O t_∞  ⟹  η_{p/q} = µ^{a1}_1 r_12 ⋯ µ^{an}_1 r_12 (η_∞) = (s_1 r_13)^{a1} r_12 ⋯ (s_1 r_13)^{an} r_12 (η_∞),

where s_1, r_13 and r_12 are the elements of SA(B_C3) described in Subsection 4.1. The inversive coordinates of η_∞ and the matrices representing s_1, r_13 and r_12 can be computed from equations (2) (with λ = (1 + √2)/2) and (4), giving

    i(η_∞) = (1, 0, 1, √2)^T,

    S_1 = [[−3, 0, 0, 2√2], [0, 1, 0, 0], [0, 0, 1, 0], [−2√2, 0, 0, 3]],
    R_13 = [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]],
    R_12 = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]].

Let M(k) := (S_1 R_13)^k R_12. By induction on k, one finds

    M(k) = [[0, 1 − k², −k(k + 2), √2 k(k + 1)],
            [1, 0, 0, 0],
            [0, −k(k − 2), 1 − k², √2 k(k − 1)],
            [0, −√2 k(k − 1), −√2 k(k + 1), 2k² + 1]].

We finally prove equality (11) by induction on the number n of coefficients in the continued fraction expansion of p/q. For n = 1 (that is, p = a1 and q = 1) we have

    i(η_{a1}) = M(a1) (1, 0, 1, √2)^T = (a1², 1, (a1 − 1)², √2(a1² − a1 + 1))^T.

Suppose equality (11) holds for n − 1 ≥ 1, and let r/s = a2 + 1/(a3 + ⋯ + 1/an). Then

    i(η_{p/q}) = M(a1) M(a2) ⋯ M(an) (1, 0, 1, √2)^T = M(a1) (r², s², (r − s)², √2(r² − rs + s²))^T
               = ((ra1 + s)², r², (ra1 + s − r)², √2((ra1 + s)² − r(ra1 + s) + r²))^T.

Since

    (ra1 + s)/r = a1 + s/r = a1 + 1/(r/s) = a1 + 1/(a2 + 1/(⋯ + 1/an)) = p/q,

equality (11) holds. □

Corollary 5.1. The Diophantine equation

    x⁴ + y⁴ + z⁴ = 2t²    (12)

has infinitely many primitive solutions.

Proof. Since points of R̂² correspond to light-like vectors of L^{3,1}, we can use the inversive coordinates of the orthocubic point of every rational tangle, given in equation (11), to produce primitive solutions of the Diophantine equation by taking

    x = p,  y = q,  z = p − q,  t = p² − pq + q².    (13)

□

We hope and expect the above approach to be helpful for investigating solutions of other types of Diophantine equations.

Figure 26. The orthocubic point (red) of the rational tangle t_{3/2}, lying on the line 3²y = 2²x and corresponding to the primitive solution 3⁴ + 2⁴ + 1⁴ = 2 × 7².
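Equation (13) can be checked directly: the light-like condition on (11) is precisely the polynomial identity p⁴ + q⁴ + (p − q)⁴ = 2(p² − pq + q²)². A short script (ours, for illustration) verifying it over coprime pairs:

```python
from math import gcd

def solution(p, q):
    """Candidate solution of x^4 + y^4 + z^4 = 2 t^2 from eq. (13)."""
    return p, q, p - q, p * p - p * q + q * q

# Verify the identity p^4 + q^4 + (p - q)^4 = 2 (p^2 - pq + q^2)^2 for coprime p > q.
for p in range(2, 40):
    for q in range(1, p):
        if gcd(p, q) != 1:
            continue
        x, y, z, t = solution(p, q)
        assert x**4 + y**4 + z**4 == 2 * t**2

print(solution(3, 2))   # (3, 2, 1, 7): the solution 3^4 + 2^4 + 1^4 = 2 * 7^2 of Figure 26
```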
References

[Ada94] C. C. Adams. The Knot Book. American Mathematical Society, 1994.
[Ale23] J. W. Alexander. "A lemma on systems of knotted curves". In: Proceedings of the National Academy of Sciences of the United States of America 9.3 (1923), p. 93.
[AM95] S. V. Anishchik and N. N. Medvedev. "Three-Dimensional Apollonian Packing as a Model for Dense Granular Systems". In: Physical Review Letters 75.23 (1995), pp. 4314–4317. doi: 10.1103/PhysRevLett.75.4314.
[Con70] J. H. Conway. "An enumeration of knots and links, and some of their algebraic properties". In: Computational Problems in Abstract Algebra. Elsevier, 1970, pp. 329–358.
[Cro04] P. R. Cromwell. Knots and Links. Cambridge University Press, 2004. doi: 10.1017/CBO9780511809767.
[Epp14] D. Eppstein. "Links and knots in the graphs of four-dimensional polytopes". 2014. url: https://11011110.github.io/blog/2014/12/13/links-and-knots.html.
[Gab+21] D. Gabai, R. Haraway, R. Meyerhoff, N. Thurston and A. Yarmola. Hyperbolic 3-manifolds of low cusp volume. 2021. arXiv: 2109.14570 [math.GT].
[GT86] D. Gabai and W. P. Thurston. Genera of Arborescent Links. Vol. 339. American Mathematical Society, 1986.
[Gra+03] R. Graham, J. C. Lagarias, C. L. Mallows, A. R. Wilks and C. H. Yan. "Apollonian circle packings: number theory". In: Journal of Number Theory 100.1 (2003), pp. 1–45. doi: 10.1016/S0022-314X(03)00015-5.
[Kau87] L. H. Kauffman. "State models and the Jones polynomial". In: Topology 26.3 (1987), pp. 395–407.
[KN19] A. Kontorovich and K. Nakamura. "Geometry and arithmetic of crystallographic sphere packings". In: Proceedings of the National Academy of Sciences 116.2 (2019), pp. 436–441. doi: 10.1073/pnas.1721104116.
[Kwo+20] S. Kwok, R. Botet, L. Sharpnack and B. Cabane. "Apollonian packing in polydisperse emulsions". In: Soft Matter 16.10 (2020), pp. 2426–2430. doi: 10.1039/C9SM01772K.
[Mae07] H. Maehara. "On Configurations of Solid Balls in 3-Space: Chromatic Numbers and Knotted Cycles". In: Graphs and Combinatorics 23.1 (2007), pp. 307–320. doi: 10.1007/s00373-007-0702-7.
[Mur87] K. Murasugi. "Jones polynomials and classical conjectures in knot theory". In: Topology 26.2 (1987), pp. 187–194.
[Nak14] K. Nakamura. The local-global principle for integral bends in orthoplicial Apollonian sphere packings. 2014. arXiv: 1401.2980 [math.NT].
[RR21a] J. L. Ramírez Alfonsín and I. Rasskin. "A polytopal generalization of Apollonian packings and Descartes' theorem". 2021. arXiv: 2107.09432 [math.CO].
[RR21b] J. L. Ramírez Alfonsín and I. Rasskin. "Ball packings for links". In: European Journal of Combinatorics 96 (2021), 103351. doi: 10.1016/j.ejc.2021.103351.
[Ras21] I. Rasskin. "Regular polytopes, sphere packings and Apollonian sections". 2021. arXiv: 2109.00655.
[Sta15] K. E. Stange. "The Apollonian structure of Bianchi groups". In: Transactions of the American Mathematical Society 370 (2015). doi: 10.1090/tran/7111.
[Ste05] K. Stephenson.
Introduction to Circle Packing: The Theory of Discrete Analytic Functions. Cambridge University Press, 2005.
[Thi87] M. B. Thistlethwaite. "A spanning tree expansion of the Jones polynomial". In: Topology 26.3 (1987), pp. 297–309.
[Wil81] J. B. Wilker. "Inversive Geometry". Ed. by Chandler Davis, Branko Grünbaum and F. A. Sherk. 1981, pp. 379–442.

IMAG, Univ. Montpellier, CNRS, Montpellier, France
Email address: jorge.ramirez-alfonsin@umontpellier.fr

Institute of Analysis and Number Theory, TU Graz, Austria
Email address: ivan.rasskin@math.tugraz.at
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We begin by proving that any link can be realized in the tangency graph of any orthoplicial Apollonian packing (Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We then focus our attention to algebraic links and show that any algebraic link can be regularly projected onto the tangency graph of a cubic Apollonian packing (Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The diagrams arising from the latter construction, called orthocubic representations, have the following interesting applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Ball number.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' A necklace representation of a link L is a sphere packing containing a collection of disjoint cycles in its tangency graph realizing L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Necklace representations have been used for the study of the volume of hyperbolic 3-manifolds [Gab+21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The ball number of L, denoted by ball(L), is defined as the minimum number of spheres needed to construct a necklace representation of L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' It is known that ball(22 1) = 8 and 9 ≤ ball(31) ≤ 12 [Mae07], where 22 1 denotes the Hopf link and 31 the trefoil knot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Nowadays, the Hopf link remains the only link such that its ball number is known.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In [RR21b], the authors gave a constructive proof showing that for every non-trivial and non-splittable link L, ball(L) ≤ 5cr(L) and put forward the following.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Conjecture 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' For any nontrivial and nonsplittable link L, ball(L) ≤ 4cr(L).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Moreover, the equality holds if L is alternating.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Orthocubic representations allow us to show the validity of the inequality in the Conjecture 1 for an infinite family of alternating algebraic links (Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2), containing the family of rational links, alternating Pretzel links or, more generally, alternating Montesinos links (see Figure 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 2010 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 52C26, 57K10, 11D72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Apollonian sphere packings, Ball number, Knots, Links, Diophantine equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' † Partially supported by grant IEA-CNRS ‡ Supported by the Austrian Science Fund (FWF), projects F-5503 and P-34763.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='03089v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='GT] 8 Jan 2023 Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' (Left) A necklace representation of the “Figure-Eight” knot 41 obtained by the method of [RR21b] with 20 spheres, (right) an orthocubic representation of the same knot with 16 spheres.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' A new visualization of the slope of rational tangles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' It is well-known that rational tangles are in correspondence to Q ∪ {∞}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Orthocubic representations allow us to reinterpret this correspondence.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Indeed, we show that the slope of rational tangle, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' the corresponding rational number, can be obtained from the coordinates of the intersection of an orthocubic representation of the rational tangle with a certain circle in a cubic circle packing (Theorem 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Primitive solutions of a Diophantine equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' By combining the coordinates of the intersection point described above with the Lorentz model of the space of spheres, we shall find infinitely many primitive solutions of the Diophantine equation x4 + y4 + z4 = 2t2 (Corollary 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Organization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In section 2, we present the background on polytopal Apollonian packings and rational tangles needed throughout the paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In section 3, we show that every link can be embedded in the 1-skeleton of several polytopal Apollonian packings, and discuss about the optimality of the orthoplicial case regarding the number of spheres used for the constructions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In section 4, we introduce and study the orthocubic representations of rational links and show the existence of orthocubic representations of algebraic links.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Finally, in Section 5, we discuss a geometric visualization of rational tangles as well as its connection with the solutions of Diophantine equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Acknowledgements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We would like to thank Alex Kontorovich for enlightening conversations on several aspects of Apollonian packings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' General background In this section, we shall review notions and definitions needed in the rest of the paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We refer the reader to [RR21b;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' RR21a] for more details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Lorentz model of the space of spheres.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' An oriented hypersphere (in short, sphere) of � Rd is the image of spherical cap of Sd under the stereographic projection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Let Ld+1,1 be the Lorentz space of dimension d+2, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' the real vector space endowed with an inner product of signature (d+1, 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' It is well- known that there is a bijection between spheres and points of � Rd and vectors of Ld+1,1 with Lorentzian norm 1 and 0, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' M¨obius transformations of � Rd corresponds to linear maps of Ld+1,1 preserving the Lorentz product and the time-direction.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The inversive coordinates of a sphere (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' point) are the Cartesian coordinates of the corresponding vector in Ld+1,1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' There are several equivalent ways (up to basis exchange) to compute the inversive coordinates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We use the Wilker’s convention ([Wil81]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' For a sphere (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' half-space) b with curvature κ ̸= 0 and center c (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' κ = 0, normal vector �n and signed 2 distance to the origin δ) its inversive coordinates are i(b) = � � � κ 2 (2c, ∥c∥2 − 1 − κ−2, ∥c∥2 + 1 − κ−2)T if κ ̸= 0, (�n, δ, δ)T if κ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' (1) For points η ∈ � Rd, the inversive coordinates are i(η) = � � � (2η, ∥η∥2 − 1, ∥η∥2 + 1)T if η ̸= ∞ (0d, 1, 1)T if η = ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' (2) Reciprocally, if i(η) = (x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' , xd+2), then η = 1 xd+2−xd+1 (x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' , xd).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We recall that the inversive coordinates of points are homogeneous, in the sense that for every λ ̸= 0, λi(η) are valid inversive coordinates of the same point of � Rd [Wil81].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Under the Wilker’s convention, the matrix of the Lorentz product is the diagonal matrix Qd+2 with diagonal entries (1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' , 1, −1), and M¨obius transformations are represented by the group of matrices O↑ d+1,1(R) = {M ∈ GLd+2(R) | MT Qd+2M = Qd+2 and Md+2,d+2 > 0}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' (3) The matrix of O↑ d+1,1(R) representing the inversion sb through the boundary of a sphere b is given by Sb := Id+2 − 2i(b)T i(b)Qd+2 (4) where Id+2 is the identity matrix of size d + 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Polytopal Apollonian packings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' A polytopal sphere packing BP in dimension d ≥ 1, is the image, up to M¨obius transformations, of the ball-arrangement projection β of an edge-scribed (d + 1)-polytope P on � Rd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The mapping β sends vertices of P to spheres of BP and the tangency relations are encoded by the edges of P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' For every 1 ≤ n ≤ d, there is a natural realization of the n-skeleton of P as a CW- complex contained in BP, which we call the n-skeleton of BP, and is made by realizing the vertices of P as the centers of BP, and then, for every face f of P, taking the convex hull of the centers corresponding to the vertices of f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The 1-skeleton of BP corresponds to the natural realization of the tangency graph of BP usually called the carrier of the packing [Ste05].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Every polytopal sphere packing admits a dual arrangement B∗ P induced by the ball arrangement projection of the polar of P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The Apollonian group A(BP) is the Klenian group generated by the inversions through the dual spheres of BP, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' the spheres of B∗ P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' If we add the symmetries of BP to the set of generators, then we obtain the symmetrized Apollonian group of BP, denoted by SA(BP).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' When the interiors of every pair of spheres in P(BP) := A(BP) · BP are disjoint, then we obtain an infinite sphere packing that we call polytopal Apollonian packing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' This class of infinite sphere packings can be seen as a particular case of the crystallographic sphere packings introduced in [KN19], where they are called polyhedral packings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Remark 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' For d ≥ 2, every polytopal sphere packing and its endowed structures are unique up to M¨obius transformations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' This can be seen as a consequence of the Mostow Rigidity Theorem [KN19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In other words, any two edge-scribed realizations of a d-polytope are connected by a M¨obius transformation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The hyperoctahedral group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We denoted by T d, Od and Cd, the analogue of the regular tetra- hedron, octahedron and cube in dimension d ≥ 2, respectively (we refer to [RR21a;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Ras21] for results on polytopal sphere packings arising from these polytopes).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We recall that, for every d ≥ 2, Od and Cd are dual from each other, while T d is self-dual.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Among these families of polytopes, two of them are of special relevance for this paper: the cube C3 and the hyperoctahedron O4, also called orthoplex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The cor- responding polytopal packings induced from these two polytopes are called cubic packings BC3 [Sta15] and orthoplicial packings BO4 [Nak14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' We shall index the elements by an antipodal labelling, where sphere bi and bi correspond to antipodal vertices in the polytope, and we shall use the bar notation ¯i := −i.' 
The vertices of O^d will be labelled by {1, …, d, 1̄, …, d̄}, where the facets are the (d − 1)-simplices with vertices {±1, …, ±d}. Since facets of O^d correspond to vertices of C^d, we shall label each vertex of C^d by the concatenation of the labels of the vertices of O^d incident to the corresponding facet. The symmetry group of O^d (or, equivalently, of C^d) is called the hyperoctahedral group, which corresponds to the Coxeter group B_d. Under the antipodal labelling, the hyperoctahedral group is generated by the signed permutations r_{ij} := (ij)(īj̄); a short computational sketch of this action is given below.

2.4. Apollonian sections. An Apollonian section of P(B_P) is a subset S(B_P) = Γ · X ⊂ P(B_P) where Γ < SA(B_P) and X ⊂ B_P. Two Apollonian sections S(B_P) = Γ · X and S(B_Q) = Γ′ · X′ of two different Apollonian packings are said to be algebraically equivalent if Γ and Γ′ are isomorphic and there is an equivariant bijection φ : S(B_P) → S(B_Q) with respect to the actions.
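To make the antipodal labelling and the signed permutations r_{ij} of Section 2.3 concrete, here is a minimal sketch (ours; labels are encoded as signed integers and a vertex of C^d as a set of them):

```python
# Signed permutations acting on the antipodal labels {+-1, ..., +-d}.
# r(i, j) is the signed permutation (i j)(i' j') of Section 2.3, where k' = -k.
def r(i, j):
    def perm(k):
        if k == i:   return j
        if k == j:   return i
        if k == -i:  return -j
        if k == -j:  return -i
        return k
    return perm

r12 = r(1, 2)
# A vertex of C^3 is a set of octahedron labels, e.g. {1, -2, 3} for "1 2bar 3";
# r12 sends the vertex 1 2bar 3 to 1bar 2 3:
assert {r12(k) for k in {1, -2, 3}} == {-1, 2, 3}

# r(i, -i) degenerates to the single swap i <-> -i, e.g. r_{3 3bar}:
r33 = r(3, -3)
assert [r33(k) for k in (3, -3, 1)] == [-3, 3, 1]
```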
With this notion in hand, the second author proved in [Ras21] that any orthoplicial Apollonian packing P(B_{O^4}) contains a tetrahedral section S_{T^3}(B_{O^4}), an octahedral section S_{O^3}(B_{O^4}) and a cubic section S_{C^3}(B_{O^4}), i.e. Apollonian sections which are algebraically equivalent to a tetrahedral P(B_{T^3}), an octahedral P(B_{O^3}) and a cubic Apollonian packing P(B_{C^3}), respectively. We shall use a cubic section S_{C^3}(B_{O^4}) as a geometric framework for the constructions introduced in Section 4.

2.5. Algebraic links. A 2-tangle (in short, tangle) is a pair (U, t) where U is a compact set of R^3 homeomorphic to a 3-ball and t is a collection {γ_1, γ_2, …, γ_m} of m ≥ 2 disjoint arcs contained in U such that γ_1 and γ_2 are open arcs whose endpoints lie on the boundary of U, while the rest of the arcs are closed. Two tangles (U, t) and (U′, t′) are said to be equivalent if there is an isotopy of R^3 carrying U to U′, t to t′ and the endpoints of (U, t) to the endpoints of (U′, t′). We shall denote this equivalence relation by t ≃ t′. Up to equivalence, we may assume that the endpoints of t lie on a same plane H. A tangle diagram of (U, t) is a regular projection of t on H, together with U ∩ H and the crossing information. When it is not required, we shall refer to a tangle (U, t) simply by t.
We shall name the endpoints in a tangle diagram by the cardinal points NE, NW, SE and SW. The elementary tangles t_0, t_1 and t_∞ are the tangles illustrated in Figure 2.

Figure 2. The elementary tangles t_0, t_1 and t_∞.

For any two tangles t and t′, we have the following operations:
(i) the sum t + t′, obtained by connecting the East endpoints of t to the West endpoints of t′ (Figure 3);
(ii) the mirror −t, the image of t under the reflection on the plane containing the equator;
(iii) the flip F(t), the image of t under the reflection on the plane perpendicular to the equator and passing through the endpoints SW and NE;
(iv) the positive half-twist H_+ : t ↦ t_1 + t;
(v) the negative half-twist H_− : t ↦ −t_1 + t.

Figure 3. Sum of tangles.

Figure 4. Mirror, flip and half-twist operations of tangles.

Rational tangles were introduced by Conway in his work on enumerating and classifying knots and links [Con70]. For a given sequence of integers a_1, …, a_n, all non-zero except maybe a_1, we denote by t(a_1, …, a_n) the rational tangle given by the following Conway algorithm [Cro04] (see Figure 5):

    t(a_1, …, a_n) := H^{a_1} F ⋯ H^{a_n} F (t_∞).    (5)

Figure 5. The rational tangle t(2, −2, −3) obtained by Conway's algorithm, through the steps t_∞ ↦ t(−3) ↦ t(−2, −3) ↦ t(2, −2, −3).

The slope of a rational tangle t(a_1, …, a_n) is the rational number p/q obtained by the continued fraction expansion

    [a_1, …, a_n] := a_1 + 1/(a_2 + 1/(⋱ + 1/a_n)) = p/q.    (6)

The name rational tangle comes from the connection, established by Conway's theorem [Con70], between the family of tangles produced by Conway's algorithm and the rational numbers: two rational tangles are equivalent if and only if they have the same slope. We shall denote by t_{p/q} the class of rational tangles with slope p/q up to isotopy. The closure of a tangle (U, t) is the link formed by joining the endpoints by two disjoint and unlinked paths in the exterior of U. Up to equivalence, there are two possible closures: the numerator N(t), obtained by joining the northern and the southern endpoints separately, and the denominator D(t), obtained by joining the western and the eastern endpoints (see Figure 6).

Figure 6. The tangle closures N(t) and D(t).

A rational link is the closure of a rational tangle.
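On slopes, the flip F acts as s ↦ 1/s and the half-twist H^a as s ↦ s + a, which is exactly why the algorithm (5) computes the continued fraction (6). The following sketch (ours, not from the paper) runs (5) on slopes, keeping a slope as an integer pair (p, q) so that the slope 1/0 of t_∞ needs no special case:

```python
from math import gcd

def slope(twists):
    p, q = 1, 0                          # slope of t_infinity
    for a in reversed(twists):           # (5): t(a1,...,an) = H^{a1}F ... H^{an}F(t_inf)
        p, q = q, p                      # the flip F inverts the slope
        p += a * q                       # the half-twists H^a add a to the slope
    if q < 0:
        p, q = -p, -q
    g = gcd(abs(p), q)
    return p // g, q // g

assert slope([2, -2, -3]) == (11, 7)                # the tangle of Figure 5: slope 11/7
assert slope([2, -2, -3]) == slope([1, 1, 1, 3])    # equal slopes: equivalent tangles
```

By Conway's theorem, the last assertion shows t(2, −2, −3) ≃ t(1, 1, 1, 3).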
Algebraic tangles are those obtained by sums and flips of rational tangles [Ada94]. Accordingly, links obtained as closures of algebraic tangles are said to be algebraic or arborescent [GT86]. Pretzel links P(q_1, …, q_n) := N(t_{1/q_1} + ⋯ + t_{1/q_n}) are a particular case of algebraic links (see Figure 7).

Figure 7. The pretzel knot P(3, −2, 3), which corresponds to the knot 8_19 in the Alexander–Briggs notation.

3. Necklace representations in polytopal Apollonian packings

In this section, we investigate the following question: given a link L and a polytopal Apollonian sphere packing P(B_{P^4}), can we find a necklace representation of L contained in P(B_{P^4})? We answer this question positively for some 4-polytopes. We first have the following result.

Theorem 3.1. Let L be a link and let P(B_{O^4}) be an orthoplicial Apollonian sphere packing. Then there is a necklace representation of L contained in P(B_{O^4}).

Let us first introduce a preliminary notion. Let P(B_P) be a polytopal Apollonian sphere packing, where P is an edge-scribed (d + 1)-polytope.
For every edge {i, j} of P, we define the edge-figure section of P(B_P) as the Apollonian section S_{ij}(B_P) := Γ_{ij} · B_P, where Γ_{ij} is the stabilizer subgroup of the Apollonian group of P for {b_i, b_j}. The subgroup Γ_{ij} is a Euclidean reflection group. Indeed, we may apply an inversion to B_P through a sphere centered at the tangency point of b_i and b_j, mapping these two spheres into two parallel half-spaces tangent at infinity. We then observe that every generator of Γ_{ij} must be a reflection on a hyperplane orthogonal to b_i and b_j (see Figure 9, left).

Proof of Theorem 3.1. Let B^{12}_{O^4} be the orthoplicial packing depicted in Figure 8. The edge-figure section S_{12}(B^{12}_{O^4}) := Γ_{12} · B^{12}_{O^4} is generated by the action of the parabolic subgroup of the orthoplicial Apollonian group Γ_{12} := ⟨s_{1234}, s_{123̄4}, s_{1234̄}, s_{123̄4̄}⟩.

    B^{12}_{O^4} | κ (δ if κ = 0) | c (n̂ if κ = 0) | i(b)^T
    b_1   | 0 (1) | (0, 0, 1)     | (0, 0, 1, 1, 1)
    b_2   | 0 (1) | (0, 0, −1)    | (0, 0, −1, 1, 1)
    b_3   | 1     | (1, 1, 0)     | (1, 1, 0, 0, 1)
    b_4   | 1     | (−1, 1, 0)    | (−1, 1, 0, 0, 1)
    b_1̄   | 2     | (0, 0, −1/2)  | (0, 0, −1, −1, 1)
    b_2̄   | 2     | (0, 0, 1/2)   | (0, 0, 1, −1, 1)
    b_3̄   | 1     | (−1, −1, 0)   | (−1, −1, 0, 0, 1)
    b_4̄   | 1     | (1, −1, 0)    | (1, −1, 0, 0, 1)

Figure 8. The orthoplicial packing B^{12}_{O^4}.

We notice that the 1-skeleton of S_{12}(B^{12}_{O^4}) contains an infinite square-grid, with two vertices lying on the line orthogonal to each square and connected to every corner (see Figure 9).

Figure 9. (Left) B^{12}_{O^4} with the mirrors of the generators of Γ_{12}, viewed from above; (right) S_{12}(B^{12}_{O^4}) with its 1-skeleton.
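The data of Figure 8 can be checked directly (our sketch, for the reader following along in code; it assumes the Lorentzian product ⟨x, y⟩ = x Q_5 y^T with Q_5 = diag(1, 1, 1, 1, −1), under which ⟨i(b), i(b)⟩ = 1 and tangency corresponds to ⟨i(b), i(b′)⟩ = −1). The tangency graph of B^{12}_{O^4} is the 1-skeleton of O^4, i.e. every pair of spheres is tangent except the antipodal pairs, and the generators of Γ_{12} are the inversions through the dual spheres of the four facets of O^4 containing the edge {1, 2}:

```python
import itertools
import numpy as np

Q = np.diag([1, 1, 1, 1, -1])

# Inversive coordinates i(b)^T from the table of Figure 8; label -i stands for i-bar.
B = {
     1: (0, 0, 1, 1, 1),    2: (0, 0, -1, 1, 1),
     3: (1, 1, 0, 0, 1),    4: (-1, 1, 0, 0, 1),
    -1: (0, 0, -1, -1, 1), -2: (0, 0, 1, -1, 1),
    -3: (-1, -1, 0, 0, 1), -4: (1, -1, 0, 0, 1),
}
prod = lambda u, v: np.array(u) @ Q @ np.array(v)

for i, j in itertools.combinations(B, 2):
    if i == -j:
        assert prod(B[i], B[j]) < -1      # antipodal vertices: disjoint spheres
    else:
        assert prod(B[i], B[j]) == -1     # every other pair is tangent

# Facets of O^4 are the sign patterns (e1*1, e2*2, e3*3, e4*4); those containing
# both vertices 1 and 2 give the four generators of Gamma_12:
facets = [tuple(e * k for e, k in zip(signs, (1, 2, 3, 4)))
          for signs in itertools.product((1, -1), repeat=4)]
print([f for f in facets if 1 in f and 2 in f])
# [(1, 2, 3, 4), (1, 2, 3, -4), (1, 2, -3, 4), (1, 2, -3, -4)]
```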
The well-known Alexander Theorem [Ale23] implies that there is a braid γ whose closure is isotopically equivalent to L. We can always draw a diagram of γ in a regular square-grid, where the crossings are drawn at the intersections of the diagonals of the squares and the remaining arcs use the edges of the grid, as in Figure 10 (center). This square-grid diagram induces a polygonal closed path in the 1-skeleton of S_{12}(B^{12}_{O^4}), as in Figure 10 (right), which gives us a necklace representation N_L ⊂ P(B^{12}_{O^4}). Since orientation-preserving Möbius transformations are ambient isotopies of R̂^3, by Remark 1 there is a Möbius transformation µ carrying P(B^{12}_{O^4}) to P(B_{O^4}) and N_L to a necklace representation of L contained in P(B_{O^4}). □

Figure 10. (Left) A diagram of the trefoil obtained as a closed braid; (center) a square-grid diagram of the same closed braid; (right) a necklace representation of the trefoil in S_{12}(B^{12}_{O^4}).

We wonder whether Theorem 3.1 can be proved without invoking Alexander's Theorem. The construction used in the proof above can be used to show the inequality of Conjecture 1 for 2-braid links.

Corollary 3.1. For any 2-braid link L, we have ball(L) ≤ 4 cr(L).

Proof. The necklace representation induced by the square-grid diagram of an alternating 2-braid with n crossings has 4n + 2 spheres (see Figure 11, left).
For the closure, we can exchange the last four spheres for the two half-spaces of S_{12}(B^{12}_{O^4}) (Figure 11, right), leaving 4n spheres in total. □

Figure 11. (Left) A necklace representation of the 2-braid with 4 crossings in the square-grid section, with 18 spheres; (right) a necklace representation of the closure of the 2-braid with 4 crossings, in the square-grid section, with 16 spheres.

The upper bound of Corollary 3.1 cannot be extended to n-braid links when n ≥ 3. The main reason is that the half-spaces of the square-grid section cannot be used to close all the strands of the braid, which may increase the number of spheres to more than 4 times the number of crossings. A strategy similar to the one used in the proof of Theorem 3.1 can be employed to prove that every link admits a necklace representation in other polytopal Apollonian sphere packings P(B_{P^4}). For instance, if P^4 has a regular triangle as edge-figure, then the 1-skeleton of the edge-figure section contains a subgraph topologically equivalent to a triangular grid. In this case, two tangent triangles of the triangular grid make up a rhombus, which can play the same role as the square in the square-grid. Indeed, if there is a chain of spheres connecting the opposite vertices on the long diagonal of the rhombus, then we can use them to construct a crossing. It turns out that this is the case for the 4-simplex, the hypercube, the 24-cell and the 120-cell (see Figure 12).
Although these triangular constructions produce necklace representations with more spheres than the orthoplicial one, they could be interesting for other purposes, such as constructing 4-polytopes containing a given link in their graphs [Epp14].

Figure 12. A necklace representation of the trefoil knot in P(B_{T^4}) (left) and P(B_{C^4}) (right).

4. Orthocubic representations of algebraic links

Let B_{C^3} and B_{O^4} be the cubic and the orthoplicial packing given in Figures 13 and 14, respectively. We point out that the labelling of B_{O^4} has been chosen in such a way that, for every b_i ∈ B_{O^4}, the label i is positive if and only if the third coordinate of the center of b_i is positive.

    B_{C^3}   | κ       | c                  | i(b)^T
    b_{1̄23}   | 1 + √2  | (−1 + √2)(1, −1)   | (1, −1, −1, √2)
    b_{12̄3}   | 1 + √2  | (−1 + √2)(−1, 1)   | (−1, 1, −1, √2)
    b_{123̄}   | −1 + √2 | (1 + √2)(−1, −1)   | (−1, −1, 1, √2)
    b_{1̄2̄3̄}   | −1 + √2 | (1 + √2)(1, 1)     | (1, 1, 1, √2)
    b_{12̄3̄}   | −1 + √2 | (1 + √2)(−1, 1)    | (−1, 1, 1, √2)
    b_{1̄23̄}   | −1 + √2 | (1 + √2)(1, −1)    | (1, −1, 1, √2)
    b_{1̄2̄3}   | 1 + √2  | (−1 + √2)(1, 1)    | (1, 1, −1, √2)
    b_{123}   | 1 + √2  | (−1 + √2)(−1, −1)  | (−1, −1, −1, √2)

Figure 13. The cubic packing B_{C^3}.
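The combinatorics of Figure 13 can be verified directly from the inversive coordinates (our sketch; it uses the Lorentzian product with Q_4 = diag(1, 1, 1, −1), the labelling above encoded as sign patterns, and the fact that two cube vertices are adjacent exactly when their labels differ in one sign):

```python
import itertools
import numpy as np

Q = np.diag([1, 1, 1, -1])
s = 2 ** 0.5

# Rows of Figure 13: sign pattern of the cube vertex -> i(b)^T.
cube = {
    (-1,  1,  1): ( 1, -1, -1, s), ( 1, -1,  1): (-1,  1, -1, s),
    ( 1,  1, -1): (-1, -1,  1, s), (-1, -1, -1): ( 1,  1,  1, s),
    ( 1, -1, -1): (-1,  1,  1, s), (-1,  1, -1): ( 1, -1,  1, s),
    (-1, -1,  1): ( 1,  1, -1, s), ( 1,  1,  1): (-1, -1, -1, s),
}
for u, v in itertools.combinations(cube, 2):
    p = np.array(cube[u]) @ Q @ np.array(cube[v])
    differ = sum(a != b for a, b in zip(u, v))
    # tangency (product -1) holds exactly along the edges of the cube:
    assert (differ == 1) == np.isclose(p, -1)
print("tangency graph of B_C3 = 1-skeleton of the cube")
```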
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='123 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='123 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='Figure 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' The cubic packing BC3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='BO4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='κ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='c ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='i(b)T ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + 1/ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) (−1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 − 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) (−1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} 
+page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 − 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 − 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) (−1 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 − 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 −1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 + ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ( ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 −1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='b4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 + 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 + ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) (−1 −1 −1) 1/ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 (−1 −1 −1 −1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='√ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='4 1 ' metadata={'source': 
Figure 14. The orthoplicial packing BO4.

Let SC3(BO4) := ΓC3 · BO4 be a cubic Apollonian section of P(BO4), where ΓC3 is the group generated by the six elements s±(1234) whose mirrors are depicted in Figure 15. The equivariant bijection φ : P(BC3) → SC3(BO4) is induced by the following isomorphisms (see Figure 15):

A(BC3) → ΓC3 :  s±1 ↦ s±(1234),  s±2 ↦ s±(1234),  s±3 ↦ s±(1234),
BC3 → BO4 :  b±(123) ↦ b±1,  b±(123) ↦ b±2,  b±(123) ↦ b±3,  b±(123) ↦ b±4,

where the bar decorations distinguishing the signed indices are those indicated in Figure 15.

Figure 15. (Right) The cubic packing BC3 with its dual; (left) BO4 with the mirrors of the generators of the cubic section.

An alternative geometric way to obtain the bijection between the cubic section and the cubic Apollonian packing is to intersect BO4 and its dual with the XY-plane. The relative position of the centers of the spheres of the cubic section with respect to the XY-plane induces a 2-coloring of the cubic Apollonian packing in which two disks of the same color never intersect (see Figure 16). We call this coloring the z-coloring. By extending the z-coloring to the vertices of the 1-skeleton of P(BC3), we obtain a proper 2-coloring of the tangency graph.

Figure 16. (Left) P(BC3) with the z-coloring; (right) SC3(BO4) with the XY-plane.
In the same direction as Theorem 3.1, we present the following result, which allows us to prove the inequality of Conjecture 1 for an infinite family of alternating algebraic links (containing, in particular, 2-braid links).

Theorem 4.1. For any algebraic link L, there is a necklace representation of L contained in SC3(BO4).

4.1. Orthocubic shifts. Let BO3 be the octahedral packing dual to the cubic packing BC3; it can also be obtained by intersecting the dual arrangement of BO4 with the XY-plane. Let us consider the symmetries $r_{12}$, $r_{\pm 13}$, $r_{\pm 23}$ and $r_{\bar{3}3}$ of BO3. By duality, these are also symmetries of BC3. We recall that $r_{ij}$ denotes the signed permutation $(ij)(\bar{i}\,\bar{j})$. In the octahedral packing BO3, $r_{12}$ corresponds to the reflection in the line {x = y}, $r_{\pm 13}$ is the inversion through the circle of radius √2 centered at (±1, 0), and $r_{\bar{3}3}$ is the inversion through the unit circle centered at the origin (see Figure 17).

Figure 17. BC3 with the mirrors of the generators of the cubic shifts.

We define the cubic shifts as the following six elements of the symmetrized Apollonian group of BC3:

$$\mu_i := s_i r_{i3} \ \text{ for every } i \in \{\pm 1, \pm 2, -3\}, \qquad \mu_3 := s_3 r_{\bar{3}3}. \qquad (7)$$
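To fix ideas, here is a minimal Python sketch (a toy model; the dictionary encoding and the function names are ours, not taken from the paper) realizing the signed permutations $r_{ij} = (ij)(\bar{i}\,\bar{j})$ as maps on the signed indices {±1, ±2, ±3} and checking that each mirror is an involution.

```python
# Toy model of the signed permutations r_ij = (ij)(-i -j) acting on {±1, ±2, ±3}.
# The dict-based encoding is purely illustrative.

def r(i, j):
    """Signed transposition (i j)(-i -j): swaps i <-> j and -i <-> -j."""
    perm = {k: k for k in (1, 2, 3, -1, -2, -3)}
    perm[i], perm[j] = j, i
    perm[-i], perm[-j] = -j, -i
    return perm

def compose(p, q):
    """Composition p ∘ q of two signed permutations."""
    return {k: p[q[k]] for k in q}

r12 = r(1, 2)                     # reflection in the line {x = y}
r13, r13_bar = r(1, 3), r(-1, 3)  # inversions through the circles centered at (±1, 0)
r33_bar = r(3, -3)                # inversion through the unit circle

identity = {k: k for k in (1, 2, 3, -1, -2, -3)}
for g in (r12, r13, r13_bar, r33_bar):
    assert compose(g, g) == identity  # every mirror is an involution
```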
In Figure 18, we show the action of the cubic shifts on the 1-skeleton of BC3 with the z-coloring. We notice that µ±1 and µ±2 (resp. µ±3) preserve (resp. reverse) the z-coloring. The bijection φ : BC3 → BO4 induces the following morphisms:

φ : Sym(BC3) → Sym(BO4)        φ : SA(BC3) → SA(BO4)
  r12 ↦ r12                      µ1 ↦ s1234 r13
  r13 ↦ r13                      µ−1 ↦ s1234 r24
  r23 ↦ r23                      µ2 ↦ s1234 r23
  r13 ↦ r24                      µ−2 ↦ s1234 r14
  r23 ↦ r14                      µ3 ↦ s1234 r12 r34
  r33 ↦ r12 r34                  µ−3 ↦ s1234 r12 r34

(the bar decorations distinguishing the repeated symbols are those of Figures 15 and 17). For every i = ±1, ±2, ±3, we call the elements φ(µi) ∈ SA(BO4) the orthocubic shifts.

Figure 18. The action of the cubic shifts on the 1-skeleton of BC3 with the z-coloring.

4.2. Orthocubic coordinates. The cubic Apollonian packing P(BC3) can be seen as a Coxeter system (W, S), where W = A(BC3) and S = {s±1, s±2, s±3} is the set of generators. Its Coxeter graph is the graph of the cube with an ∞ label at each edge. Therefore, the reduced words of (W, S) are exactly the words without consecutive repeated letters. For each b ∈ SC3(BO4), there is a reduced word w = sj1 · · · sjn and an element bi ∈ BC3 such that b = w · bi. The depth of b is the minimal length of such a word w in terms of the generators. By combining the reduced words of (W, S) with the bijection φ : P(BC3) → SC3(BO4), we can give a coordinate system to the spheres in the cubic section.
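This Coxeter structure is easy to experiment with. The following Python sketch (illustrative only; the names are ours) enumerates the reduced words over S = {s±1, s±2, s±3}, i.e. the words with no letter immediately repeated, and confirms that there are 6·5^(n−1) of them of length n.

```python
from itertools import product

GENERATORS = (1, -1, 2, -2, 3, -3)  # labels for s_{±1}, s_{±2}, s_{±3}

def reduced_words(n):
    """Reduced words of length n: no generator appears twice in a row
    (every edge of the Coxeter graph carries the label ∞)."""
    for word in product(GENERATORS, repeat=n):
        if all(word[k] != word[k + 1] for k in range(n - 1)):
            yield word

for n in range(1, 5):
    count = sum(1 for _ in reduced_words(n))
    assert count == 6 * 5 ** (n - 1)
    print(f"length {n}: {count} reduced words")
```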
We define the orthocubic coordinates of every b ∈ SC3(BO4) as the label $i_{j_1\cdots j_n}$ determined by

$$\varphi(s_{j_1}) \cdots \varphi(s_{j_n}) \cdot b_i = b, \qquad (8)$$

where i ∈ {±1, ±2, ±3, ±4} and $j_l$ ∈ {±1, ±2, ±3}. In Figure 19, we show the orthocubic coordinates of the elements of SC3(BO4) of depth ≤ 1.

Figure 19. The orthocubic coordinates of the elements of SC3(BO4) of depth ≤ 1.

4.3. Orthocubic representations. We define an orthocubic path γ as a polygonal curve in the 1-skeleton of SC3(BO4). A cubic diagram of γ is its orthogonal projection on the XY-plane. The orthogonal projection of the 1-skeleton of SC3(BO4) on the XY-plane is the 1-skeleton of P(BC3) together with the diagonal edges of each square face, which join two vertices of the same color under the z-coloring. The crossings of any cubic diagram arise as the intersections of the two diagonal edges of a same square face. With the information given by the z-coloring, the over/under crossing information can be deduced from the color of the vertices of the diagonal edges (black = over, white = under). We define an orthocubic representation of a link L as a collection of disjoint closed orthocubic paths isotopically equivalent to L. Every orthocubic representation induces a necklace representation in SC3(BO4). In Figure 20, we show an orthocubic representation of the trefoil knot and its corresponding cubic diagram.
Figure 20. (Left) An orthocubic representation of the trefoil knot and (right) its corresponding cubic diagram.

An orthocubic path is encoded by the sequence ⟨i_{w1}, · · · , i_{wn}⟩ of the orthocubic coordinates of its elements, given in the linear order induced by γ. Since we consider unoriented paths, and the concatenation of two paths gives another path, the vectors encoding orthocubic paths must be quotiented by the following relations:
(i) (Symmetry) ⟨i_{w1}, · · · , i_{wn}⟩ = ⟨i_{wn}, · · · , i_{w1}⟩.
(ii) (Concatenation) {⟨i_{w1}, · · · , i_{wn}⟩, ⟨i_{wn}, · · · , i_{wm}⟩} = {⟨i_{w1}, · · · , i_{wn}, · · · , i_{wm}⟩}.

4.4. Orthocubic tangles. Let T be the tetrahedron in the 3-skeleton of BO4 with vertices {1, 2, 3, 4}. We define an orthocubic tangle as a tangle (T, t), where t is a collection {γ1, γ2, . . . , γm} of m ≥ 2 disjoint orthocubic paths contained in T such that the endpoints of γ1 and γ2 lie in the corners of T and the remaining orthocubic paths are closed. In what follows, we construct the analogs of the elementary tangles and of the sum, mirror, flip and half-twist operations for orthocubic tangles by using elements of the symmetrized Apollonian group of BO4.
(i) The orthocubic elementary tangles: t0 := {⟨1, 4⟩, ⟨3, 2⟩}, t1 := {⟨1, 2⟩, ⟨3, 4⟩} and t∞ := {⟨1, 3⟩, ⟨2, 4⟩}.

Figure 21. The elementary orthocubic tangles t0, t1 and t∞.

(ii) The orthocubic flip FO t := r12 t, where r12 ∈ Sym(BO4) acts as the reflection in the plane {y = x} of R3.
(iii) The orthocubic mirror −t := φ(µ−3) t ∪ {⟨1, 2⟩, ⟨1, 2⟩, ⟨3, 4⟩, ⟨3, 4⟩}.

Figure 22. The orthocubic flip and mirror.

(iv) The orthocubic sum t′ + t := φ(µ−1) t′ ∪ {⟨1, 4⟩, ⟨2, 3⟩} ∪ φ(µ1) t.
(v) The orthocubic half-twists H⁺O t := t1 + t and H⁻O t := −t1 + t.

Figure 23. The orthocubic sum and half-twists.

We define the orthocubic tangle closures by:
(vi) The orthocubic numerator NO t := t ∪ {⟨1, 2₃, 3₃, 4⟩, ⟨2, 1₃, 4₃, 3⟩};
(vii) The orthocubic denominator DO t := t ∪ {⟨1, 2₃, 4₃, 3⟩, ⟨2, 1₃, 3₃, 4⟩}.

Figure 24. The orthocubic tangle closures.

Since the orthocubic elementary tangles, operations and closures are isotopically equivalent to their homonyms in the classical framework of tangles, we can mimic Conway's method to define an orthocubic rational tangle tO[a1, · · · , an] ≃ t[a1, · · · , an] by

$$t_O[a_1, \cdots, a_n] := H_O^{a_1} F_O \cdots H_O^{a_n} F_O\, t_\infty. \qquad (9)$$

We now have all the elements to prove Theorem 4.1.
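Formula (9) mirrors Conway's classical construction, under which tO[a1, · · · , an] ≃ t[a1, · · · , an] carries the fraction p/q = a1 + 1/(a2 + 1/(· · · + 1/an)). A short Python sketch (ours, purely illustrative) recovering p/q from the twist sequence:

```python
from fractions import Fraction

def tangle_fraction(coeffs):
    """Fraction p/q of the rational tangle t[a1, ..., an], evaluated
    as the continued fraction a1 + 1/(a2 + 1/(... + 1/an))."""
    value = Fraction(coeffs[-1])
    for a in reversed(coeffs[:-1]):
        value = a + 1 / value
    return value

print(tangle_fraction([1, 1, 1]))  # 3/2, the tangle t_{3/2} of Figure 26
print(tangle_fraction([2, 3]))     # 7/3
```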
Proof of Theorem 4.1. Every rational tangle admits a necklace representation in SC3(BO4), via the orthocubic version of Conway's construction. By combining the latter with the orthocubic tangle operations, we obtain that any algebraic link admits an orthocubic representation. □

4.5. Improvement of the upper bound of the ball number. The orthocubic Conway algorithm can be slightly adapted in order to obtain the upper bound of Theorem 4.2 below. For every a1 ≥ 0 and a2, . . . , an > 0, we define the reduced orthocubic Conway algorithm t̃O[a1, · · · , an] by

$$\tilde{t}_O[a_1, \cdots, a_n] := H_O^{a_1} F_O \cdots H_O^{a_n - 1}\, t_1. \qquad (10)$$

Clearly, for every a1 ≥ 0 and a2, . . . , an > 0, we have tO[a1, · · · , an] ≃ t̃O[a1, · · · , an].

Theorem 4.2. Let L be an algebraic link obtained by the closure of the algebraic tangle t_{p1/q1} + · · · + t_{pm/qm}, where all the pi/qi have the same sign.
Then ball(L) ≤ 4cr(L).

Proof. Let L be an algebraic link given by the closure N(t), where t is the algebraic tangle t_{p1/q1} + · · · + t_{pm/qm}. The condition that all the pi/qi have the same sign implies that the closure of t induces an alternating diagram of L; thus, by the Tait conjecture on the crossing number of alternating diagrams [Kau87; Thi87; Mur87], the crossing number of L equals the sum of the crossing numbers of the t_{pi/qi}. Without loss of generality, we may assume that all the pi/qi are positive. For every pi/qi with positive continued fraction expansion [a1, · · · , an], let the orthocubic tangle t_{pi/qi} := t̃O[a1, · · · , an]. Since FO does not change the necklace length, and H⁺O increases the necklace length by 4, we have

$$|t_{p_i/q_i}| = 4(a_1 + \ldots + a_n - 1) + |t_1| = 4(a_1 + \ldots + a_n) = 4\,\mathrm{cr}(t_{p_i/q_i}).$$

Now form the orthocubic tangle given by the orthocubic sums t_{p1/q1} + · · · + t_{pm/qm}. By the equivalence between the orthocubic and the classical tangle operations, this orthocubic tangle is isotopically equivalent to t, and since the necklace length is additive under the orthocubic sum, its necklace length is

$$|t_{p_1/q_1}| + \cdots + |t_{p_m/q_m}| = 4\,\mathrm{cr}(t_{p_1/q_1}) + \cdots + 4\,\mathrm{cr}(t_{p_m/q_m}) = 4\,\mathrm{cr}(L).$$
Finally, we notice that the exterior orthocubic paths ⟨1, 4⟩ and ⟨2, 3⟩ are not included in any orthocubic tangle obtained after applying an orthocubic sum. Therefore, we can use the exterior paths to close the tangle and, in this way, obtain a necklace representation of L with 4cr(L) spheres. □

4.6. No tightness for non-alternating links. The family of algebraic links considered in Theorem 4.2 contains all the rational links as well as other well-known families, such as the Montesinos links with positive coefficients. These are the links obtained by the closure of t_{p1/q1} + · · · + t_{pn/qn} + t_r with pi/qi > 0 and r ≥ 0. If r = 0 and every pi = 1, then we obtain the pretzel link P(q1, . . . , qn). In the non-alternating case, it is possible to construct orthocubic algebraic tangles with necklace length strictly less than 4 times the crossing number. The first non-trivial example that we have found with this property is the pretzel knot P(3, −2, 3), which corresponds to the knot 8_19 in the Alexander–Briggs–Rolfsen notation. This knot is not alternating [Cro04], and it admits an orthocubic necklace representation with 28 spheres (= (7/2)·cr(8_19); see Figure 25). However, it becomes trickier to establish a relation with the crossing number in the non-alternating case since, in general, the crossing number does not correspond to the sum of the crossings of its rational factors.
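To see Theorem 4.2 in numbers, the sketch below (our own bookkeeping in Python, not code from the paper) computes the bound 4·cr(L) from the positive continued fraction expansions of the tangle fractions. For the alternating pretzel P(3, 2, 3) it gives 32 spheres, whereas the non-alternating P(3, −2, 3) = 8_19 of Figure 25 admits a necklace representation with only 28.

```python
from fractions import Fraction

def positive_cf(p, q):
    """Positive continued fraction [a1, ..., an] of p/q > 0 (a1 >= 0, ai >= 1)."""
    coeffs, x = [], Fraction(p, q)
    while True:
        a = x.numerator // x.denominator
        coeffs.append(a)
        if x == a:
            return coeffs
        x = 1 / (x - a)

def ball_upper_bound(tangle_fractions):
    """Theorem 4.2 bound 4·cr(L) for L the closure of t_{p1/q1} + ... + t_{pm/qm},
    all fractions of the same (positive) sign."""
    crossings = sum(sum(positive_cf(p, q)) for p, q in tangle_fractions)
    return 4 * crossings

print(ball_upper_bound([(3, 1)]))                  # 2-braid trefoil: 4·3 = 12
print(ball_upper_bound([(1, 3), (1, 2), (1, 3)]))  # pretzel P(3, 2, 3): 4·8 = 32
```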
Figure 25. An orthocubic representation of the knot 8_19 with 28 spheres (left) and its cubic diagram (right).

5. A new visualization of the slope of rational tangles

The slope p/q of a rational tangle t_{p/q} can be identified with the slope of the meridian of a solid torus that is the branched double covering of the rational tangle [Cro04]. We shall present a new geometric interpretation of the correspondence between rational tangles and rational numbers. We do so by relating the slope of a tangle to the slope of the line passing through the origin and the last tangency point in the orthocubic Conway construction. Astonishingly, this approach turns out to be helpful for finding infinitely many primitive solutions of the Diophantine equation x⁴ + y⁴ + z⁴ = 2t².

Let p/q be a positive fraction with positive continued fraction expansion [a1, · · · , an]. We define the orthocubic point η_{p/q} of the rational tangle t_{p/q} as the tangency point of the two disks in the cubic diagram of tO[a1, · · · , an] corresponding to the last edge of the orthocubic tangle. By the last edge, we mean the edge connecting the disk in the upper-right corner (see Figure 26). We point out that the disk in the upper-right corner corresponds to the sphere b123 ∈ BC3, which remains fixed under the orthocubic Conway algorithm. We can naturally extend the notion of orthocubic point to tangles with negative fractions by applying a reflection through the plane {x = 0} to the whole setting.
Theorem 5.1. For every p/q ∈ Q± ∪ {∞}, η_{p/q} is the first intersection of the line passing through the origin with slope ±(p/q)⁻² with the boundary of the disk b±123 ∈ BC3.

Proof. It is enough to prove the positive case. Let p ≥ 0 and q ≥ 1 be two coprime integers. We claim that

$$i(\eta_{p/q}) = \begin{pmatrix} p^2 \\ q^2 \\ (p-q)^2 \\ \sqrt{2}\,(p^2 - pq + q^2) \end{pmatrix}. \qquad (11)$$

This would imply that the Cartesian coordinates of η_{p/q} are

$$\frac{1}{\sqrt{2}\,pq - (1-\sqrt{2})(p-q)^2}\,(p^2, q^2),$$

which is exactly the first point of intersection of the line {p²y = q²x} with the circle centered at (1 + √2, 1 + √2) of radius 1 + √2, which is the boundary of b123 ∈ BC3.

Let us prove equality (11). The positivity of p and q implies that we can find a positive continued fraction expansion [a1, · · · , an] = p/q with a1 ≥ 0 and ai ≥ 1 for every 1 < i ≤ n. Let t_{p/q} be the orthocubic tangle tO[a1, . . . , an], and let η_{p/q} and η∞ be the orthocubic points of t_{p/q} and t∞, respectively. By the definitions of the orthocubic operations HO and FO, the isomorphism φ : SA(BC3) → SA(BO4) and the definition of orthocubic rational tangles given in (9), we have

$$t_{p/q} = H_O^{a_1} F_O \cdots H_O^{a_n} F_O\, t_\infty \;\Longrightarrow\; \eta_{p/q} = \mu_1^{a_1} r_{12} \cdots \mu_1^{a_n} r_{12}(\eta_\infty) = (s_1 r_{13})^{a_1} r_{12} \cdots (s_1 r_{13})^{a_n} r_{12}(\eta_\infty),$$

where s1, r13 and r12 are the elements of SA(BC3) described in subsection 4.1.
The inversive coordinates of η∞ and the matrices representing s1, r13 and r12 can be computed by using equations (2) (with λ = (1 + √2)/2) and (4), giving

$$i(\eta_\infty) = \begin{pmatrix} 1 \\ 0 \\ 1 \\ \sqrt{2} \end{pmatrix}, \quad
S_1 = \begin{pmatrix} -3 & 0 & 0 & 2\sqrt{2} \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ -2\sqrt{2} & 0 & 0 & 3 \end{pmatrix}, \quad
R_{13} = \begin{pmatrix} 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\ 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}, \quad
R_{12} = \begin{pmatrix} 0 & 1 & 0 & 0 \\ 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}.$$

Let M(k) := (S1 R13)ᵏ R12. By induction on k, one finds

$$M(k) = \begin{pmatrix} 0 & 1-k^2 & -k(k+2) & \sqrt{2}\,k(k+1) \\ 1 & 0 & 0 & 0 \\ 0 & -k(k-2) & 1-k^2 & \sqrt{2}\,k(k-1) \\ 0 & -\sqrt{2}\,k(k-1) & -\sqrt{2}\,k(k+1) & 2k^2+1 \end{pmatrix}.$$

We finally prove equality (11) by induction on the number n of coefficients in the continued fraction expansion of p/q. For n = 1 (that is, p = a1 and q = 1) we have

$$i(\eta_{a_1}) = M(a_1)\begin{pmatrix} 1 \\ 0 \\ 1 \\ \sqrt{2} \end{pmatrix} = \begin{pmatrix} a_1^2 \\ 1 \\ (a_1-1)^2 \\ \sqrt{2}\,(a_1^2 - a_1 + 1) \end{pmatrix}.$$

Suppose equality (11) holds for n − 1 ≥ 1. Let r/s = a2 + 1/(a3 + 1/(· · · + 1/an)). Then

$$i(\eta_{p/q}) = M(a_1) M(a_2) \cdots M(a_n) \begin{pmatrix} 1 \\ 0 \\ 1 \\ \sqrt{2} \end{pmatrix} = M(a_1)\begin{pmatrix} r^2 \\ s^2 \\ (r-s)^2 \\ \sqrt{2}\,(r^2 - rs + s^2) \end{pmatrix} = \begin{pmatrix} (ra_1+s)^2 \\ r^2 \\ (ra_1+s-r)^2 \\ \sqrt{2}\,((ra_1+s)^2 - r(ra_1+s) + r^2) \end{pmatrix}.$$

We finally notice that

$$\frac{ra_1+s}{r} = a_1 + \frac{s}{r} = a_1 + \frac{1}{r/s} = a_1 + \cfrac{1}{a_2 + \cfrac{1}{\;\ddots\; + \cfrac{1}{a_n}}} = \frac{p}{q},$$

and therefore equality (11) holds. □

Corollary 5.1. The Diophantine equation

$$x^4 + y^4 + z^4 = 2t^2 \qquad (12)$$

has infinitely many primitive solutions.

Proof. Since points of $\widehat{\mathbb{R}}^2$ correspond to light-like vectors of $\mathbb{L}^{3,1}$, we can use the inversive coordinates of the orthocubic point of every rational tangle, given in equation (11), to produce primitive solutions of the Diophantine equation by taking

$$x = p, \quad y = q, \quad z = p - q, \quad t = p^2 - pq + q^2. \qquad (13)$$ □

We hope and expect the above approach to be helpful in investigating solutions of other types of Diophantine equations.
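The induction above is easy to check numerically. The following Python sketch (ours, for illustration) rebuilds M(a1) · · · M(an) from the matrices S1, R13, R12 displayed in the proof, compares the result with the closed form (11), and emits the primitive solutions (13); the Z[√2] encoding (a pair (a, b) stands for a + b√2) and the function names are our own scaffolding.

```python
# Numerical check of equality (11) and of the solutions (13), exactly over Z[√2].
from fractions import Fraction
from functools import reduce

def add(x, y):
    return (x[0] + y[0], x[1] + y[1])

def mul(x, y):  # (a + b√2)(c + d√2) = (ac + 2bd) + (ad + bc)√2
    return (x[0] * y[0] + 2 * x[1] * y[1], x[0] * y[1] + x[1] * y[0])

def matmul(A, B):
    n = len(A)
    return [[reduce(add, (mul(A[i][k], B[k][j]) for k in range(n)))
             for j in range(n)] for i in range(n)]

def matvec(A, v):
    return [reduce(add, (mul(row[k], v[k]) for k in range(len(v)))) for row in A]

I4 = [[(1, 0) if i == j else (0, 0) for j in range(4)] for i in range(4)]
S1 = [[(-3, 0), (0, 0), (0, 0), (0, 2)],
      [(0, 0), (1, 0), (0, 0), (0, 0)],
      [(0, 0), (0, 0), (1, 0), (0, 0)],
      [(0, -2), (0, 0), (0, 0), (3, 0)]]
R13 = [[(0, 0), (0, 0), (1, 0), (0, 0)],
       [(0, 0), (1, 0), (0, 0), (0, 0)],
       [(1, 0), (0, 0), (0, 0), (0, 0)],
       [(0, 0), (0, 0), (0, 0), (1, 0)]]
R12 = [[(0, 0), (1, 0), (0, 0), (0, 0)],
       [(1, 0), (0, 0), (0, 0), (0, 0)],
       [(0, 0), (0, 0), (1, 0), (0, 0)],
       [(0, 0), (0, 0), (0, 0), (1, 0)]]

def M(k):
    """M(k) = (S1 R13)^k R12."""
    SR, out = matmul(S1, R13), I4
    for _ in range(k):
        out = matmul(out, SR)
    return matmul(out, R12)

def orthocubic_point(coeffs):
    """i(η_{p/q}) = M(a1)···M(an)·i(η∞) for p/q = [a1, ..., an]."""
    A = reduce(matmul, (M(a) for a in coeffs), I4)
    return matvec(A, [(1, 0), (0, 0), (1, 0), (0, 1)])  # i(η∞) = (1, 0, 1, √2)

coeffs = [1, 1, 1]                   # the tangle t_{3/2} of Figure 26
pq = Fraction(coeffs[-1])
for a in reversed(coeffs[:-1]):
    pq = a + 1 / pq
p, q = pq.numerator, pq.denominator  # p/q = 3/2
assert orthocubic_point(coeffs) == [(p * p, 0), (q * q, 0), ((p - q) ** 2, 0),
                                    (0, p * p - p * q + q * q)]  # equality (11)
x, y, z, t = p, q, p - q, p * p - p * q + q * q                  # solutions (13)
assert x ** 4 + y ** 4 + z ** 4 == 2 * t ** 2                    # 3⁴ + 2⁴ + 1⁴ = 2·7²
print((x, y, z, t))                  # (3, 2, 1, 7)
```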
Figure 26. The orthocubic point (red) of the rational tangle t_{3/2}, on the line 3²y = 2²x, corresponding to the primitive solution 3⁴ + 2⁴ + 1⁴ = 2 × 7².

References

[Ada94] C. C. Adams. The Knot Book. American Mathematical Soc., 1994.
[Ale23] J. W. Alexander. "A lemma on systems of knotted curves". In: Proceedings of the National Academy of Sciences of the United States of America 9.3 (1923), p. 93.
[AM95] S. V. Anishchik and N. N. Medvedev. "Three-Dimensional Apollonian Packing as a Model for Dense Granular Systems". In: Phys. Rev. Lett. 75 (23 1995), pp. 4314–4317. doi: 10.1103/PhysRevLett.75.4314. url: https://link.aps.org/doi/10.1103/PhysRevLett.75.4314.
[Con70] J. H. Conway. "An enumeration of knots and links, and some of their algebraic properties". In: Computational Problems in Abstract Algebra. Elsevier, 1970, pp. 329–358.
[Cro04] P. R. Cromwell. Knots and Links. Cambridge University Press, 2004. doi: 10.1017/CBO9780511809767.
[Epp14] D. Eppstein. "Links and knots in the graphs of four-dimensional polytopes". 2014. url: https://11011110.github.io/blog/2014/12/13/links-and-knots.html.
[Gab+21] D. Gabai, R. Haraway, R. Meyerhoff, N. Thurston, and A. Yarmola. Hyperbolic 3-manifolds of low cusp volume. 2021. arXiv: 2109.14570 [math.GT].
[GT86] D. Gabai and W. P. Thurston. Genera of Arborescent Links. Vol. 339. American Mathematical Soc., 1986.
[Gra+03] R. Graham, J. C. Lagarias, C. L. Mallows, A. R. Wilks, and C. H. Yan. "Apollonian circle packings: number theory". In: Journal of Number Theory 100.1 (2003), pp. 1–45. issn: 0022-314X. doi: 10.1016/S0022-314X(03)00015-5. url: https://www.sciencedirect.com/science/article/pii/S0022314X03000155.
[Kau87] L. H. Kauffman. "State models and the Jones polynomial". In: Topology 26.3 (1987), pp. 395–407.
[KN19] A. Kontorovich and K. Nakamura. "Geometry and arithmetic of crystallographic sphere packings". In: Proceedings of the National Academy of Sciences 116.2 (2019), pp. 436–441. issn: 0027-8424. doi: 10.1073/pnas.1721104116. url: https://www.pnas.org/content/116/2/436.
[Kwo+20] S. Kwok, R. Botet, L. Sharpnack, and B. Cabane. "Apollonian packing in polydisperse emulsions". In: Soft Matter 16 (10 2020), pp. 2426–2430. doi: 10.1039/C9SM01772K. url: http://dx.doi.org/10.1039/C9SM01772K.
[Mae07] H. Maehara. "On Configurations of Solid Balls in 3-Space: Chromatic Numbers and Knotted Cycles". In: Graphs and Combinatorics 23.1 (2007), pp. 307–320. issn: 1435-5914. doi: 10.1007/s00373-007-0702-7. url: https://doi.org/10.1007/s00373-007-0702-7.
[Mur87] K. Murasugi. "Jones polynomials and classical conjectures in knot theory". In: Topology 26.2 (1987), pp. 187–194.
[Nak14] K. Nakamura. The local-global principle for integral bends in orthoplicial Apollonian sphere packings.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' arXiv: 1401.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2980 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='NT].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [RR21a] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Ram´ırez Alfons´ın and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Rasskin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' “A polytopal generalization of Apollonian packings and Descartes’ theorem”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In: (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' arXiv: 2107.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='09432 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='CO].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [RR21b] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Ram´ırez Alfons´ın and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Rasskin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' “Ball packings for links”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In: European Journal of Combinatorics 96 (2021), p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 103351.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' issn: 0195-6698.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' doi: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='org/10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1016/ j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='ejc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='103351.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' url: https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='sciencedirect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='com/science/article/pii/ S0195669821000433.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [Ras21] I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Rasskin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' “Regular polytopes, sphere packings and Apollonian sections”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In: arXiv preprint arXiv:2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='00655 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [Sta15] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Stange.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' “The Apollonian structure of Bianchi groups”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In: Transactions of the Amer- ican Mathematical Society 370 (May 2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='1090/tran/7111.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [Ste05] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Stephenson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Introduction to circle packing: The theory of discrete analytic functions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Cambridge University Press, 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [Thi87] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Thistlethwaite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' “A spanning tree expansion of the Jones polynomial”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In: Topology 26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='3 (1987), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 297–309.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' [Wil81] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Wilker.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' “Inversive Geometry”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' In: (1981).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Ed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' by Chandler Davis, Branko Gr¨unbaum, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Sherk, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' 379–442.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' IMAG, Univ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content=' Montpellier, CNRS, Montpellier, France Email address: jorge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='ramirez-alfonsin@umontpellier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/9tE1T4oBgHgl3EQfUQPn/content/2301.03089v1.pdf'} +page_content='fr Institute of Analysis and Number Theory, TU Graz, Austria Email address: ivan.' 
diff --git a/A9FQT4oBgHgl3EQfMzZX/content/tmp_files/2301.13269v1.pdf.txt b/A9FQT4oBgHgl3EQfMzZX/content/tmp_files/2301.13269v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b55bc00ede6698994df77484f35221bb0b2e3c2
--- /dev/null
+++ b/A9FQT4oBgHgl3EQfMzZX/content/tmp_files/2301.13269v1.pdf.txt
@@ -0,0 +1,8441 @@
+Maria Curie-Skłodowska University in Lublin
+Faculty of Mathematics, Physics and Computer Science
+Maryia Shpak
+Structure Learning and Parameter Estimation for Graphical Models via Penalized Maximum Likelihood Methods
+PhD dissertation
+Supervisor: dr hab. Mariusz Bieniek, prof. UMCS
+Institute of Mathematics, University of Maria Curie-Sklodowska
+April 2022
+arXiv:2301.13269v1 [stat.ML] 30 Jan 2023
+
+Abstract
+Probabilistic graphical models (PGMs) provide a compact and flexible framework to model very complex real-life phenomena. They combine probability theory, which deals with uncertainty, with a logical structure represented by a graph, which allows us to cope with the computational complexity and also to interpret and communicate the obtained knowledge. In the thesis we consider two different types of PGMs: Bayesian networks (BNs), which are static, and continuous time Bayesian networks (CTBNs) which, as the name suggests, have a temporal component. We are interested in recovering their true structure, which is the first step in learning any PGM. This is a challenging task, which is interesting in itself from the causal point of view, for the purposes of interpretation of the model and for the decision making process. All approaches to structure learning in the thesis are united by the same idea of maximum likelihood estimation with the LASSO penalty. The problem of structure learning is reduced to the problem of finding non-zero coefficients in the LASSO estimator for a generalized linear model. In the case of CTBNs we consider the problem both for complete and incomplete data. We support the theoretical results with experiments.
+Keywords and phrases: Probabilistic graphical models, PGM, Bayesian networks, BN, continuous time Bayesian networks, CTBN, maximum likelihood, LASSO penalty, structure learning, Markov Jump Process, MJP, Markov chain, Markov chain Monte Carlo, MCMC, Stochastic Proximal Gradient Descent, drift condition, incomplete data, Expectation-Maximization, EM.
+
+Acknowledgements
+Throughout the process of writing this thesis I have received a lot of support and assistance, and I wish to express my gratitude.
+First, I would like to thank my supervisor, Professor Mariusz Bieniek, who was a great support during this challenging process. His curiosity, open-mindedness and extensive knowledge gave me a chance to research things that are outside of his main field of expertise, and his drive for quality and perfection never let me settle for mediocre results.
+Next, I want to thank my second advisor, Professor Błażej Miasojedow from the University of Warsaw, who introduced us to the field of probabilistic graphical models and to some other areas of statistics, stochastic processes and numerical approximation. His great expertise and enormous patience allowed me to gain massive knowledge and understanding of these fields, even when I sometimes did not believe I could.
+I would also like to thank dr Wojciech Rejchel from Nicolaus Copernicus University in Toruń, whose expertise in model selection was key in the analysis of the theoretical properties of our novel methods for structure learning. I wish to thank mgr Grzegorz Preisbich and mgr Tomasz Cąkała for making many numerical results for our methods possible.
+I want to thank my university, Maria Skłodowska-Curie University, for the academic leave giving me the opportunity to finish the dissertation, and for some additional funding. Part of the research was also supported by the Polish National Science Center grant: NCN contract number UMO-2018/31/B/ST1/00253. I would also like to show my appreciation to other people from my university who helped me in various ways, among them Professor Maria Nowak, Professor Jarosław Bylina, Professor Tadeusz Kuczumow, Professor Jurij Kozicki and many others.
+Finally, I would like to thank my parents, Pavel and Natallia, who were always there to guide me and help me through the years of research; without their support this thesis would not have been possible. I also wish to extend my special thanks to my dear friends for their emotional support and for helping me to stay disciplined; especially I thank Elvira Tretiakova and Olga Kostina.
+
+Contents
+Abstract
+Acknowledgements
+1 Introduction
+1.1 Motivation
+1.2 Probabilistic Graphical Models
+1.3 Overview of the thesis and its contributions
+2 Preliminaries
+2.1 Notation
+2.2 Bayesian networks
+2.3 Continuous Time Markov Processes
+2.4 Conditional Markov Processes
+2.5 Continuous time Bayesian networks
+2.6 The LASSO penalty
+3 Statistical inference for networks with known structure
+3.1 Learning probabilities in BNs
+3.2 Inference in Bayesian networks
+3.3 Learning probabilities in BNs for incomplete data
+3.4 Learning parameters for CTBNs
+3.5 Inference for CTBNs
+4 Structure learning for Bayesian networks
+4.1 Problem of learning structure of Bayesian Networks
+4.2 Partition MCMC method
+4.3 The novel approach to structure learning
+4.4 Discrete case
+4.5 Numerical results
+5 Structure learning for CTBNs for complete data
+5.1 Notation and preliminaries
+5.2 Main results
+5.3 Proofs of the main results
+5.4 Numerical examples
+5.5 Extension of the results
+6 Structure learning for CTBNs for incomplete data
+6.1 Introduction and notation
+6.2 Sampling the Markov chain with Rao and Teh's algorithm
+6.3 Structure learning via penalized maximum likelihood function
+6.4 Numerical results
+6.5 FFBS Algorithm
+7 Conclusions and discussion
+
+Chapter 1
+Introduction
+1.1 Motivation
+It is common knowledge that we live in a world where data plays a crucial role in many areas and applications of great importance for our society, and the importance of data is still growing. The amount of data in the world is now estimated at dozens of zettabytes, and by 2025 the amount of data generated daily is expected to reach hundreds of exabytes. There is a demand for models and algorithms that can deal with these amounts of data effectively, finding useful patterns and providing better insights into the data. On top of that, most environments require reasoning under uncertainty. Probabilistic graphical models (PGMs) provide a framework that allows us to deal with these and many other challenges in various situations. The models combine probability theory, which deals with uncertainty in a mathematically consistent way, and a logical structure, represented by a graph encoding certain independence relationships among variables, which allows us to cope with the computational complexity.
+PGMs encode joint distributions over a set of random variables (often a large number of them) combining graph theory and probability, which allows us to represent many complex real-world phenomena compactly and to overcome the complexity of the model, which is exponential in the number of variables. These models have some other advantages as well. Namely, because of their clear structure, PGMs enable us to visualize, interpret and also communicate the gained knowledge to others, as well as to make decisions. Some models, for example Bayesian networks, have directed graphs at their core and offer ways to establish causality in various cases. Moreover, graphical models allow us not only to fit the observed data but also to elegantly incorporate prior knowledge, e.g. from experts in the domain, into the model. Besides, certain models take into account a temporal component and consider the dynamics of systems in time.
+Graphical models are successfully applied to a large number of domains such as image processing and object recognition, medical diagnosis, manufacturing, finance, statistical physics, speech recognition, natural language processing and many others. Let us briefly present here a few examples of various applications.
+Bayesian networks, one of the PGMs considered in this thesis, are extensively used in the development of medical decision support systems helping doctors to diagnose patients more accurately. In the work by Wasyluk et al. (2001) the authors built and described a probabilistic causal model for the diagnosis of liver disorders. In the domain of hepatology, inexperienced clinicians have been found to make a correct diagnosis in jaundiced patients in less than 45% of the cases. Moreover, the number of cases of liver disorders is on the rise and, especially at early stages of a disease, the correct diagnosis is difficult yet critical, because in many cases damage to the liver caused by an untreated disorder may be irreversible. As we already mentioned, and as it is stressed in the work above, a huge advantage of these models is that they allow us to combine existing frequency data with expert judgement within one framework, as well as to update themselves when new data are obtained, for example patients' data within a hospital or a clinic. What is also important in medical diagnosis is that PGMs, Bayesian networks in particular, efficiently model the simultaneous presence of multiple disorders, which happens quite often, whereas many classification approaches consider the disorders to be mutually exclusive. The overall model accuracy, as the authors Wasyluk et al. (2001) claim, seems to be better than that of beginning diagnosticians and reaches almost 80%, which can be used for the diagnosis itself as well as to help new doctors learn the strategy and to optimize the diagnostic process. A few other examples of PGM applications in the medical field are the management of childhood malaria in Malawi (Bathla Taneja et al. (2021)), estimating the risk of coronary artery disease (Gupta et al. (2019)), etc.
+The next popular area of graphical models application is computational biology, for example Gene Regulatory Network (GRN) inference. A GRN consists of genes or parts of genes, regulatory proteins and interactions between them, and plays a key role in mediating cellular functions and signalling pathways in cells. Accurate inference of the GRN for a specific disease returns disease-associated regulatory proteins and genes, serving as potential targets for drug treatment. Chen and Xuan (2020) argued that Bayesian inference is particularly suitable for GRNs as it is very flexible for large-scale data integration; the main challenge of GRNs is that there exist hundreds of proteins and tens of thousands of genes, with one protein possibly regulating hundreds of genes, and their regulatory relationships may vary across different cell types, tissues, or diseases. Moreover, the estimation is more robust and easier to compare on multiple datasets. Chen and Xuan (2020) demonstrated this by applying their model to breast cancer data and identified genes relevant to breast cancer recurrence. As another example in this area, Sachs et al. (2005) used Bayesian network computational methods for the derivation of causal influences in cellular signalling networks. These methods automatically elucidated most of the traditionally reported signalling relationships and predicted novel interpathway network causalities, which were verified experimentally. Reconstruction of such networks might be applied to understanding native-state tissue signalling biology, complex drug actions, and dysfunctional signalling in diseased cells.
+The use of probability models is also extensive in computer vision applications. In their work, Frey and Jojic (2005) advocate the use of PGMs in computer vision problems requiring decomposition of the data into interacting components, for example methods for automatic scene analysis. They apply different techniques in a vision model of multiple, occluding objects and compare their performances. Occlusion is a very important effect and one of the biggest challenges in computer vision that needs to be taken into account, and PGMs are considered to be a good tool to handle that effect. PGMs are also used for tracking different moving objects in video sequences, for example long-term tracking of groups of pedestrians on the street (Jorge et al. (2007)), where the main difficulties concern total occlusions of the objects to be tracked, as well as group merging and splitting. Another example is on-line object tracking (Jorge et al. (2004)), useful in real-time applications such as video surveillance, where the authors overcame the need to analyze the whole sequence before labelling trajectories, making the tracker usable on-line, as well as the problem of the unboundedly growing complexity of the network.
+1.2 Probabilistic Graphical Models
+In the previous subsection we described the advantages of PGMs and why one might be interested in studying them. In this work we focus on two types of PGMs: Bayesian Networks (BNs) and Continuous Time Bayesian Networks (CTBNs). The first term has a rather long history and goes back to the 1980s (Pearl (1985)), whereas the second term is relatively modern (Nodelman et al. (2002)). The underlying structure for both models is a directed graph, which can be treated either as a representation of a certain set of independencies or as a skeleton for factorizing a distribution. In some cases the directions of arrows in the graph can, under certain conditions, suggest causality and allow us not only to make inference from the data but also to intervene in the model and manipulate desired parameters in the future. BNs are static models, i.e. they do not consider a temporal component, while in CTBNs, as the name suggests, we study models in the context of continuous time. The framework of CTBNs is based on homogeneous Markov processes, but utilizes ideas from Bayesian networks to provide a graphical representation language for these systems.
+A broad and comprehensive tutorial on existing research on learning Bayesian networks and some adjacent models can be found in Daly et al. (2011). The subject of causality is extensively explored in Spirtes et al. (2000) and Pearl (2000); some references are also given in Daly et al. (2011). Several examples of the use of BNs were presented above.
+In contrast to regular Bayesian networks, CTBNs have not been studied as thoroughly yet. The most extensive work concerning CTBNs is the PhD thesis of Nodelman (2007). Some related works include, for example, learning CTBNs in non-stationary domains (Villa and Stella (2018)), in relational domains (Yang et al. (2016)) and continuous time Bayesian network classifiers (Stella and Amer (2012)).
+As an example, CTBNs have been successfully used to model the presence of people at their computers together with their availability (Nodelman and Horvitz (2004)), for dynamical systems reliability modeling and analysis (Boudali and Dugan (2006)), for network intrusion detection (Xu and Shelton (2008)), to model social networks (Fan and Shelton (2012)), to model cardiogenic heart failure (Gatti et al. (2012)), and for gene network inference (Stella et al. (2014) or Stella et al. (2016)).
+1.3 Overview of the thesis and its contributions
+There are several problems within both the BN and CTBN frameworks. Both of them have graph structures which need to be discovered, and this is considered to be one of the main challenges in the field. This thesis is dedicated exclusively to solving this problem in both frameworks. Another problem is to learn the parameters of the model: in the case of BNs it is a set of conditional probability distributions and in the case of CTBNs it is a set of conditional intensity matrices (for details see Chapter 2). The last problem is the statistical inference based on the obtained network (details are in Chapter 3).
+The thesis is organized as follows. In Chapter 2 we provide all the necessary preliminaries for better understanding of the frameworks of Bayesian networks and continuous time Bayesian networks. Next, in Chapter 3, we overview known results on learning the networks' parameters as well as on inference, to fully cover the concepts of interest. Chapter 4 is dedicated to the structure learning problem for BNs, where we provide novel algorithms for both discrete and continuous data. Chapters 5 and 6 cover the problems of structure learning for CTBNs in the cases of complete and incomplete data, respectively. Finally, Chapter 7 concludes the thesis with a summary and a discussion of the obtained results.
+The algorithms in both Chapters 4 and 5 rely on feature selection in generalized linear models with the use of the LASSO (Least Absolute Shrinkage and Selection Operator) penalty. It is based on the idea of penalizing the parameters of the model, i.e. adding to the fitted loss a penalty proportional to the sum of the absolute values of the parameters, with the proportionality constant being a hyperparameter, in order to better fit the model and to perform variable selection by forcing some parameters to be exactly 0. The term first appeared in Tibshirani (1996). More on the topic of LASSO can be found, for example, in Hastie et al. (2015). In Section 2.6 we provide a short description of the concept.
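+The following minimal sketch (our illustration, not part of the thesis; it assumes the scikit-learn library and invented data) shows the variable-selection effect of the LASSO penalty: with a suitable penalty weight, the coefficients of irrelevant covariates are forced to be exactly zero.
+
+import numpy as np
+from sklearn.linear_model import Lasso
+
+rng = np.random.default_rng(0)
+n, p = 200, 10
+X = rng.normal(size=(n, p))
+# only the first two covariates are truly relevant
+beta = np.array([3.0, -2.0] + [0.0] * (p - 2))
+y = X @ beta + 0.5 * rng.normal(size=n)
+
+# alpha is the hyperparameter controlling the strength of the L1 penalty
+model = Lasso(alpha=0.1).fit(X, y)
+print(model.coef_)  # most of the coefficients are exactly 0
+
+The set of non-zero coefficients is then read off as the selected features; this is precisely the mechanism that the structure learning algorithms of Chapters 4 and 5 exploit.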
+The main contributions of the thesis are collected in Chapters 4, 5 and 6 and they are as follows:
+• we provide a novel algorithm for learning the structure of BNs based on a penalized maximum likelihood function, both for discrete and continuous data;
+• we present and prove consistency results for the algorithm in the case of continuous data;
+• we compare the effectiveness of our method with the other most popular methods for structure learning applied to benchmark networks of continuous data of different sizes;
+• we provide a novel algorithm for learning the structure of CTBNs based on a penalized maximum likelihood function for complete data and present two theoretical consistency results with proofs;
+• we provide a novel algorithm for learning the structure of CTBNs based on a penalized maximum likelihood function for incomplete data, where the log-likelihood function is replaced by its Markov Chain Monte Carlo (MCMC) approximation due to the inability to express it explicitly;
+• we present and prove the convergence of the proposed MCMC scheme and the consistency of the learning algorithm;
+• for the above-mentioned MCMC approximation we design an algorithm to produce the necessary samples;
+• in both cases of complete and incomplete data we provide results of simulations to show the effectiveness of the proposed algorithms.
+Part of the content (Chapter 5) in its early stages has been published on arXiv: Shpak, M., Miasojedow, B., and Rejchel, W., Structure learning for CTBNs via penalized maximum likelihood methods, arXiv e-prints, 2020, https://doi.org/10.48550/arXiv.2006.07648.
+
+Chapter 2
+Preliminaries
+In this chapter we provide the theoretical background on Bayesian networks (BNs), Markov processes, conditional Markov processes and continuous time Bayesian networks (CTBNs). We start with the notation common for BNs and CTBNs which we use throughout the whole thesis. Then we provide a few basic definitions needed to define and understand the concepts of BNs and CTBNs, with their interpretation and examples. Most of the contents of this chapter come from Nodelman et al. (2002), Nodelman (2007) and Koller and Friedman (2009).
+2.1 Notation
+First, by upper case letters, for example Xi, B, Y, we denote random variables. In the case of CTBNs upper case letters represent whole collections of random variables indexed by continuous time; hence in this case Xi(t), Y(t) are random variables for particular time points t.
+Values of variables are denoted by lower case letters, sometimes indexed by numbers or otherwise, representing different values of the same random variable - e.g. xi, s, s′. The set of possible values for a variable X is denoted by Val(X) and by |X| we denote the number of its elements.
+Sets of variables are denoted by bold-face upper case letters - e.g. X - and the corresponding sets of values are denoted by bold-face lower case letters - e.g. x. The set of possible values and its size are denoted by Val(X) and |X|.
+A pair G = (V, E) denotes a directed graph, where V is the set of nodes and E is the set of edges. The notation u → w means that there exists an edge from the node u to the node w. We also call the edges arrows. The set V \ {w} is denoted by −w. Moreover, we define the set of the parents of the node w in the graph G by
+paG(w) = {u ∈ V : u → w}.
+When there is no confusion, for convenience we sometimes write pa(w) instead of paG(w).
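+As a small aside (our own illustrative sketch, not part of the thesis), this notation translates directly into code: a directed graph can be stored as a set of edges, and the parent sets are read off from it.
+
+# a directed graph G = (V, E); cycles are allowed, as needed later for CTBNs
+V = {"u", "v", "w"}
+E = {("u", "w"), ("v", "w"), ("w", "u")}
+
+def parents(w, E):
+    """Return pa_G(w) = {u in V : u -> w}."""
+    return {u for (u, x) in E if x == w}
+
+print(parents("w", E))  # {'u', 'v'}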
+Other useful notation, relevant locally, is provided in the corresponding sections.
+2.2 Bayesian networks
+In this section we provide an overview of Bayesian networks (BNs). We start with the intuition behind BNs, followed by the representation of BNs together with its formal definition and notation. The problems of inference and learning for BNs are considered more thoroughly in Section 3.2 and Chapter 4, respectively.
+The goal is to represent a joint distribution p over some set of random variables X = {X1, . . . , Xn}. Even in the simplest case where these variables are binary-valued, the joint distribution requires the specification of 2^n − 1 numbers: the probabilities of the 2^n different assignments of the values {x1, . . . , xn}. The explicit representation of the joint distribution is hard to handle from every perspective except for small values of n. Computationally, it is very expensive to manipulate and generally too large to store in computer memory. Cognitively, it is impossible to acquire so many numbers from a human expert; moreover, most of the numbers would be very small and would correspond to events that people cannot reasonably consider. Statistically, if we want to learn the distribution from data, we would need ridiculously large amounts of data to estimate so many parameters robustly (Koller and Friedman (2009)).
+Bayesian networks help us specify a high-dimensional joint distribution compactly by exploiting its independence properties. The key notion behind the BN representation is conditional independence, which on the one hand allows us to reduce the number of estimated parameters significantly, and on the other hand allows us to avoid very strong and naive independence assumptions.
+Definition 2.1. Two random variables X and Y are independent (denoted by X ⊥ Y) if and only if the equality
+P(X ∈ A, Y ∈ B) = P(X ∈ A)P(Y ∈ B)
+holds for all Borel sets A, B ⊆ R.
+For short, we write this in the form P(X, Y) = P(X)P(Y). There is also another way to think of independence. If the random variables X and Y are independent, then P(X ∈ · | Y) = P(X ∈ ·). Intuitively, this says that having evidence about Y does not change the distribution of our beliefs on the occurrence of X.
+If we wish to model a more complex domain represented by some set of variables, it is unlikely that any of the variables will be independent of each other. Conditional independence is a weaker notion of independence, but it is more common in real-life situations.
+Definition 2.2. Two random variables X and Y are conditionally independent given a set of random variables C (symbolically X ⊥ Y | C) if and only if
+P(X ∈ A, Y ∈ B | C) = P(X ∈ A | C)P(Y ∈ B | C)   (2.1)
+holds for all Borel sets A, B ⊆ R.
+Obviously (2.1) implies
+P(X ∈ A | C, Y) = P(X ∈ A | C),
+which can be written shortly as
+P(X | C, Y) = P(X | C).
+So intuitively, the influence that X and Y have on each other is mediated through the variables in the set C. It means that, when we have some evidence about the variables from C, having any additional information about Y does not change our beliefs about X.
+Let us demonstrate this definition on a simplified example. Let X be a random variable representing whether a person has lung cancer and Y representing whether the same person has yellow teeth. These variables are not independent, as having yellow teeth is one of the secondary symptoms of lung cancer. However, when we know that the person is a smoker, knowing that they have yellow teeth does not give us any additional insight into lung cancer, and vice versa, as we consider smoking to be the reason for both symptoms.
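+This factorization can be checked numerically. The following sketch (our illustration; all probabilities are invented for the smoking example, with S = smoker, C = lung cancer, T = yellow teeth) builds a joint distribution in which C ⊥ T | S holds by construction and verifies that C and T are nevertheless marginally dependent:
+
+import numpy as np
+
+p_s = np.array([0.7, 0.3])              # P(S = 0), P(S = 1)
+p_c_given_s = np.array([[0.99, 0.01],   # P(C | S = 0)
+                        [0.90, 0.10]])  # P(C | S = 1)
+p_t_given_s = np.array([[0.8, 0.2],     # P(T | S = 0)
+                        [0.3, 0.7]])    # P(T | S = 1)
+P = np.einsum('s,sc,st->sct', p_s, p_c_given_s, p_t_given_s)
+
+# conditional independence: P(C, T | S = s) factorizes for every value s
+for s in (0, 1):
+    joint = P[s] / P[s].sum()
+    assert np.allclose(joint, np.outer(joint.sum(axis=1), joint.sum(axis=0)))
+
+# but marginally C and T are dependent: P(C, T) differs from P(C) P(T)
+pct = P.sum(axis=0)
+print(np.allclose(pct, np.outer(pct.sum(axis=1), pct.sum(axis=0))))  # False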
+It is easier to believe that in a given domain most variables will not directly affect most other variables. Instead, for each variable only a limited set of other variables influences it. This is the intuition which leads to the notion of a Bayesian network B over a set of random variables B, which is a compact representation of a specific joint probability distribution. The formal definition is as follows.
+Definition 2.3. A Bayesian network B over a set of random variables B is formed by
+• a directed acyclic graph (DAG) G whose nodes correspond to the random variables Bi ∈ B, i = 1, . . . , n;
+• the set of conditional probability distributions (CPDs) for each Bi, specifying the conditional distribution P(Bi | paG(Bi)) of Bi as a function of its parent set in G.
+The CPDs form a set of local probability models that can be combined to describe the full joint distribution over the variables B via the chain rule:
+P(B1, B2, . . . , Bn) = ∏_{i=1}^{n} P(Bi | paG(Bi)).   (2.2)
+The graph G of a Bayesian network encodes a set of conditional independence assumptions. In particular, a variable B ∈ B is independent of its non-descendants given the set of its parents paG(B). See for example Figure 2.1 of the Extended Student network taken from Koller and Friedman (2009). As can be seen, each variable is connected only to a small number of other variables in the network. In this example, according to (2.2), the joint distribution takes the following form:
+P(C, D, I, G, S, L, J, H) = P(C)P(D | C)P(I)P(G | D, I)P(S | I)P(L | G)P(J | L, S)P(H | G, J).
+Figure 2.1: The Extended Student network
+This example will be considered in more detail further in the thesis.
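+To make the economy of this factorization concrete, here is a small sketch (our own code; the CPD numbers are invented, only the parent structure follows Figure 2.1). It specifies the joint distribution over eight binary variables through local CPDs and checks that the resulting 2^8 joint probabilities sum to one:
+
+import itertools
+
+parents = {'C': [], 'D': ['C'], 'I': [], 'G': ['D', 'I'], 'S': ['I'],
+           'L': ['G'], 'J': ['L', 'S'], 'H': ['G', 'J']}
+
+# invented CPDs: P(var = 1 | values of its parents)
+prob_one = {
+    'C': lambda pa: 0.5,
+    'D': lambda pa: 0.6 if pa[0] else 0.3,
+    'I': lambda pa: 0.3,
+    'G': lambda pa: 0.9 if pa == (1, 1) else 0.4,
+    'S': lambda pa: 0.8 if pa[0] else 0.2,
+    'L': lambda pa: 0.9 if pa[0] else 0.1,
+    'J': lambda pa: min(0.95, 0.3 + 0.4 * pa[0] + 0.25 * pa[1]),
+    'H': lambda pa: 0.7 if pa == (1, 1) else 0.4,
+}
+
+def joint(assignment):
+    """Chain rule (2.2): the product of the local conditional probabilities."""
+    p = 1.0
+    for var, pa in parents.items():
+        pa_vals = tuple(assignment[v] for v in pa)
+        p1 = prob_one[var](pa_vals)
+        p *= p1 if assignment[var] else 1.0 - p1
+    return p
+
+total = sum(joint(dict(zip(parents, vals)))
+            for vals in itertools.product((0, 1), repeat=len(parents)))
+print(round(total, 10))  # 1.0, although far fewer than 2^8 - 1 numbers were given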
+Now we discuss basic structures for BNs, including some examples, and give the interpretation of the structures. BNs represent probability distributions that can be formed via products of smaller, local conditional probability distributions (one for each variable). If the joint distribution is expressed in this form, it means that the independence assumptions for certain variables are introduced into our model. To understand what types of independencies are described by directed graphs, for simplicity let us start by looking at a BN B with three nodes: X, Y, and Z. In this case, B essentially has only three possible structures, each of which leads to different independence assumptions.
+• Common parent, also called common cause. If G is of the form X ← Y → Z, and Y is observed, then X ⊥ Z | Y. However, if Y is unobserved, then X ̸⊥ Z. Intuitively this stems from the fact that Y contains all the information that determines the outcomes of X and Z; once it is observed, there is nothing else that affects these variables' outcomes. The case with smoking and lung cancer described above is such an example of a common cause. See the illustration (c) in Figure 2.2.
+• Cascade, or indirect connection. If G is of the form X → Y → Z, and Y is observed, then again X ⊥ Z | Y. However, if Y is unobserved, then X ̸⊥ Z. Here, the intuition is again that Y holds all the information that determines the outcome of Z; thus, it does not matter what value X takes. In Figure 2.2, (a) and (b) show the cases of indirect causal and indirect evidential effects, respectively.
+• V-structure or common effect, also known as explaining away. If G is of the form X → Y ← Z, then knowing Y couples X and Z. In other words, X ⊥ Z if Y is unobserved, but X ̸⊥ Z | Y if Y is observed. See the case (d) in Figure 2.2.
+Figure 2.2: The four possible two-edge trails from X to Y via Z: (a) An indirect causal effect; (b) An indirect evidential effect; (c) A common cause; (d) A common effect.
+The last case requires additional explanation. Suppose that Y is a Boolean variable that indicates whether our lawn is wet one morning; X and Z are two explanations for it being wet: either it rained (indicated by X), or the sprinkler turned on (indicated by Z). If we know that the grass is wet (Y is true) and the sprinkler did not go on (Z is false), then the probability that X is true must be one, because that is the only other possible explanation. Hence, X and Z are not independent given Y.
+To generalize this to a case of more variables and demonstrate the power but also the limitations of Bayesian networks, we will need the notions of d-separation and I-maps. Let Q, W, and O be three sets of nodes in a Bayesian network B represented by G, where the variables O are observed. Let us use the notation I(p) to denote the set of all independencies of the form (Q ⊥ W | O) that hold in a joint distribution p. To extend the structures mentioned above to more general networks, we can apply them recursively over any larger graph, which leads to the notion of d-separation.
+Recall that we say that there exists an undirected path in G between the nodes u and w if there exists a sequence v1, . . . , vn ∈ V such that vi → vi+1 or vi ← vi+1 for each i = 0, 1, . . . , n, where v0 = u and vn+1 = w. Moreover, an undirected path in G between Q ∈ Q and W ∈ W is called active given observed variables O if for every consecutive triple of variables X, Y, Z on the path, one of the following holds:
+• common cause: X ← Y → Z and Y /∈ O (Y is unobserved);
+• causal trail: X → Y → Z and Y /∈ O (Y is unobserved);
+• evidential trail: X ← Y ← Z and Y /∈ O (Y is unobserved);
+• common effect: X → Y ← Z and Y or any of its descendants are observed.
+Figure 2.3: An example of d-separation: X1 and X6 are d-separated given X2, X3 (left); X2, X3 are not d-separated given X1, X6 (right).
+Finally, we say that Q and W are d-separated given O if there are no active paths between any node A ∈ Q and B ∈ W given O. See the examples of d-separation in Figure 2.3. In the second example there is no d-separation because there is an active path which passes through the V-structure created when X6 is observed. The notion of d-separation lets us describe a large fraction of the dependencies that hold in our model. It can be shown that if Q and W are d-separated given O, then Q ⊥ W | O.
+We write I(G) = {(Q ⊥ W | O) : Q, W are d-separated given O} to denote the set of independencies corresponding to all d-separations in G. If p factorizes over G, then I(G) ⊆ I(p) and p can be constructed easily. In this case, we say that G is an I-map for p. In other words, all the independencies encoded in G are sound: variables that are d-separated in G are conditionally independent with respect to p. However, the converse is not true: a distribution may factorize over G, yet have independencies that are not captured in G.
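+The explaining-away effect can be verified numerically. A short sketch (our code; the probabilities for the rain/sprinkler/wet-grass example are invented) follows; it shows that X and Z are marginally independent, while after observing Y = 1 the additional evidence Z = 0 sharply raises the probability of X:
+
+# X = rain, Z = sprinkler (independent causes), Y = the grass is wet
+p_x, p_z = 0.2, 0.3
+p_y1 = {(0, 0): 0.01, (0, 1): 0.9, (1, 0): 0.8, (1, 1): 0.99}  # P(Y=1 | X, Z)
+
+def p(x, z, y):
+    px = p_x if x else 1 - p_x
+    pz = p_z if z else 1 - p_z
+    py = p_y1[(x, z)] if y else 1 - p_y1[(x, z)]
+    return px * pz * py
+
+# marginally X and Z are independent: conditioning on Z = 0 leaves P(X = 1) unchanged
+p_x1 = sum(p(1, z, y) for z in (0, 1) for y in (0, 1))
+p_x1_z0 = (sum(p(1, 0, y) for y in (0, 1))
+           / sum(p(x, 0, y) for x in (0, 1) for y in (0, 1)))
+print(round(p_x1, 3), round(p_x1_z0, 3))        # 0.2 0.2
+
+# given Y = 1, learning that the sprinkler was off makes rain almost certain
+p_x1_y1 = (sum(p(1, z, 1) for z in (0, 1))
+           / sum(p(x, z, 1) for x in (0, 1) for z in (0, 1)))
+p_x1_y1_z0 = p(1, 0, 1) / sum(p(x, 0, 1) for x in (0, 1))
+print(round(p_x1_y1, 3), round(p_x1_y1_z0, 3))  # 0.436 0.952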
+So an interesting question here is whether, for a probability distribution p, we can always find a perfect map, i.e. a graph G for which I(G) = I(p). The answer is no (see an example in Koller and Friedman (2009)). Another related question is whether perfect maps are unique when they exist. This is not the case either; for example, the DAGs X → Y and X ← Y encode the same independencies, yet form different graphs. In the general case we say that two Bayesian networks B1, B2 are I-equivalent if their DAGs encode the same independencies, I(G1) = I(G2). For the case of three variables we can notice that the graphs (a), (b) and (c) in Figure 2.2 encode the same independencies, so as long as we do not turn graphs into V-structures ((d) is the only structure which encodes the dependency X ̸⊥ Y | Z) we can change the directions in them and get I-equivalent graphs. This brings us to the fact that if G1, G2 have the same skeleton (meaning that if we drop the directionality of the arrows, we obtain the same undirected graph) and the same V-structures, then I(G1) = I(G2). For the full proof of this statement, of the other statements made previously, and for more information about BNs, see Koller and Friedman (2009).
+2.3 Continuous Time Markov Processes
+In this section we collect auxiliary results on Markov processes with continuous time. We can think of a continuous time random process X as a collection of random variables indexed by time t ∈ [0, ∞). It is sometimes more convenient to view X across all values of t as a single variable, whose values are functions of time, also called paths or trajectories.
+Definition 2.4. The Markov condition is the assumption that the future of a process is independent of its past given its present. More explicitly, the process X satisfies the Markov property iff P(X(t + ∆t) | X(s), 0 ≤ s ≤ t) = P(X(t + ∆t) | X(t)) for all t, ∆t > 0 (Chung and Walsh (2005)).
+In this thesis we focus on Markov processes with a finite state space, which are basically defined by an initial distribution and a matrix of transition intensities. The framework of CTBNs is based on the notion of homogeneous Markov processes, in which the transition intensities do not depend on time.
+Definition 2.5. Let X be a stochastic process with continuous time. Let the state space of X be Val(X) = {x1, x2, ..., xN}. Then X is a homogeneous Markov process if and only if its behavior can be specified in terms of an initial distribution P_X^0 over Val(X) and a Markovian transition model, usually presented as an intensity matrix
+QX = [ −q1   q12   . . .  q1N
+        q21  −q2   . . .  q2N
+        ...   ...  . . .   ...
+        qN1   qN2  . . .  −qN ] ,   (2.3)
+where qi = Σ_{j ̸= i} qij and all the entries qi and qij are positive.
+Intuitively, the intensity qi gives the "instantaneous probability" of leaving the state xi and the intensity qij gives the "instantaneous probability" of the jump from xi to xj. More formally, for i ̸= j,
+P(X(t + ∆t) = xj | X(t) = xi) = qij∆t + O(∆t²) as ∆t → 0,   (2.4)
+and for all i = 1, . . . , N,
+P(X(t + ∆t) = xi | X(t) = xi) = 1 − qi∆t + O(∆t²) as ∆t → 0.   (2.5)
+Therefore, the matrix QX describes the instantaneous behavior of the process X and also makes the process satisfy the Markov assumption, since it is defined solely in terms of the current state.
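+The transient behavior described by (2.4) and (2.5) suggests a direct way to sample trajectories: wait an exponential time with parameter qi, then jump to xj with probability qij/qi. Here is a minimal sketch of such a simulator (our own code, written for the intensity matrix QB of Example 2.7 below; any intensity matrix without absorbing states would do):
+
+import numpy as np
+
+def simulate_mjp(Q, x0, t_max, rng):
+    """Sample a trajectory of a homogeneous Markov process with intensity matrix Q."""
+    t, x, path = 0.0, x0, [(0.0, x0)]
+    while True:
+        q_x = -Q[x, x]                   # total intensity of leaving state x
+        t += rng.exponential(1.0 / q_x)  # exponential holding time
+        if t >= t_max:
+            return path
+        probs = Q[x].copy()
+        probs[x] = 0.0                   # jump to j with probability q_xj / q_x
+        x = int(rng.choice(len(Q), p=probs / q_x))
+        path.append((t, x))
+
+Q = np.array([[-0.21, 0.20, 0.01],       # states: 0 falling, 1 steady, 2 rising
+              [0.05, -0.10, 0.05],
+              [0.01, 0.20, -0.21]])
+print(simulate_mjp(Q, x0=0, t_max=24.0, rng=np.random.default_rng(1)))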
+The instantaneous specification of the transition model of X induces a probability distribution over the set of its possible trajectories. To see how this distribution is induced, we must first recall the notion of a matrix function.
+Definition 2.6. The matrix exponential for a matrix Q is defined as
+exp Q = Σ_{k=0}^{∞} Q^k / k!.
+Now the set of Equations (2.4) and (2.5) can be written collectively in the form
+P(X(t + ∆t) | X(t)) = exp(QX∆t) = I + QX∆t + O(∆t²) as ∆t → 0.   (2.6)
+So given the matrix QX we can describe the transient behavior of X(t) as follows. If X(0) = xi, then the process stays in the state xi for an amount of time exponentially distributed with parameter qi. Hence, the probability density function f and the corresponding distribution function F of the time for which X(t) remains equal to xi are given by
+f(t) = qi exp(−qit), t ≥ 0,
+F(t) = 1 − exp(−qit), t ≥ 0.
+The expected time until the change of the state is 1/qi. Upon transitioning, X jumps to the state xj with probability qij/qi for j ̸= i.
+Example 2.7. Assume that we want to model the behavior of the barometric pressure B(t) discretized into three states (b1 = falling, b2 = steady, and b3 = rising). Then, for instance, we could write the intensity matrix as
+QB = [ −0.21   0.2   0.01
+        0.05  −0.1   0.05
+        0.01   0.2  −0.21 ] .
+If we view the units of time as hours, this means that if the pressure is falling, we expect that it will stop falling in a little less than 5 hours (1/0.21 hours). It will then transition to being steady with probability 0.2/0.21 ≈ 0.95 and to rising with probability 0.01/0.21 ≈ 0.0476.
+When the transition model is defined solely in terms of an intensity matrix (as above), we refer to it as using a pure intensity parameterization. The parameters for an N-state process are {qi, qij ∈ QX, 1 ≤ i, j ≤ N, i ̸= j}.
+This is not the only way to parameterize a Markov process. Note that the distribution over transitions of X factors into two pieces: an exponential distribution over when the next transition will occur and a multinomial distribution over where the process jumps. This is called a mixed intensity parameterization.
+Definition 2.8. The mixed intensity parameterization for a homogeneous Markov process X with N states is given by two sets of parameters
+qX = {qi, 1 ≤ i ≤ N} and θX = {θij, 1 ≤ i, j ≤ N, i ̸= j},
+where qX is a set of intensities parameterizing the exponential distributions over when the next transition occurs, and θX is a set of probabilities parameterizing the distribution over where the process jumps.
+To relate these two parameterizations we note the following theorem from Nodelman (2007).
+Theorem 2.9. Let X and Y be two Markov processes with the same state space and the same initial distribution. If X is defined by the intensity matrix QX given by (2.3), and Y is the process defined by the mixed intensity parameterization qY = {q′1, . . . , q′N} and θY = {θ′ij, i ̸= j}, then X and Y are stochastically equivalent, meaning they have the same state space and transition probabilities, if and only if q′i = qi for all i = 1, . . . , N and θ′ij = qij/qi for all 1 ≤ i, j ≤ N, i ̸= j.
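+Both the finite-horizon transition probabilities and the mixed parameterization of Example 2.7 are easy to compute; a sketch using SciPy (our code, with the barometer matrix):
+
+import numpy as np
+from scipy.linalg import expm
+
+QB = np.array([[-0.21, 0.20, 0.01],
+               [0.05, -0.10, 0.05],
+               [0.01, 0.20, -0.21]])
+
+# transition probabilities over a finite horizon: P(X(t) | X(0)) = exp(QB t)
+print(expm(QB * 6.0).round(3))               # six hours ahead; rows sum to 1
+
+# mixed intensity parameterization of the 'falling' state (Theorem 2.9)
+q_fall = -QB[0, 0]
+print(1.0 / q_fall)                          # expected holding time, about 4.76 h
+print(QB[0, 1] / q_fall, QB[0, 2] / q_fall)  # theta: 0.95 to steady, 0.048 to rising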
2.4 Conditional Markov Processes

In order to compose Markov processes in a larger network, we need to introduce the notion of a conditional Markov process. This is an inhomogeneous Markov process where the intensities vary with time, but not as a direct function of time. Rather, the intensities depend on the current values of a set of other variables, which also evolve as Markov processes.

Let Y be a process with a state space Val(Y) = {y_1, y_2, ..., y_m}. Assume that Y evolves as a Markov process Y(t) whose dynamics are conditioned on a set V of variables, each of which can also evolve over time. Then we have a conditional intensity matrix (CIM), which can be written as
\[
Q_{Y|V} = \begin{pmatrix}
-q_1(V) & q_{12}(V) & \dots & q_{1m}(V) \\
q_{21}(V) & -q_2(V) & \dots & q_{2m}(V) \\
\vdots & \vdots & \ddots & \vdots \\
q_{m1}(V) & q_{m2}(V) & \dots & -q_m(V)
\end{pmatrix}.
\]
Equivalently, we can view a CIM as a set of intensity matrices Q_{Y|v}, one for each instantiation of values v to the variables V; see Example 2.10. Since the framework of CTBNs which we consider in this thesis has a graph at its core, we will refer to the set of variables V as the set of parents of Y and denote it by pa_G(Y). Note that if the parent set pa_G(Y) is empty, then the CIM is simply a standard intensity matrix. Just as a regular intensity matrix, a CIM induces the distribution of the dynamics of Y given the behaviour of pa_G(Y) = V. If V takes the value v on the interval [t, t + ε) for some ε > 0, then as in Equation (2.6)
\[
\lim_{\Delta t \to 0} P(Y_{t+\Delta t} \mid Y_t, v) = \lim_{\Delta t \to 0} \exp(Q_{Y|v} \Delta t) = \lim_{\Delta t \to 0} \bigl( I + Q_{Y|v} \Delta t + O(\Delta t^2) \bigr).
\]
If we specify an initial distribution of Y, then we have defined a Markov process whose behaviour depends on the instantiation v of values of pa_G(Y).

Example 2.10. Consider a variable E(t) which models whether or not a person is eating (e_1 = not eating, e_2 = eating) conditioned on a variable H(t) which models whether or not the person is hungry (h_1 = not hungry, h_2 = hungry). Then we can specify an exemplary CIM for E(t) as
\[
Q_{E|h_1} = \begin{pmatrix} -0.01 & 0.01 \\ 10 & -10 \end{pmatrix}, \qquad
Q_{E|h_2} = \begin{pmatrix} -2 & 2 \\ 0.01 & -0.01 \end{pmatrix}.
\]
For instance, given this model, we expect that a person who is hungry and not eating is going to start eating in half an hour. Also, we expect a person who is not hungry and is eating to stop eating in 6 minutes (1/10 hour).
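A CIM is conveniently stored as a mapping from parent instantiations to ordinary intensity matrices. The short snippet below (an illustration of ours, not code from the source) encodes the matrices of Example 2.10 and recovers the expected holding times quoted above from the diagonal entries.

```python
import numpy as np

# A CIM is one intensity matrix per instantiation of the parents.
# Matrices from Example 2.10: E(t) (not eating / eating) given H(t).
Q_E = {
    "h1": np.array([[-0.01,  0.01],
                    [10.0, -10.0]]),   # not hungry
    "h2": np.array([[-2.0,   2.0],
                    [ 0.01, -0.01]]),  # hungry
}

# Expected holding time in state e_i given parent value v is 1 / q_i(v).
for v, Q in Q_E.items():
    print(v, "expected holding times (hours):", -1.0 / np.diag(Q))
# h1 -> [100.  0.1]  (stops eating in 6 minutes when not hungry)
# h2 -> [0.5  100.]  (starts eating in half an hour when hungry)
```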
2.5 Continuous time Bayesian networks

In this section we define the notion of a CTBN, which in essence is a probabilistic graphical model whose nodes are variables, whose state evolves continuously over time, and where the evolution of each variable depends on the state of its parents in the graph.

Before the formal definition we recall an example from Nodelman et al. (2002). Consider the situation in medical research where some drug has been administered to a patient and we wish to know how much time it takes for the drug to have an effect. The answer to this question will likely depend on various factors, such as how recently the patient ate. We want to model the temporal process for the effect of the drug and how its dynamics depends on other factors. In contrast to previously developed methods of approaching such a problem (e.g. event history analysis, Markov process models), the notion of a CTBN introduced by Nodelman et al. (2002) allows the specification of models with a large structured state space where some variables do not directly depend on others. For example, the distribution of how fast the drug takes effect might be mediated through how fast it reaches the bloodstream, which in turn may be affected by how recently the person ate. Figure 2.4 shows an exemplary graph structure for a CTBN modelling the drug effect. There are nodes for the uptake of the drug and for the resulting concentration of the drug in the bloodstream. The concentration is also affected by how full the patient's stomach is. The drug is supposed to alleviate joint pain, which may be aggravated by falling pressure. The drug may also cause drowsiness. The model contains a cycle, indicating that whether the person is hungry depends on how full their stomach is, which depends on whether or not they are eating.

Figure 2.4: An exemplary graph structure for the drug effect CTBN, with nodes Eating, Hungry, Full stomach, Uptake, Concentration, Barometer, Joint pain and Drowsy.

Let G = (V, E) denote a directed graph with possible cycles, where V is the set of nodes and E is the set of edges. Further, in the context of probabilistic graphical models we use the terms "nodes" and "random variables" interchangeably. For every w ∈ V we consider a corresponding space X_w of possible states at w and we assume that each space X_w is finite. We consider a continuous time stochastic process on a product space X = ∏_{w∈V} X_w, so a state s ∈ X is a configuration s = (s_w)_{w∈V}, where s_w ∈ X_w. If W ⊆ V, then we write s_W = (s_w)_{w∈W} for the configuration s restricted to the nodes in W. We also use the notation X_W = ∏_{w∈W} X_w, so we can write s_W ∈ X_W. In what follows we use the bold symbol s to denote configurations belonging to X only. All restricted configurations will be denoted with the standard font s.

Now suppose we have a family of functions Q_w : X_{pa_G(w)} × (X_w × X_w) → [0, ∞). For a fixed c ∈ X_{pa_G(w)} we consider Q_w(c; ·, ·) as a conditional intensity matrix (CIM) at the node w (only off-diagonal elements of this matrix have to be specified; the diagonal ones are irrelevant). The state of the CTBN at time t is a random element X(t) of the space X of all the configurations. Let X_w(t) denote its w-th coordinate. The process {(X_w(t))_{w∈V} : t ≥ 0} is assumed to be Markov and its evolution can be described informally as follows: transitions, or jumps, at the node w depend on the current configuration of its parents. If the state of any parent changes, then the node w switches to other transition probabilities. If s_w ≠ s'_w, where s_w, s'_w ∈ X_w, then
\[
P(X_w(t + dt) = s'_w \mid X_{-w}(t) = s_{-w}, X_w(t) = s_w) = Q_w(s_{pa_G(w)}; s_w, s'_w)\, dt.
\]

Definition 2.11. A continuous time Bayesian network N over a set of random variables X = {X_1, ..., X_n} is formed by two components. The first one is an initial distribution P^0_X specified as a Bayesian network B over X. The second component is a continuous transition model, specified as

• a directed (possibly cyclic) graph G whose nodes correspond to the random variables X_i;

• a conditional intensity matrix Q_{X_i|pa_G(X_i)}, specifying the continuous dynamics of each variable X_i given its parents' configuration.

Essentially, a CTBN is a Markov jump process (MJP) on the state space X with transition intensities given by
\[
Q(s, s') = \begin{cases}
Q_w(s_{pa_G(w)}; s_w, s'_w), & \text{if } s_{-w} = s'_{-w} \text{ and } s_w \neq s'_w \text{ for some } w, \\
0, & \text{if } s_{-w} \neq s'_{-w} \text{ for all } w,
\end{cases} \tag{2.7}
\]
for s ≠ s'. Obviously, Q(s, s) is defined "by subtraction" to ensure that Σ_{s'} Q(s, s') = 0. For convenience, we will often write Q(s) = −Q(s, s), so that Q(s) ≥ 0. In particular, Q_w(c; s) = Σ_{s'≠s} Q_w(c; s, s').

It is important to note that we make a fundamental assumption in the construction of the CTBN model: two variables cannot transition at the same time (a zero in the definition of Q(s, s')). This can be viewed as a formalization of the view that variables must represent distinct aspects of the world. We should not, therefore, model a domain in which we have two variables that functionally and deterministically change simultaneously. For example, in the drug effect network, we should not add a variable describing the type of food, if any, a person is eating. We could, however, change the value space of the "Eating" variable from a binary "yes/no" to a more descriptive set of possibilities.
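Equation (2.7) can be turned into a small routine that amalgamates local CIMs into the generator of the joint Markov jump process. The sketch below is ours; the intensity matrices chosen for the node H are toy values invented purely for illustration, and only those for E come from Example 2.10.

```python
import itertools
import numpy as np

def amalgamate(spaces, parents, cims):
    """Build the generator Q(s, s') of the joint Markov jump process from
    local CIMs, following Equation (2.7): only single-node changes have
    non-zero intensity. `spaces[w]` lists the states of node w, `parents[w]`
    is its parent tuple, and `cims[w][c]` is the intensity matrix of w under
    parent configuration c."""
    nodes = list(spaces)
    configs = list(itertools.product(*(range(len(spaces[w])) for w in nodes)))
    index = {s: k for k, s in enumerate(configs)}
    Q = np.zeros((len(configs), len(configs)))
    for s in configs:
        for i, w in enumerate(nodes):
            c = tuple(s[nodes.index(u)] for u in parents[w])
            for sw in range(len(spaces[w])):
                if sw == s[i]:
                    continue
                s2 = s[:i] + (sw,) + s[i + 1:]
                Q[index[s], index[s2]] = cims[w][c][s[i], sw]
    np.fill_diagonal(Q, 0.0)
    np.fill_diagonal(Q, -Q.sum(axis=1))   # Q(s, s) "by subtraction"
    return Q

spaces  = {"E": ["e1", "e2"], "H": ["h1", "h2"]}
parents = {"E": ("H",), "H": ("E",)}          # the cycle E <-> H
cims = {
    "E": {(0,): np.array([[-0.01, 0.01], [10.0, -10.0]]),
          (1,): np.array([[-2.0, 2.0], [0.01, -0.01]])},
    "H": {(0,): np.array([[-0.5, 0.5], [2.0, -2.0]]),     # toy values
          (1,): np.array([[-0.05, 0.05], [4.0, -4.0]])},  # toy values
}
Q = amalgamate(spaces, parents, cims)         # 4 x 4 generator
```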
Further we omit the symbol G in the indices and write pa(w) instead of pa_G(w). For a CTBN the density of a sample trajectory X = X([0, T]) on a bounded time interval [0, T] decomposes as follows:
\[
p(X) = \nu(X(0)) \prod_{w \in V} p(X_w \,\|\, X_{pa(w)}), \tag{2.8}
\]
where ν is the initial distribution on X and p(X_w || X_pa(w)) is the density of a piecewise homogeneous Markov jump process with intensity matrix equal to Q_w(c; ·, ·) in every time sub-interval such that X_pa(w) = c. Below we explicitly write an expression for the density p(X_w || X_pa(w)) in terms of the moments of jumps and the skeleton of the process (X_w, X_pa(w)), as in (2.8), where by the skeleton we understand the sequence of states of the process corresponding to the sequence of moments of time.

Let T^w = (t^w_0, ..., t^w_i, ...) and T^pa(w) = (t^pa(w)_0, ..., t^pa(w)_j, ...) denote the moments of jumps at the node w ∈ V and at the parent nodes, respectively. By convention, put t^w_0 = t^pa(w)_0 = 0 and t^w_{|T^w|+1} = t^pa(w)_{|T^pa(w)|+1} = t_max. Analogously, S^w and S^pa(w) denote the corresponding skeletons. Thus we divide the time interval [0, t_max] into disjoint segments [t^pa(w)_j, t^pa(w)_{j+1}), j = 0, 1, ..., |T^pa(w)|, such that X_pa(w) is constant and X_w is homogeneous in each segment. Next we define the sets I_j = {i > 0 : t^pa(w)_j < t^w_i < t^pa(w)_{j+1}}, with the notation j_beg and j_end for the first and the last element of I_j. Then we obtain the following formula:
\[
\begin{aligned}
p(X_w \,\|\, X_{pa(w)}) &= p(T^w, S^w \,\|\, S^{pa(w)}, T^{pa(w)}) = \\
&= \prod_{j=0}^{|T^{pa(w)}|} \Biggl[ \mathbb{I}(I_j \neq \emptyset) \Biggl( \prod_{i \in I_j} Q_w\bigl(s^{pa(w)}_j; s^w_{i-1}, s^w_i\bigr) \times \prod_{i \in I_j \setminus \{j_{beg}\}} \exp\Bigl( -(t^w_i - t^w_{i-1})\, Q_w\bigl(s^{pa(w)}_j; s^w_{i-1}\bigr) \Bigr) \times \\
&\qquad \times \exp\Bigl( -(t^w_{j_{beg}} - t^{pa(w)}_j)\, Q_w\bigl(s^{pa(w)}_j; s^w_{j_{beg}-1}\bigr) - (t^{pa(w)}_{j+1} - t^w_{j_{end}})\, Q_w\bigl(s^{pa(w)}_j; s^w_{j_{end}}\bigr) \Bigr) \Biggr) + \\
&\qquad + \mathbb{I}(I_j = \emptyset) \exp\Bigl( -(t^{pa(w)}_{j+1} - t^{pa(w)}_j)\, Q_w\bigl(s^{pa(w)}_j; s^w_{j_{beg}-1}\bigr) \Bigr) \Biggr].
\end{aligned}
\]
Below in Figure 2.5 there is an example of a trajectory of the node w with two possible states and of its parent, also with two possible states 0 and 1. In this case the sets of indices are I_0 = {2, 3, 4}, I_1 = ∅ and I_2 = {7}.

Figure 2.5: An exemplary trajectory of a node w and its parents pa(w).

In consequence, using the fundamental property of the exponential function, we may write p(X_w || X_pa(w)) in the form
\[
p(X_w \,\|\, X_{pa(w)}) = \prod_{c \in X_{pa(w)}} \prod_{s \in X_w} \prod_{\substack{s' \in X_w \\ s' \neq s}} Q_w(c; s, s')^{\,n^T_w(c;\, s, s')} \exp\bigl( -Q_w(c; s, s')\, t^T_w(c; s) \bigr), \tag{2.9}
\]
where

• n^T_w(c; s, s') denotes the number of jumps from s ∈ X_w to s' ∈ X_w at the node w on the time interval [0, T] which occur when the parent configuration is c ∈ X_pa(w),

• t^T_w(c; s) is the length of time that the node w spends in the state s ∈ X_w on the time interval [0, T] when the configuration of the parents is c ∈ X_pa(w).

To simplify the notation, we omit the upper index T in n^T_w(c; s, s') and t^T_w(c; s) further in the thesis, except for the part where we consider martingales.
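Equation (2.9) shows that the trajectory density depends on the data only through the jump counts n_w(c; s, s') and the dwell times t_w(c; s). A minimal sketch of the resulting log-density, assuming these sufficient statistics have already been collected from a trajectory (all names are ours), might look as follows:

```python
import numpy as np

def log_density(Q, n, t):
    """Log of Equation (2.9) for a single node w: `Q[c]` is the intensity
    matrix under parent configuration c (with positive off-diagonal
    entries), `n[c][s, s2]` the jump counts n_w(c; s, s') and `t[c][s]`
    the dwell times t_w(c; s) collected from a trajectory on [0, T]."""
    ll = 0.0
    for c, Qc in Q.items():
        m = len(Qc)
        for s in range(m):
            for s2 in range(m):
                if s2 == s:
                    continue
                # n * log Q  -  Q * t, summed over (c, s, s')
                ll += n[c][s, s2] * np.log(Qc[s, s2]) - Qc[s, s2] * t[c][s]
    return ll
```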
2.6 The LASSO penalty

In this section we briefly describe the notions of the LASSO penalty and LASSO estimators, which constitute the basis of the novel algorithms for structure learning in this thesis. LASSO is the acronym for Least Absolute Shrinkage and Selection Operator. The term was coined by Tibshirani (1996), though the general concept was introduced even earlier. Most of the contents of this section come from Hastie et al. (2015).

The underlying idea of LASSO estimators is the assumption of sparsity. A sparse statistical model is one in which only a relatively small number of parameters (or predictors) play an important role. Consider a linear regression model with N observations y_i of a target variable and vectors x_i = (x_{i1}, ..., x_{ip})^T of p associated predictor variables, which are also called features. The goal is to predict the target from the predictors for future data and also to discover which predictors are relevant. In the linear regression model we assume that
\[
y_i = \beta_0 + \sum_{j=1}^{p} \beta_j x_{ij} + \epsilon_i,
\]
where β = (β_0, β_1, ..., β_p) is the vector of unknown parameters and ε_i is an error term. The standard way to find β is to minimize the least-squares function
\[
\sum_{i=1}^{N} \Bigl( y_i - \beta_0 - \sum_{j=1}^{p} \beta_j x_{ij} \Bigr)^2.
\]
Typically all of the resulting estimates are non-zero, which complicates the interpretability of the model, especially when the number of possible predictors is large. Moreover, since the data are noisy, the model will fit the training observations too closely and the parameters will most probably take extreme values. When p > N the estimates are not even unique, and most solutions overfit the data.

The solution is to regularize the estimation process, i.e. to add constraints on the parameters. The LASSO estimator uses an ℓ1-penalty, which means that we minimize the least-squares function with an additional bound on the ℓ1-norm of β, namely ‖β‖_1 = Σ_{j=1}^{p} |β_j| ≤ t. The value t is a user-specified parameter, usually called a hyperparameter. The motivation to use the ℓ1-penalty instead of any other ℓq-penalty comes from the fact that if t is small enough, we obtain a sparse solution with only a small number of non-zero parameters. This does not happen for the ℓq-norm if q > 1, and if q < 1 the solutions are sparse but the problem is not convex. Convexity simplifies the computations as well as the theoretical analysis of the properties of the estimator; this allows for scalable algorithms capable of handling problems with even millions of parameters. Before the optimization process we typically standardize the predictors so that each column is centred (the mean of each column is 0) and has unit variance (the mean of squares is equal to 1). We also centre the target column, so as a result we can omit the intercept term β_0 in the estimation process.

The LASSO penalty is used not only in linear regression but in a wide variety of models, for example generalized linear models, where the target and the linear model are connected through some link function. Hence, in the more general case we can formulate the optimization problem as
\[
\hat{\beta} = \operatorname*{argmin}_{\theta \in \mathbb{R}^p} \bigl[ L(\theta, D) + \lambda \|\theta\|_1 \bigr],
\]
where L(θ, D) is an arbitrary loss function for the data D and the parameter vector θ. The tuning hyperparameter λ corresponds to the constraining value t; there is a one-to-one correspondence between them. This is the so-called Lagrangian form of the LASSO problem described above.
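In the Lagrangian form, the LASSO estimate can be computed by proximal gradient descent, where each gradient step is followed by soft-thresholding. The following sketch (a plain ISTA iteration; the function names are ours, and no claim is made that this is the algorithm used later in the thesis) minimizes (1/2N)‖y − Xβ‖² + λ‖β‖₁ for standardized data:

```python
import numpy as np

def soft_threshold(z, gam):
    """Proximal operator of gam * ||.||_1 (coordinate-wise shrinkage)."""
    return np.sign(z) * np.maximum(np.abs(z) - gam, 0.0)

def lasso_ista(X, y, lam, n_iter=1000):
    """Proximal gradient (ISTA) for min_b (1/2N)||y - X b||^2 + lam*||b||_1,
    assuming the columns of X are standardized and y is centred, so that
    the intercept can be dropped as described above."""
    N, p = X.shape
    step = 1.0 / (np.linalg.norm(X, 2) ** 2 / N)   # 1 / Lipschitz constant
    b = np.zeros(p)
    for _ in range(n_iter):
        grad = X.T @ (X @ b - y) / N               # gradient of smooth part
        b = soft_threshold(b - step * grad, step * lam)
    return b
```

For small enough λ the iterates approach the ordinary least-squares fit, while larger λ drives more coordinates of b exactly to zero, which is precisely the sparsity property discussed above.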
In the setting of structure learning for Bayesian networks, both static and continuous time, we formulate the problem as an optimization problem for a linear or generalized linear model, where the parameter vectors encode the dependencies between variables in the network. We use the LASSO penalty in all the formulated problems; hence the problem of finding arrows in the graph reduces to recovering certain non-zero parameters in the LASSO estimator. As the loss functions we use the negative log-likelihood function and the residual sum of squares.

Chapter 3

Statistical inference for networks with known structure

There are three main classes of problems concerning Bayesian networks (both static and continuous time). The first one is to discover the structure of the network. Namely, we need to specify the underlying graph of the network, whose nodes are the variables of interest and whose edges encode the dependencies between the variables. This problem will be covered in subsequent chapters.

The second problem is to learn the parameters of the network. Namely, knowing the structure of the network, we need to specify the behaviour of the network at any given node given the states of its parents. In the context of static BNs this behaviour is encoded by conditional probability distributions (CPDs, see (2.2)). The corresponding parameters in the case of CTBNs are conditional intensity matrices (CIMs, see (2.7)).

The third type of problem is to make statistical inference using a network with known structure and parameters. For instance, we may want to predict the state of some node of interest or, knowing the states of some nodes, find which combination of the remaining nodes explains them best. Finally, we may be interested in predicting the future dynamics (in time) of some nodes of the network.

In this chapter we discuss well-known results concerning the problems of learning the parameters of the network and then the inference based on the fully specified network. The contents of this chapter are mainly based on Koller and Friedman (2009), Nodelman (2007) and Heckerman (2021), with more detailed references throughout.

3.1 Learning probabilities in BNs

First we discuss the discrete case. We assume that the Bayesian network with the known underlying graph G includes n nodes, each corresponding to a variable X_i ∈ X for i = 1, ..., n. Also, each variable X_i is discrete, having r_i possible values x^1_i, x^2_i, ..., x^{r_i}_i. We denote the observed value of X_i in the l-th observation by X_i[l]. If each node is observed m times, then we obtain the dataset D = {D_1, D_2, ..., D_m} with the sample D_l = (X_1[l], X_2[l], ..., X_n[l]) indicating the observed values of all the nodes in the l-th sampling. We refer to each D_l as a case. If all cases are complete, i.e. no missing values occur in the dataset D, it is considered complete data; otherwise, it is called incomplete data. Missing values in data can occur for many different reasons; for instance, people filling out a survey may prefer not to answer some questions, or certain measurements might not be available for some patients in a medical setting.

There are mainly two categories of methods for parameter estimation in BNs: one for dealing with complete data, and the other for incomplete data. We will provide concise descriptions of two algorithms for the first category, namely maximum likelihood estimation and the Bayesian method, and we will briefly discuss algorithms for the second category.

Assume that, as in (2.2), we can write the joint distribution of the variables in X as follows:
\[
P(X_1, X_2, ..., X_n \mid \theta) = \prod_{i=1}^{n} P(X_i \mid pa_G(X_i), \theta_i)
\]
for some vector of parameters θ = (θ_1, ..., θ_n), where θ_i is the vector of parameters for the local distribution P(X_i | pa_G(X_i), θ_i). For shortness, further in this chapter we write pa(X_i) instead of pa_G(X_i).
In the case of discrete and completely observed data the categorical distribution is commonly used. We note that in the literature concerning learning Bayesian networks this type of distribution is often referred to as a multinomial distribution, or in some cases as an unrestricted multinomial distribution (for example Heckerman (2021)), to differentiate it from multinomial distributions that are low-dimensional functions of pa(X_i).

Hence we assume that each local distribution function is a collection of categorical distributions, one distribution for each configuration of the parents, namely
\[
P(X_i = x^k_i \mid pa^j_i, \theta_i) = \theta_{ijk} > 0, \quad \text{for } 1 \leq k \leq r_i,\ 1 \leq j \leq q_i, \tag{3.1}
\]
where q_i = ∏_{X_j ∈ pa(X_i)} r_j and pa^1_i, pa^2_i, ..., pa^{q_i}_i denote all possible configurations of pa(X_i), and θ_i = ((θ_{ijk})^{r_i}_{k=2})^{q_i}_{j=1} are the parameters. Note that the parameter θ_{ij1} is given by the difference 1 − Σ^{r_i}_{k=2} θ_{ijk}. For convenience, let us denote the vector of parameters θ_{ij} = (θ_{ij2}, θ_{ij3}, ..., θ_{ijr_i}) for all 1 ≤ i ≤ n and 1 ≤ j ≤ q_i, so that θ_i = (θ_{ij})^{q_i}_{j=1}.

As is well known, maximum likelihood estimation (MLE) is a method of estimating the parameters of a probability distribution by maximizing the likelihood function, so that under the assumed statistical model the observed data are the most probable. In other words, if C is the observed outcome of a random experiment, the estimate is the value of the parameter θ that maximizes the value of the likelihood function P(C | θ).

For a general Bayesian network with n nodes the likelihood function is
\[
L(\theta : D) = P(D \mid \theta) = \prod_{l=1}^{m} P(D_l \mid \theta) = \prod_{l=1}^{m} P(X_1[l], ..., X_n[l] \mid \theta) = \prod_{l=1}^{m} \prod_{i=1}^{n} P(X_i[l] \mid pa_i[l], \theta_i) = \prod_{i=1}^{n} \prod_{l=1}^{m} P(X_i[l] \mid pa_i[l], \theta_i) = \prod_i L_i(\theta_i : D), \tag{3.2}
\]
where by pa_i[l] = pa(X_i)[l] we denote the l-th observation of the parent vector of the variable X_i. This representation shows that the likelihood decomposes as a product of independent factors, one for each CPD in the network. This important property is called the global decomposition of the likelihood function. Moreover, this decomposition is an immediate consequence of the network structure and does not depend on any particular choice of the parameterization of the CPDs (see Koller and Friedman (2009)).

If the conditional distribution of X_i given its parents pa_G(X_i) is categorical, then the local likelihood function can be further decomposed as follows:
\[
L_i(\theta_i : D) = \prod_{l=1}^{m} P(X_i[l] \mid pa_i[l], \theta_i) = \prod_{l=1}^{m} \prod_{j=1}^{q_i} \prod_{k=1}^{r_i} P(X_i[l] = x^k_i \mid pa_i[l] = pa^j_i, \theta_i) = \prod_{j=1}^{q_i} \prod_{k=1}^{r_i} \theta_{ijk}^{\,N(x^k_i, pa^j_i)}, \tag{3.3}
\]
where N(x^k_i, pa^j_i) is the number of cases in D for which X_i = x^k_i and pa(X_i) = pa^j_i.

Given that the dataset is complete, for each possible value pa^j_i of the parents pa(X_i) of the node X_i the probability P(X_i | pa^j_i) is an independent categorical distribution, not related to any other configuration pa^l_i of pa(X_i) for j ≠ l. Therefore, as the result of the MLE method we obtain the estimated parameter θ̂ as follows:
\[
\hat{\theta}_{ijk} = \frac{N(x^k_i, pa^j_i)}{N(pa^j_i)},
\]
where N(pa^j_i) denotes the number of cases in which the configuration pa^j_i appears in the full set of observations of the parent vector pa(X_i).
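Since the MLE reduces to counting, a direct implementation only has to accumulate N(x^k_i, pa^j_i) and N(pa^j_i). A minimal sketch (our illustration, with cases stored as dictionaries mapping variable names to observed values) is:

```python
from collections import Counter

def mle_cpd(data, child, parents):
    """Maximum likelihood estimate of P(child | parents) from complete data.
    `data` is a list of cases (dicts); returns a dict mapping
    (parent_config, child_value) to theta_hat. Configurations that never
    occur in the data simply get no entry."""
    n_joint = Counter()   # N(x_i^k, pa_i^j)
    n_pa = Counter()      # N(pa_i^j)
    for case in data:
        pa = tuple(case[p] for p in parents)
        n_joint[(pa, case[child])] += 1
        n_pa[pa] += 1
    return {(pa, x): n / n_pa[pa] for (pa, x), n in n_joint.items()}
```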
Note that in general the MLE approach attempts to find the parameter vector θ that is "the best" given the data C. The Bayesian approach, on the other hand, does not attempt to find such a point estimate. Instead, the underlying principle is that we should keep track of our beliefs about the values of θ and use these beliefs to reach conclusions. In other words, we should quantify the subjective probability we initially assigned to different values of θ, taking into account new evidence. Note that in representing such subjective probabilities we now treat θ as a random variable. Thus, the Bayesian approach is based on the Bayes rule
\[
p(\theta \mid C) = \frac{p(C \mid \theta)\, p(\theta)}{p(C)}. \tag{3.4}
\]
Hence, the basic idea of the Bayesian method for parameter learning is the following. We treat θ as a random variable with a prior distribution p(θ); it is very common to set p as the uniform distribution, especially when we have no prior knowledge about θ. Given a distribution with unknown parameters and a complete set of observed data C, new beliefs about θ, namely p(θ | C), can be estimated according to the previous knowledge. The aim is to calculate p(θ | C), which is called the posterior probability of the parameter θ. For computational efficiency we want to use a conjugate prior, i.e. one for which the posterior distribution after conditioning on the data is in the same parametric family as the prior.

Here we assume that each vector θ_ij has a prior Dirichlet distribution, so that
\[
p(\theta_{ij}) = \mathrm{Dir}(\theta_{ij} \mid \alpha_{ij1}, ..., \alpha_{ijr_i}) = \frac{\Gamma(\alpha_{ij})}{\prod_{k=1}^{r_i} \Gamma(\alpha_{ijk})} \prod_{k=1}^{r_i} \theta_{ijk}^{\alpha_{ijk}-1}, \tag{3.5}
\]
where α_ij = Σ^{r_i}_{k=1} α_ijk, the quantities α_ijk > 0, k = 1, ..., r_i, are hyperparameters, and Γ(·) is the Gamma function. This is the standard conjugate prior to both categorical and multinomial distributions. Hence, the probability of the observed samples is
\[
p(D) = \int p(\theta_{ij})\, p(D \mid \theta_{ij})\, d\theta_{ij} = \int \frac{\Gamma(\alpha_{ij})}{\prod_{k=1}^{r_i} \Gamma(\alpha_{ijk})} \prod_{k=1}^{r_i} \theta_{ijk}^{\alpha_{ijk}-1} \times \prod_{k=1}^{r_i} \theta_{ijk}^{N_{ijk}}\, d\theta_{ij} = \frac{\Gamma(\alpha_{ij})}{\Gamma(\alpha_{ij} + N_{ij})} \prod_{k=1}^{r_i} \frac{\Gamma(\alpha_{ijk} + N_{ijk})}{\Gamma(\alpha_{ijk})}, \tag{3.6}
\]
where for shortness N_ijk = N(x^k_i, pa^j_i) and N_ij = N(pa^j_i) = Σ^{r_i}_{k=1} N_ijk. The integral is (r_i − 1)-dimensional, over the set {θ_ijk ≥ 0, 2 ≤ k ≤ r_i, Σ^{r_i}_{k=2} θ_ijk ≤ 1}.

As we have already mentioned, in the Bayesian method, if we do not have a prior distribution we assume it to be uniform. This is consistent with the principle of maximum entropy in information theory: the uniform distribution maximizes the entropy of random variables with bounded support. Thus, if there is no information available for determining the prior distribution, we set the hyperparameters α_ij1 = · · · = α_ijr_i = 1.

Combining (3.4), (3.5) and (3.6), under the assumptions of parameter independence and complete data we finally obtain the posterior distribution
\[
p(\theta_{ij} \mid D) = \mathrm{Dir}(\theta_{ij} \mid \alpha_{ij1} + N_{ij1}, ..., \alpha_{ijr_i} + N_{ijr_i}). \tag{3.7}
\]
Therefore, we have an estimate for each parameter θ_ijk from the data D as follows:
\[
\hat{\theta}_{ijk} = \frac{\alpha_{ijk} + N_{ijk}}{\alpha_{ij} + N_{ij}}, \quad 1 \leq k \leq r_i.
\]
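The Bayesian estimate differs from the MLE only by the added pseudo-counts. A sketch mirroring the previous one, assuming a symmetric Dirichlet prior with a common hyperparameter alpha (alpha = 1 is the uniform prior discussed above; the function name is ours):

```python
from collections import Counter

def bayes_cpd(data, child, parents, values, alpha=1.0):
    """Posterior-mean estimate under a Dirichlet(alpha, ..., alpha) prior,
    following Equation (3.7): theta_hat = (alpha + N_ijk) / (alpha*r + N_ij).
    `values` lists the r possible values of `child`."""
    n_joint, n_pa = Counter(), Counter()
    for case in data:
        pa = tuple(case[p] for p in parents)
        n_joint[(pa, case[child])] += 1
        n_pa[pa] += 1
    r = len(values)
    return {(pa, x): (alpha + n_joint[(pa, x)]) / (alpha * r + n_pa[pa])
            for pa in n_pa for x in values}
```

Unlike the pure MLE, this estimate assigns positive probability to child values that were never observed under a given parent configuration.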
Continuous Variable Networks. When discussing the MLE method for discrete BNs, we mentioned the global decomposition rule, which applies to any type of CPD. That is, if the data are complete, the learning problem reduces to a set of local learning problems, one for each variable. The main difference lies in applying the maximum likelihood estimation process to a CPD of a different type: how we define the sufficient statistics, and how we compute the maximum likelihood estimate from them. In this paragraph, we briefly discuss how MLE principles can be applied in the setting of linear Gaussian Bayesian networks.

Consider a variable X with parents U = {U_1, ..., U_k} and a linear Gaussian CPD:
\[
p(X \mid u) = \mathcal{N}(\beta_0 + \beta_1 u_1 + \dots + \beta_k u_k,\ \sigma^2).
\]
Our task is to learn the parameters θ̂_{X|U} = (β_0, β_1, ..., β_k, σ²). To find the MLE values of these parameters, we need to differentiate the likelihood function and solve the equations that define a stationary point. As usual, it is easier to work with the log-likelihood function. Using the definition of the Gaussian distribution, we have that
\[
\ell(\theta_{X|U} : D) = \log L_X(\theta_{X|U} : D) = \sum_{l} \Bigl( -\frac{1}{2} \log(2\pi\sigma^2) - \frac{1}{2\sigma^2} \bigl(\beta_0 + \beta_1 u_1[l] + \dots + \beta_k u_k[l] - x[l]\bigr)^2 \Bigr).
\]
We consider the gradients of the log-likelihood with respect to all of the parameters β_0, ..., β_k and σ², and as a result we obtain a set of equations describing the solution of a system of linear equations. From Theorem 7.3 in Koller and Friedman (2009) it follows that if B is a linear Gaussian Bayesian network, then it defines a joint distribution that is jointly Gaussian, and the MLE estimate has to match the constraints implied by it.

Briefly speaking, to estimate p(X | U) we estimate the means of X and U and the covariance matrix of {X} ∪ U from the data. The vector of means and the covariance matrix define the joint Gaussian distribution over {X} ∪ U. Then, for example using the formulas provided by Theorem 7.3 in Koller and Friedman (2009), we find the unique linear Gaussian CPD that matches the joint Gaussian with these parameters.

The sufficient statistics we need to collect to estimate linear Gaussians are the univariate terms of the form Σ_m x[m] and Σ_m u_i[m], and the interaction terms of the form Σ_m x[m]·u_i[m] and Σ_m u_j[m]·u_i[m]. From these we can estimate the mean and the covariance matrix of the joint distribution.
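For a single linear Gaussian CPD, the stationarity conditions above are exactly the normal equations of ordinary least squares, so the MLE can be obtained with a standard solver. A minimal sketch (our illustration):

```python
import numpy as np

def fit_linear_gaussian(x, U):
    """MLE for a linear Gaussian CPD p(X | u) = N(b0 + b^T u, sigma^2).
    `x` has shape (M,), `U` shape (M, k); returns (beta, sigma2),
    where beta[0] is the intercept b0."""
    M = len(x)
    A = np.column_stack([np.ones(M), U])   # design matrix with intercept
    beta, *_ = np.linalg.lstsq(A, x, rcond=None)
    resid = x - A @ beta
    sigma2 = resid @ resid / M             # the MLE divides by M, not M - 1
    return beta, sigma2
```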
3.2 Inference in Bayesian networks

In this section we assume that the network structure is known, meaning that we know all the existing edges and their directions as well as all the CPDs. The problem of inference for BNs is a challenging task in its own right and there is a large body of research on the subject. We will not go into much detail on inference, since our focus is on learning the structure. However, the question of inference is worth discussing here in order to get a complete picture of such a powerful tool as BNs.

First, we discuss what the notion of inference means in the case of BNs. Typically it refers to:

• marginal inference, i.e. finding the probability of a variable being in a certain state, given that other variables are set to certain values; or

• maximum a posteriori (MAP) inference, i.e. finding the values of a given set of variables that best explain (in the sense of the highest MAP probability) why a set of other variables have certain values.

Let us demonstrate both categories of questions using an example. We will use the BN structure of the well-known ASIA network (see Figure 3.1), first introduced in Lauritzen and Spiegelhalter (1988). It illustrates the causal structure of a patient having a certain lung disease based on several factors, one being whether or not the patient has recently been to Asia. In this case, an exemplary question on marginal inference might be: what is the probability of a patient who is a smoker and has dyspnoea having a certain lung disease, e.g. lung cancer? For MAP inference, we might want to know the most likely set of conditions (with "smoking" and "dyspnoea" excluded) that could have caused the symptoms mentioned above.

Now we provide short descriptions of the most popular exact and approximate inference algorithms for BNs. Among them are variable elimination and belief propagation for marginal inference, methods for MAP inference, and sampling-based inference. For transparency of the presentation, the inference methods for BNs will be demonstrated for the discrete and finite case.

Figure 3.1: The ASIA Bayesian network structure, with nodes "visit to Asia?", "smoking?", "tuberculosis?", "lung cancer?", "bronchitis?", "either tuberculosis or lung cancer?", "positive X-ray?" and "dyspnoea?".

3.2.1 Variable Elimination

This inference algorithm is defined in terms of so-called factors and is designed to answer questions of marginal inference. Factors generalize the notion of CPDs. A factor φ is a function with positive real values defined on value assignments of a set of random variables V. The set of variables V is called the scope of the factor. There are two operations on factors that are repeatedly performed in the variable elimination algorithm (VE) and hence are of great importance; both are illustrated in the sketch following this list.

• The factor product. If V_1, V_2 and V_3 are disjoint sets of variables and we have factors φ_1 and φ_2 with scopes V_1 ∪ V_2 and V_2 ∪ V_3 respectively, then we define the factor product φ_1 · φ_2 as a new factor ψ with the scope V_1 ∪ V_2 ∪ V_3 given by
\[
\psi(V_1, V_2, V_3) = \varphi_1(V_1, V_2) \cdot \varphi_2(V_2, V_3).
\]
This product is the new factor over the union of the variables, defined for each instantiation by multiplying the value of φ_1 on the particular instantiation by the value of φ_2 on the corresponding instantiation. More precisely,
\[
\psi(v_1, v_2, v_3) = \varphi_1(v_1, v_2) \cdot \varphi_2(v_2, v_3)
\]
for each instantiation, where v_1 ∈ Val(V_1), v_2 ∈ Val(V_2) and v_3 ∈ Val(V_3).

• The factor marginalization. This operation "locally" eliminates a set of variables from a factor. If we have a factor φ(V_1, V_2) over two sets of variables V_1, V_2, marginalizing V_2 produces a new factor
\[
\tau(V_1) = \sum_{V_2} \varphi(V_1, V_2),
\]
where the sum is over all joint assignments of the set of variables V_2. More precisely, τ(v_1) = Σ_{v_2 ∈ Val(V_2)} φ(v_1, v_2) for each instantiation v_1 ∈ Val(V_1).
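Both operations admit a very direct implementation when a factor is stored as a table indexed by assignments to its scope. The following sketch (our illustration; it assumes a shared dictionary `card` giving the cardinality of every variable) implements the factor product and factor marginalization exactly as defined above:

```python
import itertools

class Factor:
    """A factor: a table of non-negative values indexed by joint
    assignments (tuples, in scope order) to its scope."""
    def __init__(self, scope, card, table):
        self.scope, self.card, self.table = tuple(scope), card, table

    def __mul__(self, other):
        scope = tuple(dict.fromkeys(self.scope + other.scope))  # ordered union
        table = {}
        for a in itertools.product(*(range(self.card[v]) for v in scope)):
            asg = dict(zip(scope, a))
            table[a] = (self.table[tuple(asg[v] for v in self.scope)]
                        * other.table[tuple(asg[v] for v in other.scope)])
        return Factor(scope, self.card, table)

    def marginalize(self, var):
        scope = tuple(v for v in self.scope if v != var)
        table = {}
        for a, val in self.table.items():
            key = tuple(x for v, x in zip(self.scope, a) if v != var)
            table[key] = table.get(key, 0.0) + val
        return Factor(scope, self.card, table)

card = {"A": 2, "B": 2}
phi1 = Factor(("A",), card, {(0,): 0.3, (1,): 0.7})
phi2 = Factor(("A", "B"), card,
              {(a, b): 0.5 for a in range(2) for b in range(2)})
tau = (phi1 * phi2).marginalize("A")   # a factor over B alone
```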
Thus, in the context of factors we can write our distribution over all variables as a product of factors, where each factor represents a CPD as in (2.2):
\[
P(X_1, X_2, ..., X_n) = \prod_{i=1}^{n} \varphi_i(A_i), \tag{3.8}
\]
where A_i = (X_i, pa_G(X_i)) represents the set of variables including the i-th variable and its parents in the network.

Now we can describe the full VE algorithm. Assume we want to find the marginal distribution of a fixed variable from X_1, ..., X_n. First we need to choose an order O in which to eliminate the remaining variables. The choice of an optimal elimination ordering O is an NP-hard problem, and it may dramatically affect the running time of the variable elimination algorithm. Some intuitions and techniques for choosing an adequate ordering are given for example in Koller and Friedman (2009). For each variable X_i (ordered according to the ordering O) we perform the following steps:

• multiply all factors containing X_i (in the first round, all the φ_i containing X_i);

• marginalize out X_i according to the definition of factor marginalization to obtain a new factor τ (which does not necessarily correspond to a probability distribution, even though each φ is a CPD);

• replace the factors used in the first step with τ.

Essentially, we loop over the variables as ordered by O and eliminate them in this order. In performing these steps we use simple properties of products and summation of factors; namely, both operations are commutative and products are associative. The most important rule is that we can exchange summation and product, meaning that if a set of variables X is not in the scope of the factor φ_1, then
\[
\sum_{X} \varphi_1 \cdot \varphi_2 = \varphi_1 \cdot \sum_{X} \varphi_2. \tag{3.9}
\]
So far we have seen that the VE algorithm can answer queries of the form P(V), where V is some subset of variables. However, in addition to this type of question it can answer marginal queries of the form
\[
P(Y \mid E = e) = \frac{P(Y, E = e)}{P(E = e)},
\]
where P(X, Y, E) is a probability distribution over sets of query variables Y, observed evidence variables E, and unobserved variables X. We can compute this probability by performing variable elimination once on P(Y, E = e) and then once again on P(E = e), taking into account only instantiations consistent with E = e.

An exemplary run of the VE algorithm is presented in Table 3.1. It corresponds to the Extended Student example first mentioned in Section 2.2.

Step | Variable eliminated | Factors used                     | Variables involved | New factor
1    | C                   | φ_C(C), φ_D(D, C)                | C, D               | τ_1(D)
2    | D                   | φ_G(G, I, D), τ_1(D)             | G, I, D            | τ_2(G, I)
3    | I                   | φ_I(I), φ_S(S, I), τ_2(G, I)     | G, S, I            | τ_3(G, S)
4    | H                   | φ_H(H, G, J)                     | H, G, J            | τ_4(G, J)
5    | G                   | τ_3(G, S), τ_4(G, J), φ_L(L, G)  | G, J, L, S         | τ_5(J, L, S)
6    | S                   | τ_5(J, L, S), φ_J(J, L, S)       | J, L, S            | τ_6(J, L)
7    | L                   | τ_6(J, L)                        | J, L               | τ_7(J)

Table 3.1: A run of variable elimination for the query P(J).
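With the two factor operations in place, the elimination loop itself is short. A sketch (our illustration, reusing the Factor class above; it assumes every variable in the ordering appears in at least one factor):

```python
from functools import reduce

def variable_eliminate(factors, order):
    """Sum-product variable elimination: `factors` is a list of Factor
    objects and `order` the elimination ordering O. Returns the product
    of the factors that remain, a factor over the un-eliminated
    (query) variables."""
    factors = list(factors)
    for var in order:
        used = [f for f in factors if var in f.scope]       # step 1: collect
        factors = [f for f in factors if var not in f.scope]
        tau = reduce(lambda f, g: f * g, used).marginalize(var)  # steps 1-2
        factors.append(tau)                                 # step 3: replace
    return reduce(lambda f, g: f * g, factors)
```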
3.2.2 Message Passing Algorithms

Markov random fields. In the framework of probabilistic graphical models there exists another technique for compact representation and visualization of a probability distribution, formulated in the language of undirected graphs. This class of models (known as Markov random fields, or MRFs) can succinctly represent independence assumptions that directed models cannot represent, and the opposite is also true. There are advantages and drawbacks to both of these methods, but that is not the focus of this thesis. We will introduce and discuss MRFs only to the extent needed to properly describe and explain the notions and methods concerning BNs. Note that the methods provided below for marginal and MAP inference are applicable both to MRFs and to BNs.

Definition 3.1. A Markov random field (MRF) is a probability distribution over variables X_1, ..., X_n defined by an undirected graph G in which the nodes correspond to the variables X_i. The probability has the form
\[
P(X_1, X_2, ..., X_n) = \frac{1}{Z} \prod_{c \in C} \varphi_c(X_c),
\]
where C denotes the set of cliques (i.e. fully connected subgraphs) of G and each factor φ_c is a non-negative function of the variables in the clique. The partition function
\[
Z = \sum_{(x_1, ..., x_n)} \prod_{c \in C} \varphi_c(X_c)
\]
is a normalizing constant that ensures that the distribution sums to one, where the summation is taken over all possible instantiations of all the variables.

Thus, given a graph G, our probability distribution may contain factors whose scope is any clique in G; a clique can be a single node, an edge, a triangle, etc. Note that we do not need to specify a factor for each clique.

It is not hard to see that Bayesian networks are a special case of MRFs with normalizing constant equal to 1, where the clique factors correspond to CPDs. One can notice that if we take a directed graph G, add edges between all parents of a given node and remove the directionality of the edges, then the CPDs (seen as factors over each variable and its parents) factorize over the resulting undirected graph. This process is called moralization (see Figure 3.2). A Bayesian network can thus always be converted into an undirected network with normalizing constant 1.

Figure 3.2: Moralization of a Bayesian network.

Message passing. As we mentioned above, the VE algorithm can answer marginal queries of the form P(Y | E = e). However, if we want to ask the model another query, e.g. P(Y_2 | E_2 = e_2), we need to restart the algorithm from scratch. Fortunately, in the process of computing marginals, the VE algorithm produces many intermediate factors τ as a side product of the main computation, and these turn out to be the same as the ones needed to answer other marginal queries.

Many complicated inference problems can be solved by message-passing algorithms, in which simple messages are passed locally among simple elements of the system. An illustrative example was given in the book MacKay (2003) for the problem of counting soldiers. Consider a line of soldiers walking in the mist. The commander, who is in the line, wishes to count the soldiers. A direct count is impossible because of the mist. However, the count can be obtained in a simple way that does not require any complex operations. The algorithm requires only the soldiers' ability to add two integers and to add 1 to an integer. It consists of the following steps (see Figure 3.3):

• the front soldier in the line says the number "one" to the soldier behind him;

• the rearmost soldier in the line says the number "one" to the soldier in front of him;

• each soldier who is told a number by the soldier ahead of him or the soldier behind him adds 1 to it and passes the new number to the next soldier in the line on the other side.

Figure 3.3: A line of soldiers counting themselves using the message-passing rule-set.

Hence, the commander can find the global number of soldiers by simply adding together three numbers: the number heard from the soldier in front of him, the number heard from the soldier behind him, and 1. This method makes use of a property of the total number of soldiers: the number can be written as the sum of the number of soldiers in front of a point and the number behind that point, two quantities which can be computed separately, because the two groups are separated by the commander. When this requirement is satisfied, the message-passing algorithm can be adapted to a general graph with no cycles (see Figure 3.4a). When the graph has no cycles, for each soldier we can uniquely separate the group into two subgroups, "those in front" and "those behind", and perform the algorithm above. However, this is not always possible for a graph with cycles; for instance, for a soldier in a cycle (such as "Jim") in Figure 3.4b such a separation is not unique.
Figure 3.4: A swarm of soldiers: (a) no cycles; (b) contains a cycle.

Using the same principle we will now describe message passing for tree-structured networks (called belief propagation, BP for short) and then a modification of the method for general networks (called the clique tree algorithm).

Belief propagation. Let us first look at tree-structured graphs. Consider what happens if we run the VE algorithm on a tree in order to compute a marginal distribution P(X_i). We can easily find the optimal ordering for this problem by rooting the tree at the node associated with X_i and iterating through the nodes in post-order (from the leaves to the root), just as for a swarm of soldiers with no cycles. At each step we eliminate one of the variables, say X_j; this involves computing the factor τ_k(x_k) = Σ_{x_j} φ(x_k, x_j) τ_j(x_j), where X_k is the parent of X_j in the tree. At a later step, the variable X_k is eliminated in the same manner, i.e. τ_k(x_k) is passed up the tree to the parent X_l of X_k in order to be multiplied by the factor φ(x_l, x_k) before being marginalized out; as a result we obtain the new factor τ_l(x_l). The factor τ_j(x_j) can be thought of as a message that X_j sends to X_k, summarizing all of the information from the subtree rooted at the node X_j. We can visualize this transfer of information using arrows on the tree; see Figure 3.5. At the end of the VE algorithm, the node X_i receives messages from all of its children and the final marginal P(X_i) is obtained by marginalizing those messages out.

Figure 3.5: Message passing order when using VE to compute P(X_3) on a small tree.

With the same indices as above, suppose that after computing P(X_i) we want to compute P(X_k) as well. We would again run VE for the new tree rooted at the node X_k, waiting until it receives all messages from its children. Note that the new tree consists of two parts. The first one is the subtree rooted at X_k with all its descendants from the original tree (i.e. the tree rooted at X_i). The other part is the subtree rooted at X_l (which was the parent of X_k in the original tree, but is now the child of X_k); this part therefore contains the node X_i. The key insight is that the messages received by X_k from X_j will now be the same as those received when X_i was the root. Thus, if we store the intermediate messages of the VE algorithm, we can quickly compute other marginals as well. Notice, for example, that the messages sent to X_k from the subtree containing X_i will need to be recomputed. So, how do we compute all the messages we need? Again referring to the soldier-counting problem, a node is ready to transmit a message to its parent once it has received the messages from all of its children. All the messages will have been sent after precisely 2|E| steps, where |E| is the number of edges in the graph, since each edge can carry messages only twice (once in each direction).

To define the belief propagation (BP) algorithm formally, let us see what kind of messages are sent. For the purposes of marginal inference we use sum-product message passing. The algorithm is defined as follows: while there is a node X_k ready to transmit to X_l, it sends the message
\[
m_{k \to l}(x_l) = \sum_{x_k} \varphi(x_k)\, \varphi(x_k, x_l) \prod_{j \in \mathrm{Nb}(k) \setminus \{l\}} m_{j \to k}(x_k),
\]
where Nb(k) \ {l} denotes the set of all neighbours of the k-th node, excluding the l-th node. Note that this message is precisely the factor τ that X_k would transmit to X_l during a round of variable elimination with the goal of computing P(X_i); note also that the product on the right-hand side of this equation naturally equals 1 for leaves of the tree.

After having computed all messages, we may answer marginal queries over any variable X_j in constant time using the equation
\[
P(X_j) \propto \psi(X_j) \prod_{l \in \mathrm{Nb}(j)} m_{l \to j}(x_j),
\]
where ψ(X_j) is the product of all factors φ whose scope contains X_j. In the case of BNs we have equality instead of proportionality.
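For the special case of a chain, the two passes can be written out explicitly with matrices. The sketch below (our illustration, with unary factors as vectors and pairwise factors as matrices) sends each message once in both directions, 2|E| messages in total, and then reads off every marginal locally:

```python
import numpy as np

def chain_marginals(phis, pairwise):
    """Sum-product belief propagation on a chain X_1 - X_2 - ... - X_n.
    `phis[i]` is the unary factor of X_i (a vector) and `pairwise[i]` the
    factor phi(X_i, X_{i+1}) (a matrix). Returns normalized marginals."""
    n = len(phis)
    fwd = [None] * n   # fwd[i]: message into X_i from the left
    bwd = [None] * n   # bwd[i]: message into X_i from the right
    fwd[0] = np.ones_like(phis[0])
    for i in range(1, n):
        # m_{i-1 -> i}(x_i) = sum_{x_{i-1}} phi(x_{i-1}) phi(x_{i-1}, x_i) m_in
        fwd[i] = pairwise[i - 1].T @ (phis[i - 1] * fwd[i - 1])
    bwd[n - 1] = np.ones_like(phis[n - 1])
    for i in range(n - 2, -1, -1):
        bwd[i] = pairwise[i] @ (phis[i + 1] * bwd[i + 1])
    beliefs = [phis[i] * fwd[i] * bwd[i] for i in range(n)]
    return [b / b.sum() for b in beliefs]
```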
Clique Tree Algorithm. First let us define what is meant by a clique tree. A clique tree is an undirected tree whose nodes are clusters C_i of variables, meaning that C_i is a subset of the set of all variables {X_1, ..., X_n}. Each edge between clusters C_i and C_j is associated with a sepset (separation set) S_{i,j} = C_i ∩ C_j. A simple example of a clique tree for a chain network is shown in Figure 3.6.

Figure 3.6: An example of a chain network consisting of three variables A, B and C; the corresponding MRF and a clique tree with C_1 = {A, B}, C_2 = {B, C} and S_{1,2} = {B}.

So far we have assumed that the graph is a tree. What if that is not the case? Then the clique tree algorithm (also called the junction tree algorithm in the literature) can be used; it partitions the graph into clusters of variables such that the interactions among clusters have a tree structure, i.e. a cluster is directly influenced only by its neighbours in the tree, which we denote by T. Then we can perform message passing on this tree. This leads to tractable global solutions if the local (cluster-level) problems can be solved exactly. In addition, clique trees must satisfy the two following properties:

1. family preservation, i.e. for each factor φ there is a cluster such that the factor's scope is a subset of the cluster;

2. running intersection property (RIP), i.e. for each pair of clusters C_i, C_j and a variable X ∈ C_i ∩ C_j, all clusters and sepsets on the unique path between C_i and C_j contain the variable X.

Note that we may always find a trivial clique tree with one node containing all the variables in the original graph, but obviously such trees are useless. Optimal trees are the ones that make the clusters as small and modular as possible; unfortunately, as in the case of VE, the problem of finding the optimal tree is also NP-hard. A special case in which we can find it is when the original graph is a tree: in this case we can put each connected pair of nodes into a separate cluster, and it is easy to check that both conditions are met. One practical way to find a good clique tree is to simulate VE, i.e. the elimination order fixed for VE induces a graph, from which we take the maximal cliques as our clusters and form a tree; RIP is then satisfied automatically. Note that we do not need to actually run VE, just to simulate it for a chosen ordering and obtain the induced graph. More formally:

Definition 3.2. Let Φ be a set of factors (CPDs in the case of Bayesian networks) over X = {X_1, ..., X_n}, and let ≺ be an elimination ordering for some subset 𝒳 ⊆ X. The induced graph I_{Φ,≺} is an undirected graph over 𝒳, where X_i and X_j are connected by an edge if they both appear in some intermediate factor ψ generated by the VE algorithm using ≺ as the elimination ordering.
In Figure 3.7 there is an example of an induced graph for the Student example using the elimination ordering of Table 3.1, the cliques in that graph, and a corresponding clique tree. One can see that RIP is satisfied; for a proof that trees corresponding to graphs induced by VE satisfy RIP, see Koller and Friedman (2009).

Figure 3.7: (a) Induced graph for VE in the Student example, using the elimination order of Table 3.1. (b) Cliques in the induced graph: {C, D}, {D, I, G}, {G, I, S}, {G, J, S, L} and {G, H, J}. (c) Clique tree for the induced graph.

Now let us define the full clique tree algorithm. First, we define the potential ψ_i(C_i) of each cluster C_i as the product of all the factors φ in G that have been assigned to C_i. By the family preservation property, this is well defined, and we may assume that our distribution is of the form
\[
P(X_1, ..., X_n) = \frac{1}{Z} \prod_i \psi_i(C_i).
\]
Then, at each step of the algorithm, we choose a pair of adjacent clusters C_i, C_j in the tree graph T and compute a message whose scope is the sepset S_{i,j} between the two clusters:
\[
m_{i \to j}(S_{i,j}) = \sum_{C_i \setminus S_{i,j}} \psi_i(C_i) \prod_{l \in \mathrm{Nb}(i) \setminus \{j\}} m_{l \to i}(S_{l,i}). \tag{3.10}
\]
In the context of clusters, Nb(i) denotes the set of indices of the neighbouring clusters of C_i. We choose C_i and C_j only if C_i has received messages from all of its neighbours except C_j. Just as in belief propagation, this procedure terminates in exactly 2|E_T| steps, because the process is equivalent to making an upward pass and a downward pass. In the upward pass, we first pick a root and send all messages towards it, starting from the leaves. When this process is complete, the root has received all the messages, so it can then send the appropriate messages to all of its children. This continues until the leaves of the tree are reached, at which point no more messages need to be sent; this second phase is called the downward pass. After it terminates, we define the belief of each cluster based on all the messages that it receives:
\[
\beta_i(C_i) = \psi_i(C_i) \prod_{l \in \mathrm{Nb}(i)} m_{l \to i}(S_{l,i}). \tag{3.11}
\]
These updates are often referred to as Shafer-Shenoy updates, and the full procedure is also referred to as sum-product belief propagation. Each belief is then the marginal of the clique:
\[
\beta_i(C_i) = \sum_{X \setminus C_i} P(X_1, ..., X_n).
\]
Now, if we need to compute the marginal probability of a particular variable X, we can select any clique whose scope contains X and eliminate the redundant variables in the clique. A key point is that the result of this process does not depend on the clique we selected: if X appears in two cliques, they must agree on its marginal. Two adjacent cliques C_i and C_j are said to be calibrated if
\[
\sum_{C_i \setminus S_{i,j}} \beta_i(C_i) = \sum_{C_j \setminus S_{i,j}} \beta_j(C_j).
\]
A clique tree T is calibrated if all pairs of adjacent cliques are calibrated. For a calibrated clique tree, we use the term clique beliefs for β_i(C_i) and sepset beliefs for µ_{i,j}(S_{i,j}), defined as either side of the above equality.

As the end result of the sum-product belief propagation procedure we get a calibrated tree, which is more than simply a data structure storing the results of probabilistic inference for all of the cliques in the tree, i.e. their beliefs (3.11). It can also be viewed as an alternative representation of the joint measure over all variables. For sepset beliefs we have that
\[
\mu_{i,j}(S_{i,j}) = m_{i \to j}(S_{i,j})\, m_{j \to i}(S_{i,j}).
\]
Using this fact, at convergence of the clique tree calibration algorithm we get the unnormalized joint measure P̃ as
\[
\tilde{P}(X_1, ..., X_n) = \prod_i \psi_i(C_i) = \frac{\prod_i \beta_i(C_i)}{\prod_{(i,j)} \mu_{i,j}(S_{i,j})}, \tag{3.12}
\]
where the product in the numerator is over all cliques and the product in the denominator is over all sepsets in the tree. As a result we get a different set of parameters that captures the unnormalized measure defining our distribution (in the case of BNs it is simply the distribution), and no information is lost in the process. Thus, we can view the clique tree as an alternative representation of the joint measure, one that directly reveals the clique marginals.
The second approach, mathematically equivalent but based on a different intuition, is message passing with division. In sum-product belief propagation, messages are passed between two cliques only after one has received messages from all of its neighbours except the other one, as in (3.10), and the resulting belief is (3.11). Nonetheless, a different way to compute the same expression is to multiply in all of the messages, and then divide the resulting factor by the message from the other clique to avoid double-counting. To make this notion precise, we must define a factor-division operation.

Let X and Y be disjoint sets of variables, and let φ_1 and φ_2 be two factors with scopes X ∪ Y and Y respectively. Then we define the division φ_1/φ_2 as the factor ψ with scope X ∪ Y such that
\[
\psi(X, Y) = \frac{\varphi_1(X, Y)}{\varphi_2(Y)},
\]
where we define 0/0 = 0. We now see that we can compute the expression of equation (3.10) by computing the beliefs as in equation (3.11) and then dividing by the remaining message:
\[
m_{i \to j}(S_{i,j}) = \frac{\sum_{C_i \setminus S_{i,j}} \beta_i(C_i)}{m_{j \to i}(S_{i,j})}.
\]
The belief of the j-th clique is updated by multiplying its previous belief by m_{i→j} and dividing it by the previous message passed along this edge (regardless of the direction), stored in the sepset belief µ_{i,j}, to avoid double counting. This algorithm is called belief update message passing and is also known as the Lauritzen-Spiegelhalter algorithm.

3.2.3 MAP inference

The maximum a posteriori (MAP) problem has a broad range of applications in computer vision, computational biology, speech recognition, and more. By using MAP inference we lose the ability to measure our confidence (or uncertainty) in our conclusions. Nevertheless, there are good reasons for using a single MAP assignment rather than the marginal probabilities of the different variables. The first reason is the preference for obtaining a single coherent joint assignment, whereas a set of individual marginals may not make sense as a whole. The second is that there are inference methods that are applicable to the MAP problem and not to the task of computing probabilities, so the former may be tractable even when the latter is not. The problem of finding the MAP assignment in the general case is NP-hard (Cooper (1990)).

There are two types of MAP inference queries: a MAP query and a marginal MAP query. Assume first that the set of all variables X = Y ∪ E consists of two disjoint sets, where E is the evidence, meaning that we know the values of those variables. Then a MAP query aims to find the most likely assignment to all of the non-evidence variables Y:
\[
\mathrm{MAP}(Y \mid E = e) = \operatorname*{argmax}_{y} P(Y = y \mid E = e).
\]
Now assume that the set of all variables X = Y ∪ W ∪ E consists of three disjoint sets, where E is still the evidence. In this case a marginal MAP query aims to find the most likely assignment to the subset Y, marginalizing over the rest of the variables W:
\[
\mathrm{MAP}(Y \mid E = e) = \operatorname*{argmax}_{y} P(Y = y \mid E = e) = \operatorname*{argmax}_{y} \sum_{w} P(Y = y, W = w \mid E = e).
\]
Both tasks can be solved within the same variable elimination (VE) and message passing frameworks as marginal inference, where instead of summation we use maximization. The second type of query is much more complicated, both in theory and in practice, since it involves both maximization and summation. In particular, exact inference methods such as VE can be intractable for it, even in simple networks. Hence, we first briefly discuss these methods and then introduce some more efficient ones.

Recall that while discussing VE we introduced two operations on factors which were the foundation of the algorithm. Now we need one additional operation, called factor maximization. Let X be a set of variables and Y ∉ X a variable not belonging to the set X. Let φ(X, Y) be a factor over those variables. We define the factor maximization of Y in φ to be the factor ψ over X such that
\[
\psi(X) = \max_{Y} \varphi(X, Y).
\]
More precisely, ψ(x) = max_{y ∈ Val(Y)} φ(x, y) for each instantiation x ∈ Val(X). Similarly to property (3.9), we have that if a set of variables X is not in the scope of the factor φ_1, then
\[
\max_{X} (\varphi_1 \cdot \varphi_2) = \varphi_1 \cdot \max_{X} \varphi_2 \tag{3.13}
\]
and
\[
\max_{X} (\varphi_1 + \varphi_2) = \varphi_1 + \max_{X} \varphi_2. \tag{3.14}
\]
This leads us to a max-product variable elimination algorithm for a general MAP query, which is constructed in the same way as the sum-product variable elimination algorithm in Subsection 3.2.1, but with the marginalization step (summation) replaced by maximization over the corresponding variables.

This way we find the maximum value of the joint probability, though the original and more interesting problem is to find the most probable assignment corresponding to that maximum probability. This is done by a traceback procedure, which is quite straightforward (details can be found in Koller and Friedman (2009)). In the process of eliminating variables we record their maximizing values given the values of the variables that have not yet been eliminated. When we pick the value of the final variable, we can then go back and pick the values of the remaining variables accordingly.
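On a chain, max-product elimination with traceback is the familiar Viterbi recursion. A minimal sketch (our illustration, with the same vector/matrix factor conventions as in the belief propagation sketch above):

```python
import numpy as np

def chain_map(phis, pairwise):
    """Max-product variable elimination with traceback on a chain
    X_1 - ... - X_n: eliminate X_1, ..., X_{n-1} by maximization,
    recording the argmax at each step, then trace back to recover the
    most probable joint assignment and its unnormalized probability."""
    n = len(phis)
    msg = phis[0].astype(float)
    back = []
    for i in range(1, n):
        scores = msg[:, None] * pairwise[i - 1]   # candidates over X_{i-1}
        back.append(scores.argmax(axis=0))        # maximizing value, per x_i
        msg = scores.max(axis=0) * phis[i]        # factor maximization
    assignment = [int(msg.argmax())]              # pick the final variable
    for bp in reversed(back):                     # traceback
        assignment.append(int(bp[assignment[-1]]))
    return list(reversed(assignment)), msg.max()
```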
Recall that the joint distribution P in a Bayesian network is represented by a product of factors, where each factor coincides with a CPD (we introduced this representation in (3.8)). Then we can write the marginal MAP query as
\[
\operatorname*{argmax}_{y} \sum_{W} P(y, W) = \operatorname*{argmax}_{y} \sum_{W} \prod_{i} \varphi_i,
\]
where we omit the evidence set for transparency of notation, since it does not affect the main point of the discussion. First we compute
\[
\max_{y} \sum_{W} \prod_{i} \varphi_i.
\]
This form immediately suggests an algorithm combining the ideas of sum-product and max-product variable elimination. Specifically, the summations and maximizations outside the product can be viewed as operations on factors. Thus, to compute the value of this expression, we simply have to eliminate the variables in W by summing them out, and the variables in Y by maximizing them out. When eliminating a variable X, whether by summation or by maximization, we simply multiply all the factors whose scope involves X, and then eliminate X to produce the resulting factor. The ability to perform this step is justified by the interchangeability of factor summation and maximization with the factor product (properties (3.9) and (3.13)). The traceback procedure to find the most probable assignment can also be found in Koller and Friedman (2009).

At first glance it seems that the algorithms for both queries have the same complexity, but that is not the case. It can be shown that even on very simple networks, elimination algorithms can require exponential time to solve a marginal MAP query (see Example 13.7 in Koller and Friedman (2009)). The difficulty comes from the fact that we are not free to choose an arbitrary elimination ordering. When summing out variables, we can exploit the fact that the operations of summing out different variables commute; thus, when performing summing-out operations for sum-product variable elimination, we may sum out the variables in any order, and we have the same flexibility in the case of max-product elimination. Unfortunately, the max and sum operations do not commute. Thus, in order to maintain the correct semantics of marginal MAP queries, as specified in the equation above, we must perform all the variable summations before we can perform any of the variable maximizations.

We can also apply the message passing framework, or more generally the clique tree algorithm, to MAP inference. In Subsection 3.2.2 we used clique trees to compute the sum-marginals over each of the cliques in the tree. Here, we compute a set of max-marginals over each of those cliques. By the max-marginal of a function f defined on the set X relative to a set of variables Y ⊆ X we denote the factor that for each y gives
\[
\mathrm{MaxMarginal}_f(y) = \max_{\langle x \rangle_Y = y} f(x),
\]
i.e. the value of the unnormalized probability of the most likely joint assignment x ∈ X consistent with y. We compute the whole set of max-marginals for two reasons. First, the set of max-marginals can be a useful indicator of how confident we are in particular components of the MAP assignment. Second, in many cases an exact solution of the MAP problem via a variable elimination procedure is intractable. In this case, to compute approximate max-marginals we can use a message passing procedure in cluster graphs, similar to the clique tree procedure. These pseudo-max-marginals can be used for selecting an assignment; while this assignment is not in general the MAP assignment, we can nevertheless provide some guarantees in certain cases. As before, our task consists of two parts: computing the max-marginals and decoding them to extract a MAP assignment.

As for the first part, in the same way as we modified sum-product VE to sum-product message passing, we modify max-product VE to the max-product belief propagation algorithm in clique trees. The resulting algorithm executes precisely the same initialization and overall message scheduling as the sum-product belief propagation algorithm; the only difference is that we use max-product rather than sum-product message passing. As a result of running the algorithm we get a set of max-marginals for every clique of our clique tree. Each belief is the max-marginal of the clique, β_i(C_i) = MaxMarginal_P(C_i), and all pairs of adjacent cliques are max-calibrated:
\[
\mu_{i,j}(S_{i,j}) = \max_{C_i \setminus S_{i,j}} \beta_i(C_i) = \max_{C_j \setminus S_{i,j}} \beta_j(C_j).
\]
Similarly to sum-product message passing, we get a reparameterization of the distribution in the form (3.12), with the corresponding beliefs of the max-product belief propagation algorithm.

Now we need to decode those max-marginals to get a MAP assignment. In the case of variable elimination, we had the max-marginal only for the single variable eliminated last, and could identify the assignment for that particular variable alone. To compute the assignments to the rest of the variables, we had to perform a traceback procedure. Now the situation is different. One obvious solution is to use the max-marginal of each variable to compute its own optimal assignment, and thereby compose a full joint assignment to all variables. However, this simplistic approach works only when there is a unique MAP assignment or, equivalently, when each max-marginal has a unique maximal value. For generic probability measures this is not a very rigid constraint; thus, we can find the unique MAP assignment by locally optimizing the assignment to each variable separately.

Otherwise, in most cases we can break ties by introducing a slight random perturbation into all of the factors, making all of the elements in the joint distribution have slightly different probabilities. However, there might be cases when we need to preserve the structure of relationships between some variables: for example, some variables can share parameters, or there might be some deterministic structure that should be preserved. Under these circumstances we find a locally optimal assignment using, for example, the traceback procedure. Afterwards we can verify whether this assignment is a MAP assignment (for the procedure and its verification see Koller and Friedman (2009)).

MAP as a Linear Optimization Problem. In MAP inference we search for assignments which maximize a certain measure, in our case either the joint probability over all non-evidence variables or the probability over some subset of variables. Therefore, it is natural to treat it directly as an optimization problem. There exists an extensive literature on optimization algorithms, and we can apply some of those ideas and algorithms to our specific case.

The main idea here is to reduce our MAP problem to an Integer Linear Programming (ILP) problem, i.e. an optimization problem over a set of integer-valued variables, where both the objective and the constraints are linear. First, to define the ILP problem we need to turn the product representation of the joint probability as in (3.8) into a sum, replacing the probability with its logarithm. This is possible because all the factors (CPDs) are positive. Hence, we want to compute

\[
\operatorname*{argmax}_{\xi} \prod_{i=1}^{n} \varphi_i(\mathbf{A}_i) = \operatorname*{argmax}_{\xi} \sum_{i=1}^{n} \log(\varphi_i(\mathbf{A}_i)),
\]

where $\xi$ is a general assignment to the whole vector of variables in the network, and $\mathbf{A}_i = (X_i, \mathrm{pa}_{\mathcal{G}}(X_i))$ represents the set of variables consisting of the $i$-th variable and its parents in the network. Note that the whole discussion in this paragraph is actually identical for MRFs with positive factors, the only difference being the number of factors; but since MRFs are not the focus of this thesis, we formulate everything in the Bayesian network framework.

For variable indices $r \in \{1, \ldots, n\}$ we define the number of corresponding possible vector instantiations $n_r = |\mathrm{Val}(\mathbf{A}_r)|$. For any joint assignment $\xi$, if this assignment restricted to the variables from $\mathbf{A}_r$ takes the value $a_r^j$, $j \in \{1, \ldots, n_r\}$, i.e.
$\xi_{\mathbf{A}_r} = a_r^j$, then the factor $\log(\varphi_r)$ contributes to the objective a quantity denoted by

\[
\eta_r^j = \log(\varphi_r(a_r^j)).
\]

We introduce optimization variables $q(x_r^j)$, where $r$ enumerates the different factors and $j$ enumerates the different possible assignments to the variables from $\mathbf{A}_r$. These variables take binary values, so that $q(x_r^j) = 1$ if and only if $\mathbf{A}_r = a_r^j$, and $0$ otherwise. It is important to distinguish the optimization variables from the random variables in our original graphical model; here we have one optimization variable $q(x_r^j)$ for each joint assignment $a_r^j$ to the model variables $\mathbf{A}_r$.

Let $q$ denote the vector of the optimization variables $\{q(x_r^j) : 1 \le r \le n,\ 1 \le j \le n_r\}$ and $\eta$ the vector of the coefficients $\eta_r^j$ sorted in the same order. Both are vectors of dimension $N = \sum_{r=1}^{n} n_r$. With this interpretation, the MAP objective can be rewritten as

\[
\max_{q} \sum_{r=1}^{n} \sum_{j=1}^{n_r} \eta_r^j \, q(x_r^j) \qquad (3.15)
\]

or, in shorthand, $\max_{q} \eta^\top q$.

Now that we have an objective to maximize, we need to add consistency constraints guaranteeing that an assignment $q \in \{0, 1\}^N$ obtained as a solution of the optimization problem is legal, meaning that it corresponds to some assignment of $\mathbf{X}$. Namely, we first restrict attention to integer solutions, and then construct two constraints to make sure that these integer solutions are consistent. The first constraint enforces mutual exclusivity within a factor, and the second ensures that the factors in our network agree on the variables in the intersection of their scopes. In this way we reformulate the MAP task as an integer linear program, where we optimize the linear objective of equation (3.15) subject to the constraints just discussed. We note that solving integer linear programs is itself NP-hard, so we do not avoid the basic hardness of the MAP problem.

One of the methods often used to tackle ILP problems is linear program relaxation. In this approach we turn a discrete, combinatorial optimization problem into a continuous one. The latter is a linear program (LP), which can be solved in polynomial time and for which a range of very efficient algorithms exists. One can then use the solutions of this LP to obtain approximate solutions to the MAP problem. To perform the relaxation, we substitute the condition that the solutions are integer with the relaxed constraint that they are non-negative.

This linear program is a relaxation of our original integer program, since every assignment to $q$ that satisfies the constraints of the integer problem also satisfies the constraints of the linear program, but not the other way around. Thus, the optimal value of the objective of the relaxed version will be no less than the value of the (same) objective in the exact version, and it can be greater when the optimum is achieved at an assignment to $q$ that does not correspond to a legal assignment $\xi$. An important special case are tree-structured graphs, for which the relaxation is guaranteed to always return integer solutions, which are in turn optimal (for a proof and a more detailed discussion see Koller and Friedman (2009)). Otherwise we get approximate solutions, which we in turn need to transform into integer (and legal) assignments.

One approach is a greedy assignment process, which assigns values to the variables $X_i$ one at a time. Another approach is to round the LP solution to the nearest integer values. The latter approach works surprisingly well in practice and has theoretical guarantees for some classes of ILPs (Koller and Friedman (2009)).
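The following sketch writes out the relaxed LP explicitly for a hypothetical two-variable chain A -> B (the same toy numbers as in the earlier sketch) and solves it with scipy's linprog; because the graph is a tree, the relaxation returns an integral solution here.

```python
import numpy as np
from scipy.optimize import linprog

# Hypothetical chain A -> B, binary variables.
phi_A  = np.array([0.6, 0.4])
phi_AB = np.array([[0.9, 0.1],
                   [0.2, 0.8]])

# Optimisation vector x = [qA(0), qA(1), qAB(0,0), qAB(0,1), qAB(1,0), qAB(1,1)]
eta = np.concatenate([np.log(phi_A), np.log(phi_AB.ravel())])

A_eq = np.array([
    [1, 1, 0, 0, 0, 0],    # mutual exclusivity within factor A
    [0, 0, 1, 1, 1, 1],    # mutual exclusivity within factor (A, B)
    [-1, 0, 1, 1, 0, 0],   # agreement on A = 0: sum_b qAB(0, b) = qA(0)
    [0, -1, 0, 0, 1, 1],   # agreement on A = 1: sum_b qAB(1, b) = qA(1)
])
b_eq = np.array([1, 1, 0, 0])

# linprog minimises, so negate eta; integrality is relaxed to 0 <= q <= 1.
res = linprog(-eta, A_eq=A_eq, b_eq=b_eq, bounds=[(0, 1)] * 6)
print(res.x)             # integral here, because the graph is a tree
print(np.exp(-res.fun))  # recovers max_{a,b} P(a, b) = 0.54
```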
An alternative method for the MAP problem, which also comes from optimization theory, is dual decomposition. It uses the principle that our problem can be decomposed into sub-problems, together with linear constraints (the same as in the ILP) that enforce a notion of agreement between the solutions to the different sub-problems. The sub-problems are chosen such that they can be solved efficiently using exact combinatorial algorithms. The agreement constraints are incorporated using Lagrange multipliers (this is called Lagrangian relaxation), and an iterative algorithm, for example a subgradient algorithm, is used to minimize the resulting dual. The initial work on dual decomposition in probabilistic graphical models focused on the MAP problem for MRFs (see Komodakis et al. (2007)).

By formulating our problem as a linear program or its dual, we obtain a very flexible framework for solving it; in particular, we can easily incorporate additional constraints into the LP, which reduce the space of possible assignments $q$ by eliminating some solutions that do not correspond to actual distributions over $\mathbf{X}$. The problems are convex, and in principle they can be solved directly using standard techniques, but their size is very large, which makes this approach infeasible in practice. However, the LP has special structure: when viewed as a matrix, the equality constraints in this LP all have a particular block structure that corresponds to the structure of adjacent clusters. Moreover, when the network is not densely connected, the constraint matrix is also sparse; thus, standard LP solvers may not be fully suited to exploiting this special structure. The theory of convex optimization provides a wide spectrum of tools, and some are already being adapted to take advantage of the structure of the MAP problem (see, for example, Wainwright et al. (2005) and Sontag and Jaakkola (2007)). The empirical evidence suggests that the more specialized solution methods for MAP problems are often more effective.

Other methods. Another approach to solving a MAP problem is local search. It is a heuristic-type solution, which starts with an arbitrary assignment and performs "moves" on the joint assignment that locally increase the probability. This technique does not offer theoretical guarantees; however, we can often use prior knowledge to come up with highly effective moves. Therefore, in practice, local search may perform extremely well.

There are also search methods that are more systematic. They search the space so as to ensure that the assignments that are not considered are not optimal, and thereby guarantee an optimal solution. Such methods generally search over the space of partial assignments, starting with the empty assignment and successively assigning variables one at a time. One such method is known as branch-and-bound.

These methods have much greater applicability in the context of the marginal MAP problem, where most other methods are not currently applicable. In the next subsection we discuss sampling-based algorithms, which can be applied both to marginal and MAP inference.

3.2.4 Sampling-based methods for inference

In practice, the probabilistic models that we use can often be quite complex, and simple algorithms like VE may be too slow for them.
In addition, many interesting classes of models may not have exact polynomial-time solutions at all, and for this reason much research effort in machine learning is spent on developing algorithms that yield approximate solutions to the inference problem. In this subsection we consider some sampling methods that can be used to perform both marginal and MAP inference queries; additionally, they can compute various quantities of interest, such as the expectation $\mathbb{E}[f(\mathbf{X})]$ of a function of the random vector distributed according to a given probabilistic model.

In general, sampling is a rather hard problem. The aim is to generate a random sample of the observations of $\mathbf{X}$. However, our computers can only generate samples from very simple distributions, such as the uniform distribution over $[0, 1]$. All sampling techniques involve calling some kind of simple subroutine multiple times in a properly constructed way. For example, in the case of a multinomial distribution with parameters $\theta_1, \ldots, \theta_k$, instead of directly sampling a multinomial variable we can sample a single uniform variable, having previously subdivided the unit interval into $k$ regions, with region $i$ having size $\theta_i$. Then we sample uniformly from $[0, 1]$ and return the index of the region into which our sample falls.

Forward sampling. Now let us return to the case of Bayesian networks (BN). We can apply the same sampling technique to BNs with multinomial variables. We start from the nodes which do not have parents, whose variables simply have multinomial distributions, and we move down the network to the next generation, following the arrows, until we reach the leaves. Thus, for a particular node we need to wait until all of its parents are sampled; once we know all the values of the parents, the variable again simply has a multinomial distribution. In the Student example, to sample a student's grade we would first sample an exam difficulty $d'$ and an intelligence level $i'$. Then, once we have the samples $d'$ and $i'$, we generate a student grade $g'$ from $P(g \mid d', i')$ (a code sketch combining this with rejection sampling appears below). There is one problem, though: plain forward sampling cannot accommodate evidence on any variables other than the roots.

Monte Carlo and rejection sampling. Algorithms that construct solutions based on a large number of samples from a given distribution are referred to as Monte Carlo (MC) methods. Sampling from an arbitrary distribution $p$ lets us compute integrals of the form

\[
\mathbb{E}_{\mathbf{X} \sim p}[f(\mathbf{X})] = \sum_{x} f(x) p(x),
\]

where the summation extends over all possible values of $\mathbf{X}$, and $p$ can be thought of as the density of $\mathbf{X}$ with respect to the counting measure. Below we follow the same interpretation also with regard to joint and conditional distributions.

If $f(\mathbf{X})$ does not have special structure matching the BN structure of $p$, this integral will be impossible to compute analytically; instead, we approximate it using a large number of samples from $p$. Using the Monte Carlo technique we approximate a target expectation with

\[
\mathbb{E}_{\mathbf{X} \sim p}[f(\mathbf{X})] \approx I_T = \frac{1}{T} \sum_{t=1}^{T} f(x^t),
\]

where $x^1, \ldots, x^T$ are samples drawn according to $p$. It is easy to show that $I_T$ is an unbiased estimator of $\mathbb{E}_{\mathbf{X} \sim p}[f(\mathbf{X})]$, and its variance can be made arbitrarily small with a sufficiently large number of samples.

Now let us consider rejection sampling as a special case of Monte Carlo integration. For example, suppose we have a Bayesian network over the set of variables $\mathbf{X} = \mathbf{Z} \cup \mathbf{E}$. We may use rejection sampling to compute marginal probabilities $P(\mathbf{E} = e)$. We can rewrite the probability as

\[
P(\mathbf{E} = e) = \sum_{z} P(\mathbf{Z} = z, \mathbf{E} = e) = \sum_{x} P(\mathbf{X} = x)\, \mathbb{I}(\mathbf{E} = e) = \mathbb{E}_{\mathbf{X} \sim p}[\mathbb{I}(\mathbf{E} = e)]
\]

and then take the Monte Carlo approximation. In other words, we draw many samples from $p$ and report the fraction of samples that are consistent with the value of the marginal.
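A minimal sketch combining forward sampling with the rejection-style estimate just described, on a hypothetical fragment of the Student network; all CPD values are invented, since the excerpt does not specify them.

```python
import numpy as np

rng = np.random.default_rng(0)

def sample_categorical(theta):
    """Sample from a categorical by subdividing [0, 1] into regions of size theta_i."""
    return int(np.searchsorted(np.cumsum(theta), rng.uniform()))

# Hypothetical CPDs for a fragment of the Student network: D -> G <- I.
p_D = [0.6, 0.4]                       # difficulty: easy, hard
p_I = [0.7, 0.3]                       # intelligence: low, high
p_G_given_DI = {                       # grade in {0, 1, 2} given (d, i)
    (0, 0): [0.3, 0.4, 0.3],  (0, 1): [0.9, 0.08, 0.02],
    (1, 0): [0.05, 0.25, 0.7], (1, 1): [0.5, 0.3, 0.2],
}

def forward_sample():
    d = sample_categorical(p_D)                    # roots first ...
    i = sample_categorical(p_I)
    g = sample_categorical(p_G_given_DI[(d, i)])   # ... then children
    return d, i, g

# Rejection-style estimate of P(G = 0): the fraction of consistent samples.
samples = [forward_sample() for _ in range(100_000)]
print(np.mean([g == 0 for _, _, g in samples]))
```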
Importance sampling. Unfortunately, rejection sampling can be very wasteful. If $P(\mathbf{E} = e)$ equals, say, 1%, then we discard 99% of all samples. A better way of computing such integrals uses importance sampling. The main idea is to sample from an auxiliary distribution $q$ (hopefully with $q(x)$ roughly proportional to $f(x) \cdot p(x)$), and then reweigh the samples in a principled way, so that their sum still approximates the desired integral.

More formally, suppose we are interested in computing $\mathbb{E}_{\mathbf{X} \sim p}[f(\mathbf{X})]$. Adopting the analogous notational convention for probability distributions, we may rewrite this integral as

\[
\mathbb{E}_{\mathbf{X} \sim p}[f(\mathbf{X})] = \sum_{x} f(x) p(x) = \sum_{x} f(x) \frac{p(x)}{q(x)} q(x) = \mathbb{E}_{\mathbf{X} \sim q}[f(\mathbf{X}) w(\mathbf{X})] \approx \frac{1}{T} \sum_{t=1}^{T} f(x^t) w(x^t),
\]

where $w(x) = \frac{p(x)}{q(x)}$ and the samples $x^t$ are drawn from $q$. In other words, instead of sampling from $p$ we may take samples from $q$ and reweigh them with $w(x)$; the expected value of this Monte Carlo approximation will be the original integral. By choosing

\[
q(x) = \frac{|f(x)| p(x)}{\sum_{x'} |f(x')| p(x')}
\]

we can set the variance of the new estimator to zero. Note, however, that the denominator is the quantity we are trying to estimate in the first place, and sampling from such a $q$ is NP-hard in general.

In the context of our previous example of computing $P(\mathbf{E} = e)$, we may take $q$ to be the uniform distribution and apply importance sampling as follows:

\[
P(\mathbf{E} = e) = \mathbb{E}_{z \sim p}[p(e \mid z)] = \mathbb{E}_{z \sim q}\left[\frac{p(e \mid z) p(z)}{q(z)}\right] = \mathbb{E}_{z \sim q}\left[\frac{p(e, z)}{q(z)}\right] = \mathbb{E}_{z \sim q}[w_e(z)] \approx \frac{1}{T} \sum_{t=1}^{T} w_e(z^t),
\]

where $w_e(z) = \frac{p(e, z)}{q(z)}$. Unlike rejection sampling, this uses all the samples; if $p(z \mid e)$ is not too far from uniform, this will converge to the true probability after only a very small number of samples.
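A sketch of the importance-sampling estimator $w_e(z) = p(e, z)/q(z)$ with a uniform proposal, for a hypothetical model with a single hidden variable $Z$; all probability values are invented.

```python
import numpy as np

rng = np.random.default_rng(2)

# Hypothetical prior over Z and likelihood of the evidence E = e given Z.
p_z = np.array([0.25, 0.5, 0.25])
p_e_given_z = np.array([0.01, 0.02, 0.4])

q = np.array([1/3, 1/3, 1/3])             # uniform proposal over Val(Z)

T = 10_000
z = rng.choice(3, size=T, p=q)
weights = p_z[z] * p_e_given_z[z] / q[z]  # w_e(z) = p(e, z) / q(z)
print(weights.mean())                     # estimate of P(E = e)

print(np.dot(p_z, p_e_given_z))           # exact value for comparison: 0.1125
```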
Markov chain Monte Carlo. Now let us turn to performing marginal and MAP inference using sampling. We will solve these problems using a very powerful technique called Markov chain Monte Carlo (MCMC).

A key concept in MCMC is that of a Markov chain, a sequence of random elements having the Markov property (see 2.3). A Markov chain $X = (X_0, X_1, X_2, \ldots)$ with each random vector $X_i$ taking values in the same state space $\mathrm{Val}(\mathbf{X})$ is specified by the initial distribution $P(X_0 = x)$, $x \in \mathrm{Val}(\mathbf{X})$, and the set of transition probabilities

\[
P(X_{k+1} = x' \mid X_k = x)
\]

for $x, x' \in \mathrm{Val}(\mathbf{X})$, which do not depend on $k$ (in this case the Markov chain is called homogeneous). Therefore, the transition probabilities at any time in the entire process depend only on the current state and not on the history of the process. In what follows we consider a finite state space only, so we may assume $\mathrm{Val}(\mathbf{X}) = \{1, \ldots, d\}$, unless stated otherwise.

If the initial state $X_0$ is drawn from a vector of probabilities $p_0$, we may represent the probability $p_t$ of ending up in each state after $t$ steps as

\[
p_t = T^t p_0,
\]

where $T$ denotes the transition probability matrix with $T_{ij} = P(X_{k+1} = i \mid X_k = j)$, $i, j \in \{1, \ldots, d\}$, and $T^t$ denotes the $t$-th matrix power. If the limit $\lim_{t \to \infty} p_t = \pi$ exists, it is called a stationary distribution of the Markov chain. A sufficient condition for $\pi$ to be a stationary distribution is the detailed balance condition:

\[
\pi(j) T_{ij} = \pi(i) T_{ji} \quad \text{for all } i, j \in \mathrm{Val}(\mathbf{X}).
\]

The high-level idea of MCMC is to construct a Markov chain whose states are joint assignments to the variables in the model and whose stationary distribution is equal to the model probability $p$. Then, running the chain for a sufficient number of steps, we obtain a sample from the distribution $p$. In order to construct such a chain, we first recall the conditions under which stationary distributions exist. This turns out to be true under two sufficient conditions: irreducibility, meaning that it is possible to get from any state $x$ to any other state $x'$ with positive probability in a finite number of steps, and aperiodicity, meaning that returns to a given state can happen at irregular times, so the chain does not cycle with a fixed period. In the context of continuous variables, the Markov chain must be ergodic, which is a slightly stronger condition than the above. For the sake of generality, we will require our Markov chains to be ergodic.

At a high level, MCMC algorithms have the following structure. They take as arguments a transition operator $T$ specifying a Markov chain whose stationary distribution is $p$, and an initial assignment $X_0 = x_0$ of the chain. An MCMC algorithm then performs the following steps:

1. Run the Markov chain from $x_0$ for $B$ burn-in steps.

2. Run the Markov chain for $N$ sampling steps and collect all the states that it visits.

The aim of the burn-in phase is to wait until the state distribution is reasonably close to $p$. Therefore, we omit the first $B$ states visited by the chain and then collect a sample of size $N$ from the chain. A common approach to setting the number $B$ is to use a variety of heuristics to try to evaluate the extent to which a sample trajectory has "mixed", i.e. when it is reasonably close to $p$ (see Koller and Friedman (2009)). Geyer (2011) argues that burn-in is unnecessary and uses other ways of finding good starting points, while Gelman and Shirley (2012) propose to discard the first half of the generated sequences. We may then use these samples for Monte Carlo integration (or in importance sampling). We may also use them to produce Monte Carlo estimates of marginal probabilities. Finally, we may take the sample with the highest probability and use it as an estimate of the mode (i.e. perform MAP inference).

Before we discuss the two most important special cases, note that sampling-based methods come with only asymptotic theoretical guarantees. Hence, their application to finite samples of reasonable size may lead to drastically inaccurate results, especially in sophisticated and complex models. Successful implementation depends heavily on how well we understand the structure of the model, as well as on intensive experimentation. It can also be achieved by combining sampling with other inference methods.

Metropolis-Hastings Algorithm. The Metropolis-Hastings (MH) algorithm (Hastings (1970)) is one of the first ways to construct Markov chains within MCMC. The MH method constructs a transition operator $T$ from two components:

1. A transition kernel $q$ specified by the user. In practice, the distribution $q(x' \mid x)$ can take almost any form, and very often it is a Gaussian distribution centered at $x$.

2. An acceptance probability for moves proposed by $q$, specified by the algorithm as

\[
A(x' \mid x) = \min\left\{1, \frac{p(x')\, q(x \mid x')}{p(x)\, q(x' \mid x)}\right\}.
\]

At each step, if the Markov chain is in the state $x$, we choose a new point $x'$ according to the distribution $q$. Then we either accept this proposed change with probability $\alpha = A(x' \mid x)$, or with probability $1 - \alpha$ we remain at the current state. Notice that the acceptance probability encourages the chain to move towards more likely points of the distribution (imagine, for example, that $q$ is uniform); when $q$ suggests that we move into a low-probability region, we follow that move only a certain fraction of the time. For any choice of $q$, the MH algorithm ensures that $p$ is a stationary distribution of the resulting Markov chain. More precisely, $p$ satisfies the detailed balance condition with respect to the Markov chain generated by the MH algorithm, which is a direct consequence of the definition of $A(x' \mid x)$.

As a result, we wish to build a Markov chain with a small correlation between subsequent values, which allows us to explore the support of the target distribution rather quickly. This correlation involves a trade-off: the higher the variance of $q$, the lower the correlation between the current state and a newly accepted one, but the more often the proposals land in low-probability regions, so the chain stays in place more frequently, which increases correlation. To choose a good kernel $q$ we need to find a balance between the two. For multivariate distributions the covariance matrix of the proposal distribution should reflect the covariance structure of the target.
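A minimal Metropolis-Hastings sketch with a Gaussian random-walk kernel; because the kernel is symmetric, the $q$-terms in the acceptance ratio cancel. The target density here is an arbitrary unnormalized bimodal example, not anything from the text.

```python
import numpy as np

rng = np.random.default_rng(3)

def p_unnorm(x):
    """Arbitrary unnormalised bimodal target density."""
    return np.exp(-0.5 * (x - 1.0) ** 2) + 0.5 * np.exp(-0.5 * (x + 2.0) ** 2)

def metropolis_hastings(n_steps, x0=0.0, step=1.0):
    x, chain = x0, []
    for _ in range(n_steps):
        x_prop = x + step * rng.normal()          # symmetric kernel q(x'|x)
        accept = min(1.0, p_unnorm(x_prop) / p_unnorm(x))
        if rng.uniform() < accept:
            x = x_prop                            # move to the proposed state
        chain.append(x)                           # otherwise stay at x
    return np.array(chain)

chain = metropolis_hastings(50_000)
burned = chain[5_000:]                            # discard burn-in steps
print(burned.mean(), burned.std())                # Monte Carlo summaries of p
```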
Gibbs sampling. A widely used special case of the Metropolis-Hastings method is Gibbs sampling, first described in Geman and Geman (1984). Suppose we have a finite sequence of random variables $X_1, \ldots, X_n$. We denote the $i$-th sample as $x^{(i)} = (X_1^{(i)}, \ldots, X_n^{(i)})$. Starting with an arbitrary configuration $x^{(0)}$, we perform the procedure below. Repeat until convergence for $t = 1, 2, 3, \ldots$:

1. Set $x \leftarrow x^{(t-1)}$.

2. For each variable $X_i$:
   - sample $X_i' \sim P(X_i \mid X_{-i})$;
   - update $x \leftarrow (X_1^{(t)}, \ldots, X_{i-1}^{(t)}, X_i', X_{i+1}^{(t-1)}, \ldots, X_n^{(t-1)})$.

3. Set $x^{(t)} \leftarrow x$.

By $X_{-i}$ we denote all the variables in our set except $X_i$. At each epoch of step 2 only one site undergoes a possible change, so successive samples within an iteration can differ in at most one coordinate. Note that at this step we use the updated values of the variables for which we have already sampled new values. The sampling step is quite easy to perform, because we only condition on the variables from the Markov blanket of $X_i$, which consists of its parents, its children and the other parents of its children.

In Geman and Geman (1984) it was shown that the distribution of $x^{(t)}$ converges to the target distribution $\pi$ as $t \to \infty$ regardless of $x^{(0)}$. The only assumption is that we continue to visit each site, which is obviously a necessary condition for convergence. As with any MCMC algorithm, if we choose an arbitrary starting configuration there is a burn-in phase; for a list of intuitions on how to decide how many samples to discard see Casella and George (1992). To reduce the high correlation between successive samples in the Gibbs sampler we can also keep only every $r$-th sample, which is again a matter of heuristics and experimentation.
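A minimal Gibbs sampler for a hypothetical joint table over two binary variables; each step resamples one coordinate from its full conditional, using the already-updated value of the other coordinate.

```python
import numpy as np

rng = np.random.default_rng(4)

# Hypothetical joint distribution P[x1, x2] over two binary variables.
P = np.array([[0.30, 0.10],
              [0.15, 0.45]])

def gibbs(n_steps, x=(0, 0)):
    x1, x2 = x
    samples = []
    for _ in range(n_steps):
        cond1 = P[:, x2] / P[:, x2].sum()      # P(X1 | X2 = x2)
        x1 = rng.choice(2, p=cond1)
        cond2 = P[x1, :] / P[x1, :].sum()      # P(X2 | X1 = x1), updated x1
        x2 = rng.choice(2, p=cond2)
        samples.append((x1, x2))
    return samples

samples = gibbs(50_000)[1_000:]                # drop a short burn-in
freq = np.zeros((2, 2))
for x1, x2 in samples:
    freq[x1, x2] += 1
print(freq / freq.sum())                       # approaches the table P
```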
3.3 Learning probabilities in BNs for incomplete data

Here we again consider categorical distributions. Suppose we observe a single incomplete case in our data, which we denote as $d \in D$. Under the assumption of parameter independence, we can compute the posterior distribution of $\theta_{ij}$ for our network as follows:

\[
p(\theta_{ij} \mid d) = (1 - p(\mathrm{pa}_i^j \mid d))\,\{p(\theta_{ij})\} + \sum_{k=1}^{r_i} p(x_i^k, \mathrm{pa}_i^j \mid d)\,\{p(\theta_{ij} \mid x_i^k, \mathrm{pa}_i^j)\}.
\]

Each term in curly brackets in this equation is a Dirichlet distribution. Thus, unless both $X_i$ and all the variables in $\mathrm{pa}(X_i)$ are observed in case $d$, the posterior distribution of $\theta_{ij}$ will be a linear combination of Dirichlet distributions, that is, a Dirichlet mixture with mixing coefficients $(1 - p(\mathrm{pa}_i^j \mid d))$ and $p(x_i^k, \mathrm{pa}_i^j \mid d)$, $1 \le k \le r_i$. See Spiegelhalter and Lauritzen (1990) for the details of the derivation.

When we observe a second incomplete case, some or all of the Dirichlet components in the previous equation will again split into Dirichlet mixtures. More precisely, the posterior distribution of $\theta_{ij}$ will become a mixture of Dirichlet mixtures. As we continue to observe incomplete cases, each of which has missing values for the same set of variables, the posterior distribution of $\theta_{ij}$ will contain a number of components that is exponential in the number of cases. In general, for any interesting set of local likelihoods and priors, the exact computation of the posterior distribution of $\theta$ will be intractable. Thus, we require an approximation for incomplete data.

One possible approximation are the Monte Carlo methods discussed previously, for example the Gibbs sampler, which must be irreducible and in which each variable must be chosen infinitely often. More specifically for our case, to approximate $p(\theta \mid D)$ given an incomplete data set, we start with some initial states of the unobserved variables in each case (chosen randomly or otherwise), so that we have a complete random sample $D_c$. Then we choose some variable $X_i[l]$ (variable $X_i$ in case $l$) that is not observed in the original random sample $D$, and reassign its state according to the probability distribution

\[
p(x_{il}' \mid D_c \setminus \{x_{il}\}) = \frac{p(x_{il}', D_c \setminus \{x_{il}\})}{\sum_{x_{il}''} p(x_{il}'', D_c \setminus \{x_{il}\})},
\]

where $D_c \setminus \{x_{il}\}$ denotes the data set $D_c$ with the observation $x_{il}$ removed, and the sum in the denominator runs over all states of the variable $X_i$. Both the numerator and the denominator can be computed efficiently as in (3.6). In the third step we repeat this reassignment for all unobserved variables in $D$, producing a new complete random sample $D_c'$. The fourth step is to compute the posterior density $p(\theta_{ij} \mid D_c')$ as in (3.7); under the assumption of parameter independence, the joint posterior $p(\theta \mid D_c')$ is the product of all the densities $p(\theta_{ij} \mid D_c')$. Finally, we iterate through the last three steps and use the average of $p(\theta \mid D_c')$ as our approximation.

Monte Carlo methods yield accurate results, but they are often intractable, for example when the sample size is large. Another approximation that is more efficient than Monte Carlo methods, and often accurate for relatively large samples, is the Gaussian approximation. The idea is that for large amounts of data the distribution $p(\theta \mid D) \propto p(D \mid \theta) p(\theta)$ can be approximated as a multivariate Gaussian distribution, namely

\[
p(\theta \mid D) \approx p(D \mid \tilde{\theta})\, p(\tilde{\theta}) \exp\left(-\tfrac{1}{2}(\theta - \tilde{\theta}) H (\theta - \tilde{\theta})^\top\right),
\]

where $\tilde{\theta}$ is the configuration of $\theta$ that maximizes $g(\theta) = \ln(p(D \mid \theta) p(\theta))$, and $H$ is the negative Hessian of $g(\theta)$ evaluated at $\tilde{\theta}$. The vector $\tilde{\theta}$ is also called the maximum a posteriori (MAP) configuration of $\theta$.
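A one-dimensional sketch of this Gaussian (Laplace) approximation, assuming a hypothetical Beta-Bernoulli model: the MAP configuration is found numerically and the negative second derivative $H$ by central differences.

```python
import numpy as np
from scipy.optimize import minimize_scalar

# Hypothetical setting: a single CPD parameter theta with a Beta(2, 2) prior
# and 30 Bernoulli observations, 21 of them successes.
n, k = 30, 21

def neg_g(theta):
    # -g(theta) = -(ln p(D | theta) + ln p(theta)), up to additive constants.
    return -(k * np.log(theta) + (n - k) * np.log(1 - theta)
             + np.log(theta) + np.log(1 - theta))       # Beta(2, 2) prior

res = minimize_scalar(neg_g, bounds=(1e-6, 1 - 1e-6), method='bounded')
theta_map = res.x                                       # MAP configuration

# H = negative second derivative of g at the MAP, via central differences.
h = 1e-5
H = (neg_g(theta_map + h) - 2 * neg_g(theta_map) + neg_g(theta_map - h)) / h**2

print(theta_map)          # analytic MAP for comparison: (k + 1) / (n + 2)
print(1 / np.sqrt(H))     # standard deviation of the Gaussian approximation
```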
Various methods for computing the second derivatives have been proposed in the literature (Meng and Rubin (1991), Raftery (1995), Thiesson (1995)).

One more way to learn probabilities from incomplete data is the Expectation-Maximization (EM) algorithm. It is an iterative algorithm consisting of two alternating steps, Expectation and Maximization. When the data is incomplete, we cannot calculate the likelihood function as in (3.2) and (3.3). Instead of maximizing the likelihood or log-likelihood function, we maximize the expected log-likelihood of the complete data set with respect to the joint distribution of $\mathbf{X}$ conditioned on the assigned configuration of the parameter vector $\theta'$ and the known data $D$. The calculation of the expected log-likelihood (Expectation step) amounts to computing expected sufficient statistics. For incomplete data the expected log-likelihood takes the following form:

\[
\mathbb{E}[\ell(\theta) \mid D, \theta'] = \sum_{i=1}^{n} \sum_{l=1}^{q_i} \sum_{k=1}^{r_i} \hat{N}_{ilk} \log(\theta_{ilk}),
\]

where

\[
\hat{N}_{ilk} = \mathbb{E}[\mathbb{I}(X_i = x_i^k, \mathrm{pa}(X_i) = \mathrm{pa}_i^l) \mid D, \theta'] = \sum_{j=1}^{m} P(X_i = x_i^k, \mathrm{pa}(X_i) = \mathrm{pa}_i^l \mid d_j, \theta').
\]

Here $d_j$ is the possibly incomplete $j$-th case in $D$. When $X_i$ and all the variables in $\mathrm{pa}(X_i)$ are observed in a case, the term for this case requires a trivial computation: it is either zero or one. Otherwise, we can use any Bayesian network inference algorithm discussed above to evaluate the term.

Having performed the Expectation step, we want to find the new parameter vector, which is obtained by maximizing the expected log-likelihood (Maximization step). In our case the new parameters at the $r$-th iteration are

\[
\theta_{ilk}^{r} = \frac{\hat{N}_{ilk}}{\sum_{k=1}^{r_i} \hat{N}_{ilk}}.
\]

We start the algorithm with an arbitrary (for example, random) parameter configuration $\theta^0$ and iteratively perform the two steps described above until convergence. Dempster et al. (1977) showed that, under certain regularity conditions, the iterations of the Expectation and Maximization steps converge to a local maximum.
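A minimal EM sketch for a hypothetical two-variable network X -> Y in which X is hidden in some cases; the E-step distributes each incomplete case over the values of X according to $P(X \mid y, \theta')$, and the M-step renormalizes the expected counts.

```python
import numpy as np

rng = np.random.default_rng(1)

# Hypothetical network X -> Y, both binary; X is unobserved in some cases.
true_px = np.array([0.3, 0.7])
true_py_x = np.array([[0.8, 0.2], [0.1, 0.9]])   # rows: x, cols: y
data = []
for _ in range(2000):
    x = rng.choice(2, p=true_px)
    y = rng.choice(2, p=true_py_x[x])
    data.append((x if rng.uniform() > 0.4 else None, y))  # hide X 40% of time

px, py_x = np.array([0.5, 0.5]), np.full((2, 2), 0.5)     # arbitrary start
for _ in range(50):
    Nx, Nxy = np.zeros(2), np.zeros((2, 2))
    for x, y in data:                                     # E-step
        if x is not None:
            w = np.zeros(2); w[x] = 1.0                   # observed: hard count
        else:
            w = px * py_x[:, y]; w /= w.sum()             # P(X | y, theta')
        Nx += w
        Nxy[:, y] += w
    px = Nx / Nx.sum()                                    # M-step
    py_x = Nxy / Nxy.sum(axis=1, keepdims=True)

print(px, py_x, sep='\n')   # close to the generating parameters
```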
3.4 Learning parameters for CTBNs

The new method we propose in the next chapters for learning CTBNs is capable of performing both tasks of parameter learning and structure learning simultaneously, although naturally these tasks can be performed separately. In this section we review selected methods focused only on parameter learning.

3.4.1 Data

In this thesis we discuss both complete and incomplete data. In essence, a CTBN models the joint trajectories of its variables; hence, having complete, or fully observed, data means that for each point in time of each trajectory we know the full instantiation of all variables.

By $D = \{\sigma[1], \ldots, \sigma[m]\}$ we denote a data set of trajectories. In the case of complete data, each $\sigma[i]$ is a complete set of state transitions and the times at which they occurred. Another way to specify each trajectory is as a sequence of states $x_i \in \mathrm{Val}(\mathbf{X})$, each with an associated duration.

In contrast, an incomplete data set can be represented by a set of one or more partial trajectories. A partially observed trajectory $\sigma \in D$ can be specified as a sequence of subsystems $S_i$ of $\mathbf{X}$, each with an associated duration. A subsystem $S$ describes the behaviour of the process over a subset of the full state space, i.e. $\mathrm{Val}(S) \subset \mathrm{Val}(\mathbf{X})$. It is simply a nonempty subset of the states of $\mathbf{X}$, in which we know the system stayed for the duration of the observation. Some transitions are partially observed, i.e. we know only that they take us from one subsystem to another. Transitions from one state to another within a subsystem are fully unobserved; hence, we know neither how many transitions occur inside a particular subsystem nor when they occur.

3.4.2 Learning parameters for complete data

Recall that a CTBN $\mathcal{N}$ consists of two parts. The first is an initial distribution $P_0^{\mathbf{X}}$, specified as a Bayesian network over $\mathbf{X}$. The second is a continuous transition model, specified as a directed (and possibly cyclic) graph and a set of conditional intensity matrices (CIMs), one for each variable $X_i$ in the network. For the purposes of this section we abbreviate $\mathrm{pa}_{\mathcal{G}}(X_i)$ as $\mathrm{pa}(X_i)$, and we denote the CIMs by $Q_{X_i \mid \mathrm{pa}(X_i)}$. Recall that each $Q_{X_i \mid \mathrm{pa}(X_i)}$ consists of intensity matrices $Q_{X_i \mid \mathrm{pa}_i}$, where $\mathrm{pa}_i$ is a single configuration of $\mathrm{pa}(X_i)$. Strictly speaking, $\mathrm{pa}_i$ is one of the possible parent configurations $\mathrm{pa}_i^1, \ldots, \mathrm{pa}_i^{q_i}$, similarly to (3.1). In terms of the pure intensity parameterization we denote the elements of these matrices by $q_{x x' \mid \mathrm{pa}_i}$ and $q_{x \mid \mathrm{pa}_i}$. Note that by Theorem 2.9 we can divide the set of parameters in terms of the mixed intensity parameterization into two sets. Then for each variable $X_i$ and each instantiation $\mathrm{pa}_i$ of its parent set $\mathrm{pa}(X_i)$, the parameters of $Q_{X_i \mid \mathrm{pa}(X_i)}$ are $q_{X_i} = \{q_{x \mid \mathrm{pa}_i} : x \in \mathrm{Val}(X_i)\}$ and $\theta_{X_i} = \{\theta_{x x' \mid \mathrm{pa}_i} : x, x' \in \mathrm{Val}(X_i),\ x \ne x'\}$. More precisely, for each $X_i$ and every $x \in \mathrm{Val}(X_i)$ we have

\[
\theta_{x x' \mid \mathrm{pa}_i} = \frac{q_{x x' \mid \mathrm{pa}_i}}{\sum_{x'} q_{x x' \mid \mathrm{pa}_i}}, \qquad x' \in \mathrm{Val}(X_i), \quad x \ne x'.
\]

The learning problem for the initial distribution is a Bayesian network learning task, which was discussed previously in this chapter. Therefore, it remains to learn the vector of parameters $(q, \theta)$.

Likelihood estimation. Let us start with the fully observed case and a single homogeneous Markov process $X(t)$. As all the transitions are observed, the likelihood of $D$ can be decomposed as a product of the likelihoods of the individual transitions $d$. Let $d = \langle x_d, t_d, x_d' \rangle \in D$ be the transition in which $X$ moves to the state $x_d'$ after spending the amount of time $t_d$ in the state $x_d$. Using the mixed intensity parameterization, we can write the likelihood of the single transition $d$ as

\[
L_X(q, \theta : d) = L_X(q : d)\, L_X(\theta : d) = q_{x_d} \exp(-q_{x_d} t_d) \cdot \theta_{x_d x_d'}.
\]

Multiplying the likelihoods of all the transitions $d$ in our data $D$, we can summarize it in terms of the sufficient statistics $T[x]$, the total amount of time spent in each state $x \in \mathrm{Val}(X)$, and $M[x, x']$, the number of transitions from $x$ to $x'$, $x \ne x'$, as follows:

\[
L_X(q, \theta : D) = \left(\prod_{d \in D} L_X(q : d)\right) \left(\prod_{d \in D} L_X(\theta : d)\right) = \left(\prod_{x} q_x^{M[x]} \exp(-q_x T[x])\right) \left(\prod_{x} \prod_{x' \ne x} \theta_{x x'}^{M[x, x']}\right), \qquad (3.16)
\]

where $M[x] = \sum_{x'} M[x, x']$.

Now, in the case of CTBNs, each variable $X$ of the network $\mathcal{N}$ is conditioned on its parent set $\mathbf{Pa} = \mathrm{pa}_{\mathcal{G}}(X)$, and each transition of $X$ must be considered in the context of the instantiation $\mathrm{pa}$ of $\mathbf{Pa}$. With complete data, we know the value of $\mathbf{Pa}$ during the entire trajectory, so at each point in time we know precisely which homogeneous intensity matrix $Q_{X \mid \mathrm{pa}}$ governed the dynamics of $X$. Thus, the likelihood decomposes into a product of likelihoods, one for each variable in the network:

\[
L_{\mathcal{N}}(q, \theta : D) = \prod_{X_i \in \mathbf{X}} L_{X_i}(q_{X_i \mid \mathbf{Pa}_i}, \theta_{X_i \mid \mathbf{Pa}_i} : D) = \prod_{X_i \in \mathbf{X}} L_{X_i}(q_{X_i \mid \mathbf{Pa}_i} : D)\, L_{X_i}(\theta_{X_i \mid \mathbf{Pa}_i} : D).
\]

The term $L_X(\theta_{X \mid \mathbf{Pa}} : D)$ is the probability of the sequence of state transitions, disregarding the times between transitions. These state changes depend only on the value of the parents at the moment of the transition.
For each variable $X \in \mathbf{X}$, let $M[x, x' \mid \mathrm{pa}]$ denote the number of transitions from $X = x$ to $X = x'$ while $\mathbf{Pa} = \mathrm{pa}$. Then, with this set of sufficient statistics, we have

\[
L_X(\theta_{X \mid \mathbf{Pa}} : D) = \prod_{\mathrm{pa}} \prod_{x} \prod_{x' \ne x} \theta_{x x' \mid \mathrm{pa}}^{M[x, x' \mid \mathrm{pa}]}.
\]

The computation of $L_X(q_{X \mid \mathbf{Pa}} : D)$ is more subtle, since the duration in a state can be terminated not only by a transition of $X$, but also by a transition of one of its parents. The total amount of time during which $X = x$ and $\mathbf{Pa} = \mathrm{pa}$ can be decomposed into two kinds of durations, $T[x \mid \mathrm{pa}] = T_r[x \mid \mathrm{pa}] + T_c[x \mid \mathrm{pa}]$, where $T_r[x \mid \mathrm{pa}]$ is the total length of the time intervals that terminate with $X$ remaining equal to $x$, and $T_c[x \mid \mathrm{pa}]$ is the total length of the time intervals that terminate with a change in the value of $X$. However, it is easy to show that we do not need to maintain the distinction between the two, and we can use the set of $T[x \mid \mathrm{pa}]$ as sufficient statistics.

Finally, we can write the log-likelihood as a sum of local variable likelihoods of the form

\[
\ell_X(q, \theta : D) = \ell_X(q : D) + \ell_X(\theta : D) = \left(\sum_{\mathrm{pa}} \sum_{x} M[x \mid \mathrm{pa}] \log q_{x \mid \mathrm{pa}} - q_{x \mid \mathrm{pa}} T[x \mid \mathrm{pa}]\right) + \left(\sum_{\mathrm{pa}} \sum_{x} \sum_{x' \ne x} M[x, x' \mid \mathrm{pa}] \log \theta_{x x' \mid \mathrm{pa}}\right). \qquad (3.17)
\]

Now we can write the maximum-likelihood (MLE) parameters as functions of the sufficient statistics as follows (for the proof see Nodelman (2007)):

\[
\hat{q}_{x \mid \mathrm{pa}} = \frac{M[x \mid \mathrm{pa}]}{T[x \mid \mathrm{pa}]}, \qquad \hat{\theta}_{x x' \mid \mathrm{pa}} = \frac{M[x, x' \mid \mathrm{pa}]}{M[x \mid \mathrm{pa}]}.
\]

The Bayesian approach. The other way to estimate parameters in the case of fully observed data is the Bayesian approach. To perform Bayesian parameter estimation, similarly to the case of Bayesian networks, for computational efficiency we use a conjugate prior (one where the posterior after conditioning on the data is in the same parametric family as the prior) over the parameters of our CTBN.

For a single Markov process we have two types of parameters: a vector of parameters $\theta$ for the categorical distribution and $q$ for the exponential distribution. An appropriate conjugate prior for the exponential parameter $q$ is the Gamma distribution $P(q) = \mathrm{Gamma}(\alpha, \tau)$, and, as we mentioned in Section 3.1, the standard conjugate prior for the categorical distribution is a Dirichlet distribution $P(\theta) = \mathrm{Dir}(\alpha_{x x_1}, \ldots, \alpha_{x x_k})$. The posterior distributions $P(\theta \mid D)$ and $P(q \mid D)$ given the data are then Dirichlet and Gamma distributions, respectively.

In order to apply this idea to an entire CTBN we need to make two standard assumptions for parameter priors in Bayesian networks: global parameter independence,

\[
P(q, \theta) = \prod_{X \in \mathbf{X}} P(q_{X \mid \mathrm{pa}_{\mathcal{G}}(X)}, \theta_{X \mid \mathrm{pa}_{\mathcal{G}}(X)}),
\]

and local parameter independence for each variable $X$ in the network,

\[
P(q_{X \mid \mathbf{Pa}}, \theta_{X \mid \mathbf{Pa}}) = \left(\prod_{x} \prod_{\mathrm{pa}} P(q_{x \mid \mathrm{pa}})\right) \left(\prod_{x} \prod_{\mathrm{pa}} P(\theta_{x \mid \mathrm{pa}})\right).
\]

If our parameter prior satisfies these assumptions, then so does our posterior, as it belongs to the same parametric family. Thus, we can maintain our parameter distribution in closed form and update it using the obvious sufficient statistics: $M[x, x' \mid \mathrm{pa}]$ for $\theta_{x \mid \mathrm{pa}}$, and $M[x \mid \mathrm{pa}]$, $T[x \mid \mathrm{pa}]$ for $q_{x \mid \mathrm{pa}}$.

Given a parameter distribution, we can use it to predict the next event, averaging the event probability over the possible values of the parameters. As usual, this prediction is equivalent to using "expected" parameter values, which have the same form as the MLE parameters but account for the "imaginary counts" of the hyperparameters:

\[
\hat{q}_{x \mid \mathrm{pa}} = \frac{\alpha_{x \mid \mathrm{pa}} + M[x \mid \mathrm{pa}]}{\tau_{x \mid \mathrm{pa}} + T[x \mid \mathrm{pa}]}, \qquad \hat{\theta}_{x x' \mid \mathrm{pa}} = \frac{\alpha_{x x' \mid \mathrm{pa}} + M[x, x' \mid \mathrm{pa}]}{\alpha_{x \mid \mathrm{pa}} + M[x \mid \mathrm{pa}]}.
\]
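A sketch computing the sufficient statistics $T[x]$ and $M[x, x']$ from a hypothetical fully observed trajectory of a single process, together with the MLE and the posterior-mean ("expected") estimates above; for a CTBN variable the same counts would simply be kept per parent instantiation pa, and the hyperparameter values here are arbitrary.

```python
from collections import defaultdict

# Hypothetical fully observed trajectory of a 3-state process, as a list of
# transitions d = (x, t, x'): state, time spent in it, next state.
trajectory = [(0, 1.2, 1), (1, 0.3, 2), (2, 2.0, 0), (0, 0.7, 2), (2, 0.5, 1)]
n_states = 3

T = defaultdict(float)              # T[x]: total time spent in x
M = defaultdict(float)              # M[x, x']: number of x -> x' transitions
for x, t, x_next in trajectory:
    T[x] += t
    M[x, x_next] += 1
M_x = {x: sum(M[x, xp] for xp in range(n_states)) for x in range(n_states)}

# Maximum-likelihood estimates q_hat = M[x]/T[x], theta_hat = M[x,x']/M[x].
q_hat = {x: M_x[x] / T[x] for x in T}
theta_hat = {(x, xp): M[x, xp] / M_x[x] for (x, xp) in M}

# Posterior-mean estimates with Gamma(alpha, tau) and Dirichlet priors;
# alpha_x is the sum of alpha_xx' over the n_states - 1 possible targets.
alpha, tau, alpha_xxp = 1.0, 1.0, 1.0
alpha_x = (n_states - 1) * alpha_xxp
q_bayes = {x: (alpha + M_x[x]) / (tau + T[x]) for x in T}
theta_bayes = {(x, xp): (alpha_xxp + M[x, xp]) / (alpha_x + M_x[x])
               for (x, xp) in M}

print(q_hat, theta_hat, q_bayes, theta_bayes, sep='\n')
```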
Note that, in principle, this choice of parameters is only valid for predicting a single transition, after which we should update our parameter distribution accordingly. However, as is often done in other settings, we can approximate the exact Bayesian computation by "freezing" the parameters to these expected values and using them to predict an entire trajectory.

3.4.3 Learning parameters for incomplete data

Recall that in the case of Bayesian networks, one of the methods for dealing with missing data was the Expectation-Maximization (EM) algorithm. Here we provide a concise description of the EM-based algorithm for CTBNs presented in detail in Nodelman et al. (2012). We start again by reviewing the EM scheme for a single Markov process $X$, which is the basis of the algorithm for CTBNs. Let $D = \{\sigma[1], \ldots, \sigma[m]\}$ denote the set of all partially observed trajectories of $X$.

For each partial trajectory $\sigma[i] \in D$ we can consider the space $H[i]$ of possible completions of this trajectory. For every transition of $\sigma[i]$, each completion $h[i] \in H[i]$ specifies which underlying transition of $X$ occurred; it also specifies all the entirely unobserved transitions of $X$. Combining $\sigma[i]$ and $h[i]$ gives us a complete trajectory $\sigma^+[i]$ over $X$. Note that in a partially observed trajectory the number of possible unobserved transitions is unknown; moreover, there are uncountably many times at which each transition can take place. Nevertheless, we can define the set $D^+ = \{\sigma^+[1], \ldots, \sigma^+[m]\}$ of completions of all of the partial trajectories in $D$. For examples of completions see Nodelman et al. (2012).

As we mentioned in the previous subsection, the sufficient statistics of the set of complete trajectories $D^+$ for a Markov process are $T[x]$, the total amount of time that $X$ stays in $x$, and $M[x, x']$, the number of times that $X$ transitions from $x$ to $x'$. Applying the logarithm to (3.16), we can write the log-likelihood $\ell_X(q, \theta : D^+)$ for $X$ as an expression in these sufficient statistics.

Let $r$ be a probability density over each completion in $H[i]$, which in turn yields a density over possible completions of the data $D^+$. We write the expectations of the sufficient statistics with respect to this density as $\bar{T}[x]$, $\bar{M}[x, x']$ and $\bar{M}[x]$. These expected sufficient statistics allow us to write the expected log-likelihood for $X$ as

\[
\mathbb{E}_r[\ell_X(q, \theta : D^+)] = \mathbb{E}_r[\ell_X(q : D^+)] + \mathbb{E}_r[\ell_X(\theta : D^+)] = \sum_{x} \left( \bar{M}[x] \ln(q_x) - q_x \bar{T}[x] + \sum_{x' \ne x} \bar{M}[x, x'] \ln(\theta_{x x'}) \right).
\]

Now we can use the EM algorithm to find maximum-likelihood parameters $q, \theta$ of $X$. The EM algorithm begins with an arbitrary initial parameter assignment $q^0, \theta^0$; it then repeats the two steps, Expectation and Maximization, updating the parameter set until convergence. After the $k$-th iteration we start with the parameters $q^k, \theta^k$. The Expectation step goes as follows: using the current set of parameters, we define for each $\sigma[i] \in D$ the probability density $r_k(h[i]) = p(h[i] \mid \sigma[i], q^k, \theta^k)$, and we compute the expected sufficient statistics $\bar{T}[x]$, $\bar{M}[x, x']$ and $\bar{M}[x]$ according to this posterior density over completions of the data, given the data and the model. In the Maximization step, using the expected sufficient statistics we have just computed as if they came from a complete data set, we set $q^{k+1}$ and $\theta^{k+1}$ to be the new maximum-likelihood parameters of our model:

\[
q_x^{k+1} = \frac{\bar{M}[x]}{\bar{T}[x]}, \qquad \theta_{x x'}^{k+1} = \frac{\bar{M}[x, x']}{\bar{M}[x]}. \qquad (3.18)
\]

The difficult part of this algorithm is the Expectation step.
The space over which we are integrating is highly complex, and it is not immediately clear how to compute the expected sufficient statistics in a tractable way.

Nodelman et al. (2012) and Nodelman (2007) describe in detail how to compute the expected sufficient statistics for an $n$-state homogeneous Markov process $X_t$ with intensity matrix $Q_X$, with respect to the posterior probability density over completions of the data given the observations and the current model. The statistics are computed for each partially observed trajectory $\sigma \in D$ separately, and then the results are combined.

A partially observed trajectory $\sigma$ is given as a sequence of $N$ subsystems, so that the state is restricted to the subsystem $S_i$ during the interval $[t_i, t_{i+1})$ for $0 \le i \le N - 1$. To carry out all the necessary computations, for each time $t$ the forward and backward probability vectors $\alpha_t$ and $\beta_t$ are defined, which include the evidence of any transition at time $t$, along with the vectors $\alpha_t^-$ and $\beta_t^+$, neither of which includes evidence of a transition at time $t$.

The total expected time $\mathbb{E}[T[j]]$ is obtained by summing the integrals over all intervals of constant evidence $[v, w)$, with $S$ the subsystem to which the state is restricted on that interval. Each integrand is an expression containing $\alpha_v$, $\beta_w$ and $Q_S$. The computations for each integral are performed via the fourth-order Runge-Kutta method with an adaptive step size.

Regarding the expected number of transitions $\mathbb{E}[M[x, x']]$ from the state $x$ to $x'$, one considers discrete-time approximations of $M[x, x']$ which, in the limit as the size of the discretization goes to zero, yield an exact equation. As a result, we get a sum of expressions in which each summand is associated with a time interval. The overall expression for the expected number of transitions consists of two parts: a sum of products corresponding to intervals with partially observed transitions, containing $\alpha_t^-$ and $\beta_t^+$ for different time points $t$, and a sum of integrals of practically identical form to those obtained for the total expected time.

In order to compute $\alpha_t$ and $\beta_t$, a forward-backward style algorithm (Rabiner and Juang (1986)) over the entire trajectory is used to incorporate the evidence and obtain distributions over the state of the system at every time $t_i$. If needed, it is possible to exclude the incorporation of the evidence of the transition from either the forward or the backward vector and thereby also obtain $\alpha_t^-$ and $\beta_t^+$. We can then write the distribution over the state of the system at time $t$ given all the evidence.

Continuous time Bayesian networks are a factored representation of homogeneous Markov processes; hence, extending the EM algorithm to them involves making it sensitive to a factored state space. As mentioned previously, the log-likelihood decomposes as a sum of local log-likelihoods, one for each variable. With the sufficient statistics $T[x \mid \mathrm{pa}]$, $M[x, x' \mid \mathrm{pa}]$ and $M[x \mid \mathrm{pa}]$ of the set of complete trajectories $D^+$, the likelihood of each variable $X$ in the CTBN $\mathcal{N}$ further decomposes as in (3.17). By linearity of expectation, the expected log-likelihood function decomposes in the same way. So we can write the expected log-likelihood $\mathbb{E}_r[\ell(q, \theta : D^+)]$ as a sum of terms, one for each variable $X$, of a form similar to (3.17), but using the expected sufficient statistics $\bar{T}[x \mid \mathrm{pa}]$, $\bar{M}[x, x' \mid \mathrm{pa}]$ and $\bar{M}[x \mid \mathrm{pa}]$.

The EM algorithm for CTBNs is essentially the same as for homogeneous Markov processes.
We need only specify how evidence in the network induces evidence on the induced Markov process, and how expected sufficient statistics in the Markov process give us the necessary sufficient statistics for the CTBN.

The Maximization step is practically the same as in (3.18); we just use the appropriate expected sufficient statistics for the CTBN case:

\[
q_{x \mid \mathrm{pa}}^{k+1} = \frac{\bar{M}[x \mid \mathrm{pa}]}{\bar{T}[x \mid \mathrm{pa}]}, \qquad \theta_{x x' \mid \mathrm{pa}}^{k+1} = \frac{\bar{M}[x, x' \mid \mathrm{pa}]}{\bar{M}[x \mid \mathrm{pa}]}.
\]

The Expectation step is again more difficult. It could be done by flattening the CTBN into a single homogeneous Markov process with a state space of size exponential in the number of variables, and then following the method described above. However, as the number of variables in the CTBN grows, this process becomes intractable, so we are forced to use approximate inference. We want this approximate algorithm to be able to compute approximate versions of the forward and backward messages $\alpha_t$ and $\beta_s$, and to extract the relevant sufficient statistics from these messages efficiently. In the next section we review a cluster graph inference algorithm which can be used to perform this type of approximate inference. Using the obtained cluster beliefs (see below) we can compute $\alpha_{t_{i+1}}$ and $\beta_{t_i}$ and use them in the forward-backward message passing procedure. The cluster distributions are represented as local intensity matrices, from which we can compute the expected sufficient statistics over the families $X_i, \mathrm{pa}_{\mathcal{G}}(X_i)$ as described above.

3.5 Inference for CTBNs

To gain perspective on the whole concept of continuous time Bayesian networks and their power, similarly to Bayesian networks, we discuss the questions of inference, although it is not the key subject of this thesis. We start with a discussion of the types of queries we might wish to answer and the difficulties of exact inference.

Inference for CTBNs can take a number of forms. The common types of queries are:

- querying the marginal distribution of a variable at a particular time, or the time at which a variable first takes a particular value;
- querying the expected number of transitions of a variable during a fixed time interval;
- querying the expected amount of time a variable stayed in a particular state during an interval.

Previously we showed that we can view a CTBN as a compact representation of a joint intensity matrix for a homogeneous Markov process. Thus, at least in principle, we can use a CTBN to answer any query that we can answer using an explicit representation of a Markov process: we can form the joint intensity matrix and then answer queries just as we would for any homogeneous Markov process (a sketch of this flat approach for a toy network follows below).

The obvious flaw is that answering queries this way requires us to generate the full joint intensity matrix for the system as a whole. The size of this matrix is exponential in the number of variables, making the approach generally intractable. The graphical structure of the CTBN immediately suggests that we perform the inference in a decomposed way, as in Bayesian networks. Unfortunately, the problems are significantly more complex in this setting.
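For a network small enough to flatten, the sketch below forms a hypothetical 4 x 4 joint intensity matrix over two binary variables and answers a marginal query at time $t$ with a matrix exponential; entries where both variables would change simultaneously are zero, and all rates are invented.

```python
import numpy as np
from scipy.linalg import expm

# Hypothetical flattened CTBN over two binary variables A, B; joint states
# ordered (A, B) = 00, 01, 10, 11. Off-diagonal entries are intensities for
# changing exactly one variable; each row sums to zero.
Q = np.array([
    [-3.0,  1.0,  2.0,  0.0],
    [ 0.5, -2.5,  0.0,  2.0],
    [ 1.0,  0.0, -2.0,  1.0],
    [ 0.0,  1.0,  0.5, -1.5],
])

p0 = np.array([1.0, 0.0, 0.0, 0.0])   # start in state (A, B) = (0, 0)
t = 0.8
pt = p0 @ expm(Q * t)                  # distribution at time t

print(pt)                              # joint marginal at time t
print(pt[2] + pt[3])                   # P(A = 1 at time t), summing B out
```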
In Nodelman et al. (2002) the authors describe an approximate inference algorithm based on ideas from clique tree inference, but without any formal justification for the algorithm. More importantly, that algorithm covers only point evidence, meaning observations of the value of a variable at a single point in time; but in many applications we observe a variable over an interval, or even over its entire trajectory. Therefore, we briefly describe an approximate inference algorithm called Expectation Propagation (EP), presented in Nodelman et al. (2005), that allows both point and interval evidence. The algorithm uses message passing in a cluster graph (with clique tree algorithms as a special case), where the clusters contain distributions not over the cluster variables at individual time points, but over trajectories of the variables through a duration.

As we discussed in this chapter, in cluster graph algorithms we construct a graph whose nodes correspond to clusters of variables, and then pass messages between these clusters to produce an alternative parameterization, in which the marginal distribution of the variables in each cluster can be read directly from the cluster. In discrete graphical models, when the cluster graph is a clique tree, two passes of the message passing algorithm produce the exact marginals. In generalized belief propagation (Yedidia et al. (2001)), message passing is applied to a graph which is not a clique tree, in which case the algorithm may not converge and produces only approximate solutions. There are several forms of the message passing algorithm, as we discussed in Subsection 3.2.2. The algorithm of Nodelman et al. (2005) is based on the multiply-marginalize-divide scheme of Lauritzen and Spiegelhalter (1988), which we now briefly review.

A cluster graph is defined in terms of a set of clusters $\mathbf{C}_i$, whose scope is some subset of the variables $\mathbf{X}$. Clusters are connected to each other by edges, along which messages are passed. The edges are annotated with a set of variables called a sepset $\mathbf{S}_{i,j}$, which is the set of variables in $\mathbf{C}_i \cap \mathbf{C}_j$. The messages passed over the edge between $\mathbf{C}_i$ and $\mathbf{C}_j$ are factors over the scope $\mathbf{S}_{i,j}$. Each cluster $\mathbf{C}_i$ maintains a potential $\beta_i$, a factor reflecting its current beliefs over the variables in its scope. Each edge similarly maintains a message $\mu_{i,j}$, which stores the last message sent over the edge. The potentials are initialized with a product of some subset of the factors parameterizing the model (CIMs in our setting), and the messages are initialized to be uninformative. Clusters then send messages to each other and use the incoming messages to update their beliefs over the variables in their scope. The message $m_{i \to j}$ from $\mathbf{C}_i$ to $\mathbf{C}_j$ is the marginal of $\beta_i$ onto $\mathbf{S}_{i,j}$. The neighbouring cluster $\mathbf{C}_j$ assimilates this message by multiplying it into $\beta_j$, but avoids double-counting by first dividing by the stored message $\mu_{i,j}$. Thus, the message update takes the form

\[
\beta_j \leftarrow \beta_j \cdot \frac{m_{i \to j}}{\mu_{i,j}}.
\]
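A sketch of one such multiply-marginalize-divide update, written for ordinary discrete tables rather than the CIMs the CTBN algorithm actually uses; the cluster scopes and all numbers are hypothetical.

```python
import numpy as np

# Clusters C_i = {A, B} and C_j = {B, C} with sepset S = {B}; tables are
# hypothetical, with axes ordered as the listed variables.
beta_i = np.array([[0.3, 0.1],
                   [0.2, 0.4]])          # beliefs over (A, B)
beta_j = np.array([[0.25, 0.25],
                   [0.25, 0.25]])        # beliefs over (B, C)
mu_ij = np.array([1.0, 1.0])             # last message; initially uninformative

m_i_to_j = beta_i.sum(axis=0)            # marginalise beta_i onto the sepset {B}
beta_j = beta_j * (m_i_to_j / mu_ij)[:, None]  # multiply in, divide out old message
mu_ij = m_i_to_j                         # store the message on the edge

print(beta_j)
```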
In this algorithm the cluster beliefs do not represent factors over values of the random variables themselves; rather, the cluster potentials and messages both encode measures over entire trajectories of the variables in their scope. The number of parameters grows exponentially with the size of the network, and thus we cannot pass messages exactly without giving up the computational efficiency of the algorithm. To address this issue, Nodelman et al. (2005) used the expectation propagation (EP) approach of Minka (2001), which performs approximate message passing in cluster graphs. In order to obtain an approximate message, each message $m_{i \to j}$ is projected into a compactly representable space so as to minimize the KL-divergence between the message and its approximation. CIMs are used to encode the cluster potentials. In order to apply the EP algorithm to clusters of this form, some basic operations over CIMs need to be defined; they include CIM product and division, approximate CIM marginalization, as well as incorporating evidence into a CIM.

The message propagation algorithm is first considered for one segment of the trajectory with constant continuous evidence. Exactly as for Bayesian networks, this process starts with constructing the cluster tree for the graph $\mathcal{G}$. Note that cycles do not introduce new issues: we can simply moralize the graph, connecting all parents of a node with undirected edges and then making all the remaining edges undirected. If there is a cycle, it simply turns into a loop in the resulting undirected graph. Next we select a set of clusters $\mathbf{C}_i$. These clusters can be selected so as to produce a clique tree for the graph, using any standard method for constructing such trees. We can also construct a loopy cluster graph and use generalized belief propagation; we did not discuss this topic in the thesis (for more details see Koller and Friedman (2009)). The message passing scheme described in this section is the same in both cases.

The algorithm iteratively selects an edge connecting the clusters $\mathbf{C}_i$ and $\mathbf{C}_j$ in the cluster graph and passes the message from the former to the latter. In clique tree propagation the order in which we choose edges is essentially fixed: we start from the leaves towards the root, performing an upward pass, and then go in the opposite direction. In generalized belief propagation we might use a variety of message passing schedules. Convergence occurs when messages cease to affect the potentials, which means that neighbouring clusters $\mathbf{C}_i$ and $\mathbf{C}_j$ agree on the approximate marginals over the variables from $\mathbf{S}_{i,j}$.

Now we can generalize the algorithm for a single segment to trajectories containing multiple segments of continuous evidence. Nodelman et al. (2005) applied this algorithm separately to every segment, passing information from one segment to the next in the form of distributions. More precisely, consider a trajectory defining a sequence of time points $t_1, \ldots, t_n$, with constant continuous evidence on every interval $[t_i, t_{i+1})$ and possible point evidence or an observed transition at each $t_i$. A sequence of cluster graphs, one per segment, is then constructed. Starting from the initial segment, EP inference is run on each cluster graph using the algorithm for a single segment described above, and the distribution at the end point of the interval is computed. The resulting distribution is then conditioned on any point evidence or observed transition, and is next used as the initial distribution for the following interval.

However, there is one subtle difficulty relating to the propagation of messages from one interval to another. If a variable $X$ appears in two clusters $\mathbf{C}_i$ and $\mathbf{C}_j$ of a cluster graph, the distributions over its values in these two clusters are not in general the same, even if the EP computation converges. The reason is that even calibrated clusters only agree on the projected marginals over their sepset, not on the true marginals.
To address this issue and to obtain a coherent distribution which can be transmitted to the next cluster graph, the individual cluster marginals and sepsets for the state variables at the end point of the previous interval are recalibrated to form a coherent distribution (the conditioning on point evidence can be done at the same time if needed). Then we can extract the new distribution as a set of calibrated cluster and sepset factors, and introduce each factor into the appropriate cluster or sepset in the cluster graph for the next time interval.

The above algorithm propagates beliefs forward in time. It is also possible to do a similar propagation backwards and pass messages in reverse, where the cluster graph for one time interval passes a message to the cluster graph for the previous one. To achieve more accurate beliefs we can also repeat the forward-backward propagation until the entire network is calibrated, essentially treating the entire network as a single cluster graph. Note that since one cluster graph is used for each segment of fixed continuous evidence, each cluster will approximate the trajectory of all the variables it contains as a homogeneous Markov process for the duration of the entire segment. Therefore, the choice of segments, and the resulting subsets of variables over which we compute the distribution, determine the quality of the approximation.

Chapter 4

Structure learning for Bayesian networks

Recall the Definition 2.3 of Bayesian networks (BN), the notion of which combines the structure given by a Directed Acyclic Graph (DAG) and the probability distribution encoded by Conditional Probability Distributions (CPD). So far, in Chapter 3, we discussed the problem of finding CPDs and making the inference given the structure. In this chapter we discuss the problem of learning the structure of Bayesian networks. In Section 4.1 we briefly review known approaches to the problem. In Section 4.2 we recall the partition MCMC algorithm for learning the structure of the network, whose part concerning the division of the graph into layers will be the first step of our new method. In Sections 4.3 and 4.4 we present a novel approach to structure learning which combines the above algorithm with the LASSO approach, for continuous and discrete data, respectively. Section 4.5 is dedicated to numerical results.

4.1 Problem of learning structure of Bayesian Networks

Structure learning is known to be a hard problem, especially due to the superexponential growth of the DAG space as the number of nodes increases. Generally speaking, the literature on structure learning can be divided into three classes: constraint-based methods, score-and-search algorithms and the dynamic programming approach (as discussed, for example, in Koller and Friedman (2009)), even though this division is not strict. The contents of this section come mostly from Kuipers and Moffa (2017) and Daly et al. (2011).

Constraint-based methods use conditional independence tests to obtain information about the underlying causal structure. They start from the full undirected graph and then make decisions about removing edges from the network based on tests of conditional independence. The widely used algorithm of this kind, the PC algorithm (Spirtes et al. (2000)), and constraint-based methods in general, are sensitive to the order in which they are run.
However, Colombo and Maathuis (2014) proposed some modifications of the PC algorithm which remove this dependence either partially or altogether. These methods scale well with the dimension, but are sensitive to local errors of the independence tests which are used.

One of the most widely studied ways of learning a Bayesian network structure has been the use of so-called 'score-and-search' techniques. These algorithms comprise:

• a search space consisting of the various allowable states, each of which represents a Bayesian network structure;
• a mechanism to encode each of the states;
• a mechanism to move from state to state in the search space;
• a scoring function assigning to each state in the search space a score which describes the goodness of fit with the sample data.

Also, some hybrid methods combining ideas from both techniques have been proposed, for example the max-min hill-climbing of Tsamardinos et al. (2006).

Within the family of score-and-search methods we can distinguish a separate class of MCMC methods for graph space exploration. Their main and considerable advantage is that they can provide a collection of samples from the posterior distribution of the graph given the data. This means that rather than making inference based on a single graphical model, we can account for model uncertainty by averaging over all the models in the obtained class. In particular, we can estimate the expectation of any given network feature, such as the posterior probability of an individual edge, by averaging the posterior distributions under each of the models, weighted by their posterior model probabilities (Madigan et al. (1995), Kuipers and Moffa (2017)). This is especially important in high-dimensional domains with sparse data, where a single best model cannot be clearly identified, so inference relying on the best scoring model is not justified.

The first MCMC algorithm over graph structures is due to Madigan et al. (1995), later refined by Giudici and Castelo (2003). To improve mixing and convergence, Friedman and Koller (2001) instead suggested building a Markov chain on the space of node orders, at the price of introducing a bias in the sampling. For smaller systems with smaller space and time complexity one of the efficient approaches is dynamic programming (Koivisto and Sood (2004)), which can be further used to extend the proposals of the standard structure MCMC approach in a hybrid method (Eaton and Murphy (2007)). Within the MCMC approach, to avoid the bias while keeping a reasonable convergence rate, Grzegorczyk and Husmeier (2008) more recently proposed a new edge reversal move method combining ideas of both standard structure and order based MCMC. Recently, Kuipers and Moffa (2017) presented another MCMC algorithm designed on the combinatorial structure of DAGs, with the advantage of improving convergence with respect to structure MCMC, while still providing an unbiased sample, since it acts directly on the space of DAGs. Moreover, it can also be combined with the algorithm of Grzegorczyk and Husmeier (2008) to improve the convergence rate even further.

4.2 Partition MCMC method

In this section we describe the partition MCMC algorithm of Kuipers and Moffa (2017), which will be the base of our novel method for learning the structure of BNs. This algorithm considers a combinatorial representation of DAGs to build an efficient MCMC scheme directly on the space of DAGs.
Its convergence is better than that of structure MCMC, and it does not introduce bias as order based MCMC does. As we mentioned, the authors also proposed a way to combine their method with the new edge reversal move approach of Grzegorczyk and Husmeier (2008) and improve upon their MCMC sampler.

First we need to introduce the notion of layers and partitions for a DAG. Given a DAG $G = (V, E)$ we define the layers $\ell_i$ of the nodes (called interchangeably variables) in the network as follows:

• $\ell_0 = \{v \in V : \mathrm{pa}_G(v) = \emptyset\}$ is the layer of the nodes which do not have any parents;
• having defined the layers $\ell_i$ for $i = 0, 1, \ldots, k-1$, we define the next layer as
\[
\ell_k = \{v \in V : \exists\, w \in \ell_{k-1} \text{ such that } w \in \mathrm{pa}_G(v) \text{ and } \mathrm{pa}_G(v) \subseteq L_{k-1}\},
\qquad \text{where } L_{k-1} = \bigcup_{i \leq k-1} \ell_i.
\]

Note that variables from the same layer do not have arrows between them, and that each variable (except those in the layer $\ell_0$) has at least one arrow directed towards it from a variable in the adjacent previous layer. For instance, the graph in Figure 4.1 has three layers: $\ell_0 = \{1, 3, 5\}$, $\ell_1 = \{4\}$ and $\ell_2 = \{2\}$.

Suppose that for some arbitrary graph we have $q+1$ layers. Each layer $\ell_i$ has a certain number $k_i$ of nodes, which in sum gives the total number of nodes $d$, i.e. $\sum_{i=0}^{q} k_i = d$. In addition, with each layer representation there is an associated permutation of nodes, in which we list the nodes in layer order. More precisely, we first write the nodes from the first layer, then from the second one, and so on. For the graph in Figure 4.1 we have the partition $\lambda = [3, 1, 1]$ and the permutation $\pi_\lambda = [1, 3, 5, 4, 2]$. Together, the pair $(\lambda, \pi_\lambda)$ is called a labelled partition.

[Figure 4.1: An example of the partition representation of a DAG.]

Kuipers and Moffa (2017) proposed an efficient MCMC algorithm for exploring the space of partitions to find the most probable layer representation given the observed data. Although the full algorithm is suited for structure learning, we want to improve on this algorithm and replace its second part with the LASSO estimator. The authors define an MCMC algorithm on the space of node partitions, avoiding in this way the over-representation of certain DAGs. Compared to the other MCMC methods mentioned above, partition MCMC is faster than the structure MCMC of Madigan et al. (1995); it is slower than the order MCMC of Friedman and Koller (2001), but does not introduce any bias. The basic move consists of splitting one element of the partition (i.e. a layer) into two parts or joining two adjacent elements (the authors also propose an additional move consisting of swapping two nodes in adjacent layers). All the partitions reachable from a given partition in one basic move are called the neighbourhood of the partition. So the MCMC scheme consists of sampling a partition from the neighbourhood of the previous partition, with a small user-defined probability of staying still. The obtained partition is scored, and the score coincides with the posterior probability of the labelled partition. After sampling the partition we sample a single DAG weighted according to its posterior. Then we can average the acquired DAGs in the MCMC chain and choose the model. However, we propose to change the step where we sample a DAG from the posterior distribution and average DAGs from the MCMC chain. That step is well suited for inference and estimation of network parameters, but we believe that we can improve on the Bayesian averaging approach in the case of structure learning. We propose to use partition MCMC for finding the best scoring partition, and then to use this partition for recovering arrows with the LASSO estimator, where each parameter corresponds to a certain arrow in the network.
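As an illustration of the layer construction, the following sketch computes the layers $\ell_i$, the partition $\lambda$ and the permutation $\pi_\lambda$ for a DAG given as a parent map. The edge set reproducing Figure 4.1 is an assumption chosen to be consistent with the layers stated above.

```python
def layers(parents):
    """Compute the layers of a DAG given as {node: set of parents}."""
    remaining = set(parents)
    placed = set()            # nodes already assigned to some layer in L_{k-1}
    result = []
    while remaining:
        # Next layer: nodes all of whose parents are already placed
        # (equivalently, layer index = 1 + max layer index of the parents).
        layer = {v for v in remaining if parents[v] <= placed}
        if not layer:
            raise ValueError("graph is not a DAG")
        result.append(sorted(layer))
        placed |= layer
        remaining -= layer
    return result

# Assumed edges for Figure 4.1: 1 -> 4, 3 -> 4, 5 -> 4, 4 -> 2.
parents = {1: set(), 3: set(), 5: set(), 4: {1, 3, 5}, 2: {4}}

ls = layers(parents)
lam = [len(l) for l in ls]           # partition   lambda    = [3, 1, 1]
pi = [v for l in ls for v in l]      # permutation pi_lambda = [1, 3, 5, 4, 2]
print(ls, lam, pi)
```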
4.3 The novel approach to structure learning

We want to combine the advantages of partition MCMC and of the LASSO for linear models. First we find the best layer representation using the partition MCMC algorithm. Next we obtain the final DAG by solving $d$ LASSO problems, where $d$ is the number of variables (nodes). Having found the most probable layer representation for a DAG, we consider two models: one for continuous data and one for discrete data.

4.3.1 Gaussian Bayesian Networks

For the continuous case we consider Gaussian Bayesian Networks (GBN), introduced in Section 3.1. We denote by $X_i^m$ the $m$-th random variable in the $i$-th layer, where $m \in \{1, \ldots, k_i\}$. We assume that each $\epsilon_i^m$ has the normal distribution $N(0, \sigma_i^m)$, and that the noise variables $\epsilon_i^m$ are independent of the predictors from the preceding layers. Now, given the partition $[k_0, k_1, \ldots, k_q]$, we can write the problem of finding the DAG structure as the following set of $d$ linear model problems:
\[
\begin{aligned}
X_0^1 &= \beta_{0,0}^1 + \epsilon_0^1\\
&\;\,\vdots\\
X_0^{k_0} &= \beta_{0,0}^{k_0} + \epsilon_0^{k_0}\\
X_1^1 &= \beta_{1,0}^1 + \beta_{1,0}^{1,1} X_0^1 + \cdots + \beta_{1,0}^{1,k_0} X_0^{k_0} + \epsilon_1^1\\
&\;\,\vdots\\
X_1^{k_1} &= \beta_{1,0}^{k_1} + \beta_{1,0}^{k_1,1} X_0^1 + \cdots + \beta_{1,0}^{k_1,k_0} X_0^{k_0} + \epsilon_1^{k_1}\\
X_2^1 &= \beta_{2,0}^1 + \beta_{2,0}^{1,1} X_0^1 + \cdots + \beta_{2,0}^{1,k_0} X_0^{k_0} + \beta_{2,1}^{1,1} X_1^1 + \cdots + \beta_{2,1}^{1,k_1} X_1^{k_1} + \epsilon_2^1\\
&\;\,\vdots\\
X_q^{k_q} &= \beta_{q,0}^{k_q} + \sum_{j<q} \sum_{m_j} \beta_{q,j}^{k_q,m_j} X_j^{m_j} + \epsilon_q^{k_q}.
\end{aligned}
\tag{4.1}
\]
Thus, for $j = 1, \ldots, q$ and $i = 1, \ldots, k_j$ we compute the LASSO estimators
\[
\hat\beta_j^i = \operatorname*{argmin}_{\theta \in \mathbb{R}^{k_0 + \cdots + k_{j-1}}} \left[ \mathrm{RSS}_{j,i}(\theta) + \lambda_{j,i} \|\theta\|_1 \right],
\tag{4.2}
\]
where $\mathrm{RSS}_{j,i}(\theta)$ is the residual sum of squares for the $i$-th variable in the $j$-th layer, and the tuning parameters $\lambda_{j,i} > 0$ balance the minimization of the cost function and the penalty function. The form of the penalty is crucial, because its singularity at the origin implies that some coordinates of the minimizer $\hat\beta_j^i$ are exactly equal to $0$ if $\lambda_{j,i}$ is sufficiently large. Thus, starting from the graph with all possible arrows for the given layer representation (i.e. with arrows from the variables in each layer towards all the variables in subsequent layers), we remove irrelevant edges. The functions $\mathrm{RSS}_{j,i}(\theta)$ and the penalty are convex, so (4.2) is a convex minimization problem. This is an important fact from both the practical and the theoretical point of view.
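A minimal sketch of this second stage follows. It assumes the layer assignment is already known (e.g. found by partition MCMC), uses scikit-learn's Lasso with an illustrative fixed penalty, and reads off arrows from the nonzero coefficients. It illustrates (4.1)-(4.2) and is not the exact thesis implementation (which uses FISTA with information criteria, see Section 4.5.1).

```python
import numpy as np
from sklearn.linear_model import Lasso

def recover_arrows(X, layers, lam=0.1):
    """X: (m, d) data matrix; layers: list of lists of column indices.
    Returns the set of arrows (u, v), meaning u -> v, per (4.2)."""
    arrows = set()
    preds = []                       # indices of variables in previous layers
    for layer in layers:
        if preds:                    # layer 0 has no candidate parents
            for v in layer:
                model = Lasso(alpha=lam).fit(X[:, preds], X[:, v])
                for u, coef in zip(preds, model.coef_):
                    if abs(coef) > 1e-8:   # nonzero coefficient => arrow
                        arrows.add((u, v))
        preds = preds + list(layer)
    return arrows

# Toy example following Figure 4.1 (columns 0..4 stand for nodes 1..5).
rng = np.random.default_rng(0)
m = 500
X = rng.normal(size=(m, 5))
X[:, 3] = X[:, 0] + X[:, 2] + X[:, 4] + 0.5 * rng.normal(size=m)   # node 4
X[:, 1] = 2.0 * X[:, 3] + 0.5 * rng.normal(size=m)                 # node 2
print(recover_arrows(X, [[0, 2, 4], [3], [1]]))
```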
4.3.2 Theoretical results for GBNs

By $S_{j,i}$ we denote the support of the true vector of parameters $\beta_j^i$, i.e. the set of its nonzero coordinates, and we set $S = \{S_{1,1}, \ldots, S_{1,k_1}, S_{2,1}, \ldots, S_{q,k_q}\}$. Moreover, $\beta_{j,\min}^i$ is the element of $\beta_j^i$ restricted to $S_{j,i}$ which is smallest in absolute value. The set $S_{j,i}^c$ denotes the complement of $S_{j,i}$, that is, the set of zero coordinates of $\beta_j^i$. For any vector $a$ we denote its $l_\infty$-norm by $\|a\|_\infty = \max_k |a_k|$. For a vector $a$ and a subset of indices $I$, by $a_I$ we denote the vector $a$ restricted to its coordinates from the set $I$, i.e. $(a_I)_i = a_i$ for $i \in I$ and $(a_I)_i = 0$ otherwise. Moreover, $|I|$ denotes the number of elements of $I$. For a vector $a = (a_1, \ldots, a_n)$, by $\mathrm{Cov}(a)$ we denote the matrix $(c_{ij})$, where $c_{ii} = \mathrm{Var}(a_i)$ and $c_{ij} = \mathrm{Cov}(a_i, a_j)$.

Before we state the main results of this chapter we introduce the cone invertibility factor (CIF), which plays an important role in the theoretical analysis of the properties of LASSO estimators. In the literature there are three related notions commonly used in this analysis, which help to impose constraints on the optimized function so that the estimator is "good" in a certain sense: the cone invertibility factor, the compatibility factor and the restricted eigenvalue (see Huang et al. (2013) and references therein). For any $\xi > 1$ we define the cones $C(\xi, S_{j,i}) = \{\theta : \|\theta_{S_{j,i}^c}\|_1 \leq \xi \|\theta_{S_{j,i}}\|_1\}$. Then the CIF is defined as
\[
\bar F_{j,i}(\xi) = \inf_{0 \neq \theta \in C(\xi, S_{j,i})} \frac{\|\Sigma^j \theta\|_\infty}{\|\theta\|_\infty},
\tag{4.3}
\]
where $\Sigma^j$ is the covariance matrix of the random vector $(X_1, \ldots, X_{j-1})$ of variables from the first $j$ layers. More precisely,
\[
\Sigma^j = \frac{1}{m} \left( X^{0:(j-1)} \right)^\top X^{0:(j-1)}.
\]
Our goal will be to show that the estimators $\hat\beta_j^i$ are close to the true vectors $\beta_j^i$ in a certain sense. However, if the curvature of the function in (4.2) around $\beta_j^i$ is relatively small, then the closeness between its values at $\hat\beta_j^i$ and $\beta_j^i$ does not necessarily imply the closeness between the arguments $\hat\beta_j^i$ and $\beta_j^i$. Hence, we require some additional conditions, for instance strong convexity of $\mathrm{RSS}_{j,i}$ at $\beta_j^i$, i.e. that the smallest eigenvalue of its Hessian is positive. In the high-dimensional case this assumption is too strong, therefore one usually considers restricted strong convexity or restricted smallest eigenvalues, where "restricted" means that we take the infimum over $C(\xi, S_{j,i})$ instead of the whole space. The CIF (4.3) is an example of such reasoning. We also introduce a non-random version $F_{j,i}(\xi)$ of the CIF for each $j \in \{1, \ldots, q\}$ as follows. First we define
\[
H^j = \mathrm{Cov}\left( X_1[0:(j-1)] \right),
\]
where $X_1[0:(j-1)]$ denotes the restriction of $X_1$ to variables from the first $j$ layers. We assume that each $H^j$ is positive definite and that its diagonal elements are equal to $1$, i.e. $H^j_{ii} = 1$ for $i \in \{1, 2, \ldots, k_1 + \cdots + k_{j-1}\}$. Then
\[
F_{j,i}(\xi) = \inf_{0 \neq \theta \in C(\xi, S_{j,i})} \frac{\|H^j \theta\|_\infty}{\|\theta\|_\infty}.
\tag{4.4}
\]
Since in a Gaussian Bayesian Network the joint distribution of all variables is assumed to be Gaussian, each marginal is Gaussian as well. Hence, for simplicity, we can bound the variance of each variable by the same constant $\tau^2$. We also denote
\[
m_{j,i} = \frac{|S|^2 \tau^4 (1+\xi)^2 \log\left( |L_{j-1}|^2 q k_j / \varepsilon \right)}{F_{j,i}^2(\xi)}
\tag{4.5}
\]
for each $j \in \{1, \ldots, q\}$ and $i \in \{1, \ldots, k_j\}$.

Theorem 4.1. Fix arbitrary $\varepsilon \in (0, 1)$ and $\xi > 1$. Assume that $F_{j,i}(\xi)$ defined in (4.4) is positive for each $j \in \{1, \ldots, q\}$ and $i \in \{1, \ldots, k_j\}$. In addition, suppose that
\[
m \geq K_1 \max_{j,i} m_{j,i}
\tag{4.6}
\]
and that for each $i$ and $j$ we have
\[
\lambda_{j,i} \geq K_2 \frac{\xi + 1}{\xi - 1} \tau \sigma_j^i \sqrt{\frac{\log\left( |L_{j-1}| q k_j / \varepsilon \right)}{m_{j,i}}}
\]
for some universal constants $K_1$ and $K_2$. Then
\[
P\left( \|\hat\beta - \beta\|_\infty \leq \frac{4\xi}{\xi + 1} \max_{j,i} \frac{\lambda_{j,i}}{F_{j,i}} \right) \geq 1 - \varepsilon.
\]

The second main result concerns a thresholded version of the LASSO estimator; it will be proved after the proof of Theorem 4.1. Consider the Thresholded LASSO estimator with the sets of nonzero coordinates $\hat S_{j,i}$. The set $\hat S_{j,i}$ contains only those coefficients of the LASSO estimator (4.2) which are larger in absolute value than some pre-specified threshold $\delta_{j,i}$, for each $j \in \{1, \ldots, q\}$ and $i \in \{1, \ldots, k_j\}$. We denote $\hat S_\delta = \{\hat S_{1,1}, \ldots, \hat S_{1,k_1}, \hat S_{2,1}, \ldots, \hat S_{q,k_q}\}$.

Corollary 4.2. Suppose that the assumptions of Theorem 4.1 are satisfied. If for each $j$, $i$ and arbitrary $\xi > 1$ we have $\beta_{j,\min}^i/2 > \delta_{j,i} \geq \frac{4\xi \lambda_{j,i}}{(\xi+1) F_{j,i}}$, then Thresholded LASSO with $\delta = [\delta_{1,1}, \ldots, \delta_{q,k_q}]$ is consistent, i.e.
\[
P\left( \hat S_\delta = S \right) \geq 1 - \varepsilon.
\]

Before the proof of Theorem 4.1 we state and prove an auxiliary result, Proposition 4.3, which is interesting in its own right. It describes a slightly more general setting and will be used multiple times, for different numbers and sets of predictors and targets, in order to prove Theorem 4.1. Moreover, to avoid any confusion with the indices and notation introduced before, for convenience we use more general notation in the subsequent proofs.
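The thresholded estimator of Corollary 4.2 is straightforward to apply once the LASSO coefficients are available; a small sketch with hypothetical numbers is given below.

```python
import numpy as np

def thresholded_support(beta_hat, delta):
    """Estimated support: coordinates of the LASSO estimate beta_hat
    exceeding the threshold delta in absolute value (Corollary 4.2)."""
    return {k for k, b in enumerate(beta_hat) if abs(b) > delta}

# Hypothetical LASSO output for one node: small spurious coefficients survive
# the L1 penalty, and thresholding removes them. Corollary 4.2 guarantees
# exact support recovery with high probability for a suitable delta.
beta_hat = np.array([0.93, 0.004, 0.0, -1.41, 0.02])
print(thresholded_support(beta_hat, delta=0.1))   # {0, 3}
```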
Hence, let $(Y_1, Z_1), \ldots, (Y_m, Z_m)$ be i.i.d. random vectors such that $Y_i \in \mathbb{R}^p$ and $Z_i \in \mathbb{R}$. The coordinates of $Y_i$ are denoted by $Y_{ij}$ for $j \in \{1, \ldots, p\}$, and by $Y$ we denote the full $(m \times p)$-matrix of predictors $Y = (Y_1, \ldots, Y_m)^\top$. Moreover, let $H = \mathrm{Cov}(Y_1)$ be a positive definite matrix with diagonal elements $H_{jj} = 1$ for $j = 1, \ldots, p$. We assume that
\[
Z_i = \beta^\top Y_i + \varepsilon_i, \qquad i = 1, \ldots, m,
\tag{4.7}
\]
where $\varepsilon_1, \ldots, \varepsilon_m$ are i.i.d. random variables with $\mathbb{E}\varepsilon_i = 0$, which are subgaussian with the parameter $\sigma^2$ and independent of the predictors $Y_{i1}, \ldots, Y_{ip}$. Subgaussianity means that for each $i$ and $a \in \mathbb{R}$
\[
\mathbb{E} \exp(a \varepsilon_i) \leq \exp(a^2 \sigma^2 / 2).
\]
We also assume that the predictors are subgaussian with the parameter $\tau^2$, i.e. $\mathbb{E}\exp(a Y_{1j}) \leq \exp(a^2 \tau^2 / 2)$ for each $j = 1, \ldots, p$.

The goal is to find the set of indices of the relevant predictors
\[
S = \{j \in \{1, \ldots, p\} : \beta_j \neq 0\}.
\tag{4.8}
\]
The set $S^c$ denotes the complement of $S$, that is, the set of zero coordinates of $\beta$. Now consider the LASSO estimator
\[
\hat\beta = \operatorname*{argmin}_{\theta \in \mathbb{R}^p} \left[ \mathrm{RSS}(\theta) + \lambda \|\theta\|_1 \right],
\tag{4.9}
\]
where
\[
\mathrm{RSS}(\theta) = \frac{1}{2m} \sum_{i=1}^m \left( Z_i - \theta^\top Y_i \right)^2.
\]
For any $\xi > 1$ we define the cone $C(\xi, S) = \{\theta : \|\theta_{S^c}\|_1 \leq \xi \|\theta_S\|_1\}$. Then the CIF is defined as
\[
\bar F(\xi) = \inf_{0 \neq \theta \in C(\xi, S)} \frac{\|Y^\top Y \theta / m\|_\infty}{\|\theta\|_\infty},
\]
and its non-random version is given by
\[
F(\xi) = \inf_{0 \neq \theta \in C(\xi, S)} \frac{\|H \theta\|_\infty}{\|\theta\|_\infty}.
\]

Proposition 4.3. Fix arbitrary $a \in (0, 1)$ and $\xi > 1$. Suppose that $F(\xi)$ is positive,
\[
m \geq K_1 \frac{|S|^2 \tau^4 (1 + \xi)^2 \log(p^2/a)}{F^2(\xi)}
\tag{4.10}
\]
and
\[
\lambda \geq K_2 \frac{\xi + 1}{\xi - 1} \tau \sigma \sqrt{\frac{\log(p/a)}{m}},
\tag{4.11}
\]
where $K_1, K_2$ are some universal constants. Then
\[
P\left( \|\hat\beta - \beta\|_\infty \leq \frac{4\xi\lambda}{(\xi + 1) F(\xi)} \right) > 1 - 2a.
\tag{4.12}
\]

The proof of Proposition 4.3 relies on Lemmas 4.4 and 4.6 below.

Lemma 4.4. In the context of the previously defined random variables $Y_{ij}$ and $\varepsilon_i$, where $i \in \{1, \ldots, m\}$, for arbitrary $j = 1, \ldots, p$ and $u > 0$ we have
\[
P\left( \frac{1}{m} \sum_{i=1}^{m} Y_{ij}\varepsilon_i > 4\tau\sigma\left( 2\sqrt{\frac{2u}{m}} + \frac{u}{m} \right) \right) \leq \exp(-u).
\]

The proof of Lemma 4.4 uses the following result, which is Corollary 8.2 of van de Geer (2016).

Lemma 4.5. Suppose that $Z_1, \ldots, Z_n$ are i.i.d. random variables and there exists $L > 0$ such that $C^2 = \mathbb{E} \exp(|Z_1|/L)$ is finite. Then for arbitrary $u > 0$
\[
P\left( \frac{1}{n} \sum_{i=1}^{n} (Z_i - \mathbb{E}Z_i) > 2L\left( C\sqrt{\frac{2u}{n}} + \frac{u}{n} \right) \right) \leq \exp(-u).
\]

Proof of Lemma 4.4. Fix $j = 1, \ldots, p$ and $u > 0$. We consider an average of the i.i.d. centered random variables $Z_i = Y_{ij}\varepsilon_i$ with $\mathbb{E}Z_i = 0$, so we can use Lemma 4.5. We need to find $L, C > 0$ such that $\mathbb{E}\exp(|Y_{1j}\varepsilon_1|/L) \leq C^2$. Note that
\[
\mathbb{E} \exp(|Y_{1j}\varepsilon_1|/L) \leq \mathbb{E} \exp(Y_{1j}\varepsilon_1/L) + \mathbb{E} \exp(-Y_{1j}\varepsilon_1/L).
\tag{4.13}
\]
For the first term on the right-hand side of (4.13) we have
\[
\mathbb{E} \exp(Y_{1j}\varepsilon_1/L) = \mathbb{E}\left[ \mathbb{E}\left( \exp(Y_{1j}\varepsilon_1/L) \mid Y_{1j} \right) \right].
\]
Using the independence of $Y_{1j}$ and $\varepsilon_1$, and the subgaussianity of $\varepsilon_1$, for each $y \in \mathbb{R}$ we obtain
\[
\mathbb{E}\left[ \exp(Y_{1j}\varepsilon_1/L) \mid Y_{1j} = y \right] = \mathbb{E} \exp(y\varepsilon_1/L) \leq \exp\left( y^2\sigma^2/(2L^2) \right).
\]
Therefore we have
\[
\mathbb{E}\left[ \mathbb{E}\left( \exp(Y_{1j}\varepsilon_1/L) \mid Y_{1j} \right) \right] \leq \mathbb{E} \exp\left( Y_{1j}^2\sigma^2/(2L^2) \right),
\]
which, using the subgaussianity of $Y_{1j}$ and Lemma 7.4 of Baraniuk et al. (2011), we can bound from above by
\[
\frac{1}{\sqrt{1 - \tau^2\sigma^2/L^2}},
\]
provided that $L > \tau\sigma$. The second expectation on the right-hand side of (4.13) can be bounded analogously; hence we obtain
\[
\mathbb{E} \exp(|Y_{1j}\varepsilon_1|/L) \leq \frac{2}{\sqrt{1 - \tau^2\sigma^2/L^2}},
\]
provided that $L > \tau\sigma$. We can take $L = 2\tau\sigma$ and obtain $C \geq \frac{2}{\sqrt[4]{3}}$, which finishes the proof.
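As a quick sanity check of Lemma 4.4 (a simulation sketch with arbitrary illustrative parameters, not part of the proof), one can verify by Monte Carlo that the empirical tail of $\frac{1}{m}\sum_i Y_{ij}\varepsilon_i$ stays below the claimed bound $\exp(-u)$.

```python
import numpy as np

rng = np.random.default_rng(1)
m, reps, u = 200, 20000, 2.0
tau = sigma = 1.0   # N(0, 1) is subgaussian with parameter 1

# Threshold from Lemma 4.4 (with the constants as reconstructed there).
thr = 4 * tau * sigma * (2 * np.sqrt(2 * u / m) + u / m)

Y = rng.normal(size=(reps, m))       # predictor column Y_{ij}
eps = rng.normal(size=(reps, m))     # independent subgaussian noise
means = (Y * eps).mean(axis=1)

print("empirical tail:", (means > thr).mean(), "<= bound:", np.exp(-u))
```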
Lemma 4.6. Suppose that the assumptions of Proposition 4.3 are satisfied. Then for arbitrary $\varepsilon \in (0, 1)$ and $\xi > 1$, with probability at least $1 - \varepsilon$ we have $\bar F(\xi) \geq F(\xi)/2$.

Proof. Fix $\varepsilon \in (0, 1)$ and $\xi > 1$. We start by considering the $l_\infty$-norm
\[
\left\| \frac{1}{m} Y^\top Y - \mathbb{E} Y_1^\top Y_1 \right\|_\infty = \max_{j,k=1,\ldots,p} \left| \frac{1}{m} \sum_{i=1}^m Y_{ij}Y_{ik} - \mathbb{E} Y_{1j}Y_{1k} \right|.
\]
Fix $j, k \in \{1, \ldots, p\}$. Notice that for any two numbers $a$ and $b$ we have the inequality $ab \leq \frac{a^2}{2} + \frac{b^2}{2}$. Hence, we can write
\[
|Y_{1j}Y_{1k}| \leq \frac{Y_{1j}^2}{2} + \frac{Y_{1k}^2}{2}.
\]
Therefore, using first the previous inequality and then the Cauchy-Schwarz inequality, for any positive constant $L$ we obtain
\[
\mathbb{E} \exp\left( |Y_{1j}Y_{1k}|/L \right) \leq \mathbb{E}\left[ \exp\left( Y_{1j}^2/(2L) \right) \exp\left( Y_{1k}^2/(2L) \right) \right] \leq \sqrt{ \mathbb{E} \exp\left( Y_{1j}^2/L \right)\, \mathbb{E} \exp\left( Y_{1k}^2/L \right) }.
\tag{4.14}
\]
The variable $Y_{1j}$ is subgaussian, so using Lemma 7.4 of Baraniuk et al. (2011) we can bound the first expectation under the square root in (4.14) from above by
\[
\left( 1 - \frac{2\tau^2}{L} \right)^{-1/2},
\]
provided that $2\tau^2 < L$. The second expectation under the square root in (4.14) can be bounded by the same value, using the subgaussianity of $Y_{1k}$. Therefore,
\[
\mathbb{E} \exp\left( |Y_{1j}Y_{1k}|/L \right) \leq \left( 1 - \frac{2\tau^2}{L} \right)^{-1/2},
\]
provided that $2\tau^2 < L$. Applying Lemma 4.5 with $L = 3\tau^2$, $C = 2$ and $u = \log(p^2/\varepsilon)$ we obtain
\[
P\left( \left| \frac{1}{m} \sum_{i=1}^m Y_{ij}Y_{ik} - \mathbb{E} Y_{1j}Y_{1k} \right| > K\tau^2 \sqrt{\frac{\log(p^2/\varepsilon)}{m}} \right) \leq \frac{\varepsilon}{p^2},
\]
where $K$ is a universal constant. Therefore,
\[
\begin{aligned}
P\left( \left\| \frac{1}{m} Y^\top Y - \mathbb{E}Y_1^\top Y_1 \right\|_\infty > K\tau^2 \sqrt{\frac{\log(p^2/\varepsilon)}{m}} \right)
&= P\left( \max_{j,k=1,\ldots,p} \left| \frac{1}{m} \sum_{i=1}^m Y_{ij}Y_{ik} - \mathbb{E}Y_{1j}Y_{1k} \right| > K\tau^2 \sqrt{\frac{\log(p^2/\varepsilon)}{m}} \right) \\
&\leq \sum_{j,k} P\left( \left| \frac{1}{m} \sum_{i=1}^m Y_{ij}Y_{ik} - \mathbb{E}Y_{1j}Y_{1k} \right| > K\tau^2 \sqrt{\frac{\log(p^2/\varepsilon)}{m}} \right) \leq \varepsilon.
\end{aligned}
\tag{4.15}
\]
Proceeding similarly to the proof of Lemma 4.1 of Huang et al. (2013), we first obtain
\[
\left| \left\| \frac{1}{m} Y^\top Y \theta \right\|_\infty - \|H\theta\|_\infty \right| \leq \left\| \frac{1}{m} Y^\top Y \theta - H\theta \right\|_\infty \leq \left\| \frac{1}{m} Y^\top Y - H \right\|_\infty \|\theta\|_1 = \left\| \frac{1}{m} Y^\top Y - H \right\|_\infty \left( \|\theta_S\|_1 + \|\theta_{S^c}\|_1 \right) \leq (1 + \xi)|S| \cdot \|\theta\|_\infty \left\| \frac{1}{m} Y^\top Y - \mathbb{E}Y_1^\top Y_1 \right\|_\infty.
\]
This implies that
\[
\left\| \frac{1}{m} Y^\top Y \theta \right\|_\infty \geq \|H\theta\|_\infty - (1 + \xi)|S| \cdot \|\theta\|_\infty \left\| \frac{1}{m} Y^\top Y - \mathbb{E}Y_1^\top Y_1 \right\|_\infty.
\]
Then, dividing both sides by $\|\theta\|_\infty$, taking the infimum with respect to $\theta$ over the cone $C(\xi, S)$ and using (4.15), we derive that
\[
\bar F(\xi) \geq F(\xi) - K(1 + \xi)|S|\tau^2 \sqrt{\frac{\log(p^2/\varepsilon)}{m}}
\]
with probability higher than $1 - \varepsilon$. Finally, using (4.10) we have
\[
\bar F(\xi) \geq F(\xi) - \frac{K}{\sqrt{K_1}} F(\xi) = \left( 1 - \frac{K}{\sqrt{K_1}} \right) F(\xi).
\]
We finish the proof by taking a sufficiently large $K_1$.

Proof of Proposition 4.3. The central part of the proof is to show that
\[
P\left( \|\hat\beta - \beta\|_\infty \leq \frac{2\xi\lambda}{(\xi + 1)\bar F(\xi)} \right) > 1 - a.
\tag{4.16}
\]
Let us denote $\Omega = \left\{ \|\nabla\mathrm{RSS}(\beta)\|_\infty \leq \frac{\xi - 1}{\xi + 1}\lambda \right\}$. We first bound the probability of $\Omega$ from below. For each $j = 1, \ldots, p$ we can calculate the $j$-th partial derivative of $\mathrm{RSS}(\theta)$ at the true $\beta$:
\[
\nabla_j \mathrm{RSS}(\beta) = \frac{\partial \mathrm{RSS}}{\partial \theta_j}(\beta) = -\frac{1}{m} \sum_{i=1}^m Y_{ij}\varepsilon_i,
\]
and we bound it from above with high probability using Lemma 4.4. Therefore, taking (4.11) into account, we have
\[
\begin{aligned}
P(\Omega) &= P\left( \max_j |\nabla_j \mathrm{RSS}(\beta)| \leq \frac{\xi - 1}{\xi + 1}\lambda \right) = P\left( \bigcap_{j=1}^p \left\{ |\nabla_j \mathrm{RSS}(\beta)| \leq \frac{\xi - 1}{\xi + 1}\lambda \right\} \right) \\
&= 1 - P\left( \bigcup_{j=1}^p \left\{ |\nabla_j \mathrm{RSS}(\beta)| > \frac{\xi - 1}{\xi + 1}\lambda \right\} \right) \geq 1 - \sum_{j=1}^p P\left( |\nabla_j \mathrm{RSS}(\beta)| > \frac{\xi - 1}{\xi + 1}\lambda \right) \\
&\geq 1 - \sum_{j=1}^p P\left( |\nabla_j \mathrm{RSS}(\beta)| > K_2\tau\sigma\sqrt{\frac{\log(p/a)}{m}} \right).
\end{aligned}
\]
Now, applying Lemma 4.4 (to $\pm Y_{ij}\varepsilon_i$) with $u = \log(2p/a)$ and an appropriately chosen $K_2$, we bound this probability from below by $1 - a$.

In the further argumentation we consider only the event $\Omega$. Besides, we denote $\tilde\beta = \hat\beta - \beta$, where $\hat\beta$ is a minimizer of the convex function given in (4.9), which is equivalent to
\[
\begin{cases}
\dfrac{\partial\mathrm{RSS}}{\partial\theta_j}(\hat\beta) = -\lambda\operatorname{sgn}(\hat\beta_j), & \text{if } \hat\beta_j \neq 0,\\[2mm]
\left| \dfrac{\partial\mathrm{RSS}}{\partial\theta_j}(\hat\beta) \right| \leq \lambda, & \text{if } \hat\beta_j = 0,
\end{cases}
\tag{4.17}
\]
where $j = 1, \ldots, p$. Next we show that $\tilde\beta \in C(\xi, S)$.
Our argumentation is analogous to Ye and Zhang (2010). From the conditions in (4.17) and the fact that $\|\tilde\beta\|_1 = \|\tilde\beta_S\|_1 + \|\tilde\beta_{S^c}\|_1$ we obtain
\[
\begin{aligned}
0 \leq \tilde\beta^\top Y^\top Y \tilde\beta/m &= \tilde\beta^\top\left( \nabla\mathrm{RSS}(\hat\beta) - \nabla\mathrm{RSS}(\beta) \right) \\
&= \sum_{j \in S} \tilde\beta_j \nabla_j\mathrm{RSS}(\hat\beta) + \sum_{j \in S^c} \hat\beta_j \nabla_j\mathrm{RSS}(\hat\beta) - \tilde\beta^\top\nabla\mathrm{RSS}(\beta) \\
&\leq \lambda\sum_{j \in S} |\tilde\beta_j| - \lambda\sum_{j \in S^c} |\hat\beta_j| + \|\tilde\beta\|_1\|\nabla\mathrm{RSS}(\beta)\|_\infty \\
&= \left[ \lambda + \|\nabla\mathrm{RSS}(\beta)\|_\infty \right]\|\tilde\beta_S\|_1 + \left[ \|\nabla\mathrm{RSS}(\beta)\|_\infty - \lambda \right]\|\tilde\beta_{S^c}\|_1.
\end{aligned}
\]
Since we consider only the event $\Omega$, we obtain the inequality
\[
\|\tilde\beta_{S^c}\|_1 \leq \frac{\lambda + \|\nabla\mathrm{RSS}(\beta)\|_\infty}{\lambda - \|\nabla\mathrm{RSS}(\beta)\|_\infty}\|\tilde\beta_S\|_1 \leq \xi\|\tilde\beta_S\|_1.
\]
Hence we have just proved that $\tilde\beta$ belongs to the cone $C(\xi, S)$. Therefore, from the definition of $\bar F(\xi)$ we have
\[
\|\hat\beta - \beta\|_\infty \leq \frac{\|Y^\top Y(\hat\beta - \beta)/m\|_\infty}{\bar F(\xi)} \leq \frac{\|\nabla\mathrm{RSS}(\hat\beta)\|_\infty + \|\nabla\mathrm{RSS}(\beta)\|_\infty}{\bar F(\xi)}.
\]
Using the conditions in (4.17) and the definition of the event $\Omega$, we then obtain (4.16). Finally, having shown (4.16), we apply Lemma 4.6 and obtain (4.12), which finishes the proof.

Proof of Theorem 4.1. In order to show that our estimator is close to the true parameter vector $\beta$, we first use union bounds:
\[
\begin{aligned}
P\left( \|\hat\beta - \beta\|_\infty \leq \frac{4\xi}{\xi+1} \max_{j,i} \frac{\lambda_{j,i}}{F_{j,i}} \right) &\geq P\left( \bigcap_{j,i} \left\{ \|\hat\beta_j^i - \beta_j^i\|_\infty \leq \frac{4\xi\lambda_{j,i}}{(\xi+1)F_{j,i}} \right\} \right) \\
&= 1 - P\left( \bigcup_{j,i} \left\{ \|\hat\beta_j^i - \beta_j^i\|_\infty > \frac{4\xi\lambda_{j,i}}{(\xi+1)F_{j,i}} \right\} \right) \\
&\geq 1 - \sum_{j=1}^q \sum_{i=1}^{k_j} P\left( \|\hat\beta_j^i - \beta_j^i\|_\infty > \frac{4\xi\lambda_{j,i}}{(\xi+1)F_{j,i}} \right).
\end{aligned}
\]
Then, using Proposition 4.3 separately for each variable $X_j^i$ in each layer $\ell_j$ for $j = 1, \ldots, q$, with $\lambda = \lambda_{j,i}$, with the number of predictors equal to $p = |L_{j-1}|$ and $a = \frac{\varepsilon}{q k_j}$, we obtain that the expression above can be bounded from below by $1 - \varepsilon$. The bound on the number of observations $m$ is chosen according to (4.5) and (4.6).

To prove Corollary 4.2 we apply the same methodology. Namely, we prove an auxiliary lemma concerning the model described by (4.7), with the set $S$ defined by (4.8). Additionally, by $\beta_{\min}$ we denote the non-zero coordinate of the true parameter vector $\beta$ which is smallest in absolute value. By $\hat S$ we denote the set of non-zero coordinates of the Thresholded LASSO estimator with the level $\delta$, i.e. the coordinates of the vector $\hat\beta$ which are greater than $\delta$ in absolute value.

Lemma 4.7. Fix $a \in (0, 1)$ and $\xi > 1$. Then, under the assumptions of Proposition 4.3 and
\[
\frac{4\xi\lambda}{(\xi+1)F(\xi)} \leq \delta \leq \beta_{\min}/2,
\tag{4.18}
\]
we have
\[
P\left( \hat S = S \right) \geq 1 - a.
\]

Proof. Take any $j \notin S$. Then from Proposition 4.3 and (4.18), with probability greater than $1 - a$, we have
\[
|\hat\beta_j| = |\hat\beta_j - \beta_j| \leq \|\hat\beta - \beta\|_\infty \leq \delta.
\]
Therefore the $j$-th coordinate of Thresholded LASSO satisfies $\hat\beta_j^{TH} = 0$. Next, we take $j \in S$ and obtain, also from Proposition 4.3 and (4.18), that with probability greater than $1 - a$
\[
|\hat\beta_j| \geq |\beta_j| - |\hat\beta_j - \beta_j| \geq \beta_{\min} - \|\hat\beta - \beta\|_\infty \geq \delta.
\]
Hence $\hat\beta_j^{TH} \neq 0$.

Proof of Corollary 4.2. From Lemma 4.7, for each $j \in \{1, \ldots, q\}$ and $i \in \{1, \ldots, k_j\}$, under the assumptions of Theorem 4.1 we have that for arbitrary $a_{j,i} \in (0, 1)$
\[
P\left( \hat S_{j,i} \neq S_{j,i} \right) < a_{j,i}.
\]
Now we obtain
\[
P\left( \hat S_\delta \neq S \right) = P\left( \bigcup_{j,i} \{\hat S_{j,i} \neq S_{j,i}\} \right) \leq \sum_{j=1}^q \sum_{i=1}^{k_j} P\left( \hat S_{j,i} \neq S_{j,i} \right).
\]
By taking $a_{j,i} = \frac{\varepsilon}{q k_j}$ we obtain the bound $P(\hat S_\delta \neq S) < \varepsilon$, which finishes the proof.

4.4 Discrete case

As we discussed in Section 3.1, in the discrete case we take as the distribution of the model a collection of categorical distributions, one for each variable. First we assume the binary case, so that each $X_i \in \{0, 1\}$, and we consider the logistic regression model. Let us denote the sigmoid function by $\sigma(x) = \frac{1}{1 + e^{-x}}$.
In this setting we can write the probabilities for each variable in each layer, similarly to (4.1), as follows:
\[
\begin{aligned}
P(X_1^1 = 1) &= \sigma\left( \beta_{1,0}^1 + \beta_{1,0}^{1,1} X_0^1 + \cdots + \beta_{1,0}^{1,k_0} X_0^{k_0} \right)\\
&\;\,\vdots\\
P(X_1^{k_1} = 1) &= \sigma\left( \beta_{1,0}^{k_1} + \beta_{1,0}^{k_1,1} X_0^1 + \cdots + \beta_{1,0}^{k_1,k_0} X_0^{k_0} \right)\\
P(X_2^1 = 1) &= \sigma\left( \beta_{2,0}^1 + \beta_{2,0}^{1,1} X_0^1 + \cdots + \beta_{2,0}^{1,k_0} X_0^{k_0} + \beta_{2,1}^{1,1} X_1^1 + \cdots + \beta_{2,1}^{1,k_1} X_1^{k_1} \right)\\
&\;\,\vdots\\
P(X_q^{k_q} = 1) &= \sigma\left( \beta_{q,0}^{k_q} + \sum_{j<q} \sum_{m_j} \beta_{q,j}^{k_q,m_j} X_j^{m_j} \right).
\end{aligned}
\tag{4.19}
\]
Using the same notation as in the continuous case, we need to solve the following $d$ optimization problems:
\[
\hat\beta_j^i = \operatorname*{argmin}_{\theta \in \mathbb{R}^{k_0 + \cdots + k_{j-1}}} \left[ \ell_{j,i}(\theta) + \lambda_{j,i}\|\theta\|_1 \right], \qquad j = 1, \ldots, q, \quad i = 1, \ldots, k_j,
\]
where $\ell_{j,i}$ is the negative log-likelihood for the $i$-th variable in the $j$-th layer and has the following form:
\[
\ell_{j,i}(\theta) = -\sum_{l=1}^m \left[ X_{(p+i)l} \log\left( \sigma\left( \theta^\top X_l^{0:(j-1)} \right) \right) + \left( 1 - X_{(p+i)l} \right) \log\left( 1 - \sigma\left( \theta^\top X_l^{0:(j-1)} \right) \right) \right].
\]
Here we denote by $p = p(j) = k_0 + \cdots + k_{j-1}$ the number of variables in the layers preceding the $j$-th one.

We can also generalize the above to the case where each variable has a discrete and finite state space, namely $X_j^i \in \{1, \ldots, N_j^i\}$. Instead of the sigmoid function we now use the so-called softmax function: for any vector $a = (a_1, \ldots, a_n)$ we define $\sigma(a)$ as the vector $\sigma(a) = (\sigma(a)[1], \ldots, \sigma(a)[n])$, where
\[
\sigma(a)[i] = \frac{\exp(a_i)}{\sum_{j=1}^n \exp(a_j)}.
\]
We denote $\mathbf{X}^j = \left( X_0^1, X_0^2, \ldots, X_0^{k_0}, X_1^1, \ldots, X_{j-1}^{k_{j-1}} \right)^\top$ for $j = 1, \ldots, q$. Also, we denote the vector of parameters corresponding to the $l$-th class of the $i$-th variable in the $j$-th layer by
\[
\beta_j^i[l] = \left( \beta_{j,0}^{i,1}[l], \ldots, \beta_{j,0}^{i,k_0}[l], \beta_{j,1}^{i,1}[l], \ldots, \beta_{j,j-1}^{i,k_{j-1}}[l] \right)
\]
for $j = 1, \ldots, q$, $i = 1, \ldots, k_j$ and $l = 1, \ldots, N_j^i$. Then the model analogous to the logistic model in (4.19) takes the form
\[
\begin{aligned}
P(X_1^1 = 1) &= \sigma\left( \beta_{1,0}^1[1] + \beta_1^1[1]^\top\mathbf{X}^1, \ldots, \beta_{1,0}^1[N_1^1] + \beta_1^1[N_1^1]^\top\mathbf{X}^1 \right)[1]\\
&\;\,\vdots\\
P(X_1^1 = N_1^1) &= \sigma\left( \beta_{1,0}^1[1] + \beta_1^1[1]^\top\mathbf{X}^1, \ldots, \beta_{1,0}^1[N_1^1] + \beta_1^1[N_1^1]^\top\mathbf{X}^1 \right)[N_1^1]\\
&\;\,\vdots\\
P(X_1^{k_1} = 1) &= \sigma\left( \beta_{1,0}^{k_1}[1] + \beta_1^{k_1}[1]^\top\mathbf{X}^1, \ldots, \beta_{1,0}^{k_1}[N_1^{k_1}] + \beta_1^{k_1}[N_1^{k_1}]^\top\mathbf{X}^1 \right)[1]\\
&\;\,\vdots\\
P(X_j^i = l) &= \sigma\left( \beta_{j,0}^i[1] + \beta_j^i[1]^\top\mathbf{X}^j, \ldots, \beta_{j,0}^i[N_j^i] + \beta_j^i[N_j^i]^\top\mathbf{X}^j \right)[l]\\
&\;\,\vdots\\
P(X_q^{k_q} = N_q^{k_q}) &= \sigma\left( \beta_{q,0}^{k_q}[1] + \beta_q^{k_q}[1]^\top\mathbf{X}^q, \ldots, \beta_{q,0}^{k_q}[N_q^{k_q}] + \beta_q^{k_q}[N_q^{k_q}]^\top\mathbf{X}^q \right)[N_q^{k_q}].
\end{aligned}
\]
This is called multinomial logistic regression. It is not difficult to notice that logistic regression is a particular case of multinomial logistic regression with two possible classes. For each variable $X_j^i$ we denote the full vector of parameters $\beta_j^i = (\beta_j^i[1], \ldots, \beta_j^i[N_j^i])$. Then we need to solve $d$ optimization problems analogous to the case of logistic regression:
\[
\hat\beta_j^i = \operatorname*{argmin}_{\theta \in \mathbb{R}^{(k_0+\cdots+k_{j-1})N_j^i}} \left[ \ell_{j,i}(\theta) + \lambda_{j,i}\|\theta\|_1 \right], \qquad j = 1, \ldots, q, \quad i = 1, \ldots, k_j,
\]
where $\ell_{j,i}$ is again the negative log-likelihood for the $i$-th variable in the $j$-th layer, which in this case has the form
\[
\ell_{j,i}(\theta) = -\sum_{l=1}^m \sum_{k=1}^{N_j^i} \mathbb{I}\left( X_{(p+i)l} = k \right)\left[ \theta[k]^\top\mathbf{X}_l^j - \log\left( \sum_{r=1}^{N_j^i} \exp\left( \theta[r]^\top\mathbf{X}_l^j \right) \right) \right],
\]
where again $p = p(j) = k_0 + \cdots + k_{j-1}$ denotes the number of variables in the layers preceding the $j$-th one.
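For illustration, the discrete-case analogue of the earlier GBN sketch can be written with scikit-learn's L1-penalized (multinomial) logistic regression. This is a sketch under the same assumptions as before (known layers, an illustrative penalty, and category codes used directly as covariates for brevity; a one-hot encoding of the parents would match the model more closely). It is not the thesis implementation.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def recover_arrows_discrete(X, layers, C=0.5):
    """X: (m, d) matrix of categorical codes; layers: lists of column indices.
    L1-penalized (multinomial) logistic regression per node, as in Section 4.4."""
    arrows = set()
    preds = []
    for layer in layers:
        if preds:
            for v in layer:
                clf = LogisticRegression(penalty="l1", solver="saga", C=C,
                                         max_iter=5000).fit(X[:, preds], X[:, v])
                # clf.coef_ has one row per class; keep the arrow u -> v if any
                # class uses the predictor u with a nonzero coefficient.
                for col, u in enumerate(preds):
                    if np.any(np.abs(clf.coef_[:, col]) > 1e-8):
                        arrows.add((u, v))
        preds = preds + list(layer)
    return arrows
```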
4.5 Numerical results

In this section we describe the details of the algorithm's implementation, as well as the results of experimental studies comparing our algorithm to others.

4.5.1 Details of implementation

We now describe the practical implementation of the proposed algorithm in detail. The solution of (4.2) depends on the choice of $\lambda_{j,i}$. Finding the "optimal" parameters $\lambda_{j,i}$ and the thresholds $\delta_{j,i}$ in practice is difficult. We solve this problem using information criteria (Xue et al., 2012; Pokarowski and Mielniczuk, 2015; Miasojedow and Rejchel, 2018).

First, recall the function which is minimized in (4.2):
\[
\mathrm{RSS}_{j,i}(\theta) + \lambda_{j,i}\|\theta\|_1 = \frac{1}{2}\left\| X_j[i] - \theta^\top X^{0:(j-1)} \right\|_2^2 + \lambda_{j,i} \sum_{l=0}^{j-1} \sum_{m_l=1}^{k_l} \left| \theta_l^{m_l} \right|,
\]
with $X_j[i]$ being the vector of length $m$ of observations of the $i$-th variable in the $j$-th layer. We perform the optimization separately for each variable, and the vector $\theta$ belongs to $\mathbb{R}^{k_0 + \cdots + k_{j-1}}$ for $j = 1, \ldots, q$ and $i = 1, \ldots, k_j$. In our implementation we use the following scheme. We start by computing a sequence of minimizers on a grid, i.e. for any $j$ and $i$ we create a finite sequence $\{\lambda_k\}_{k=1}^N$, uniformly spaced on the log scale and starting from the largest $\lambda_k$, which corresponds to the empty model. Next, for each value $\lambda_k$ we compute the estimator $\hat\beta_j^i[k]$ of the vector $\beta_j^i$:
\[
\hat\beta_j^i[k] = \operatorname*{argmin}_{\theta \in \mathbb{R}^{k_0+\cdots+k_{j-1}}} \left\{ \mathrm{RSS}_{j,i}(\theta) + \lambda_k\|\theta\|_1 \right\}.
\tag{4.20}
\]
To solve (4.20) numerically for a given $\lambda_k$ we use the FISTA algorithm with backtracking from Beck and Teboulle (2009). The final LASSO estimator $\hat\beta_j^i := \hat\beta_j^i[k^*]$ is chosen using the Bayesian Information Criterion (BIC), which is a popular method of choosing $\lambda_{j,i}$ in the literature (Xue et al., 2012; Miasojedow and Rejchel, 2018), i.e.
\[
k^* = \operatorname*{argmin}_{1 \leq k \leq N} \left\{ m\log\left( \mathrm{RSS}(\hat\beta_j^i[k]) \right) + \log(m)\left\| \hat\beta_j^i[k] \right\|_0 \right\}.
\]
Here $\|\hat\beta_j^i[k]\|_0$ denotes the number of non-zero elements of $\hat\beta_j^i[k]$ and $m$ is the number of observations of the network. In our simulations we use $N = 100$.

Finally, the threshold $\delta$ is obtained using the Generalized Information Criterion (GIC). A similar way of choosing a threshold was used previously in Pokarowski and Mielniczuk (2015); Miasojedow and Rejchel (2018). For a prespecified sequence of thresholds $D$ we calculate
\[
\delta_{j,i}^* = \operatorname*{argmin}_{\delta \in D} \left\{ m\log\left( \mathrm{RSS}(\hat\beta_{j,\delta}^i) \right) + \log(k_0 + \cdots + k_{j-1})\left\| \hat\beta_{j,\delta}^i \right\|_0 \right\},
\]
where $\hat\beta_{j,\delta}^i$ is the LASSO estimator $\hat\beta_j^i$ after thresholding at the level $\delta$.
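The selection logic above is easy to express in code. The following sketch assumes the LASSO solutions along the $\lambda$ grid are already available (FISTA with backtracking in the thesis; any LASSO solver would do for illustration) and picks $k^*$ by BIC and the threshold by GIC; names and data layout are illustrative.

```python
import numpy as np

def rss(X, y, beta):
    # Residual sum of squares 0.5 * ||y - X beta||_2^2, as in (4.2).
    r = y - X @ beta
    return 0.5 * float(r @ r)

def select_by_bic_gic(X, y, betas, thresholds):
    """betas: list of LASSO solutions along the lambda grid (largest first).
    Returns the BIC-selected estimate after GIC-selected thresholding."""
    m, p = X.shape
    # BIC over the lambda grid: m*log(RSS) + log(m)*||beta||_0.
    bic = [m * np.log(rss(X, y, b)) + np.log(m) * np.count_nonzero(b)
           for b in betas]
    beta = betas[int(np.argmin(bic))]
    # GIC over thresholds: m*log(RSS) + log(p)*||beta_delta||_0.
    best, best_gic = beta, np.inf
    for delta in thresholds:
        b = np.where(np.abs(beta) > delta, beta, 0.0)
        g = m * np.log(rss(X, y, b)) + np.log(p) * np.count_nonzero(b)
        if g < best_gic:
            best, best_gic = b, g
    return best
```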
4.5.2 Experiments

In this subsection we compare our algorithm to other algorithms developed for this problem, applying them to benchmark networks. We use the bnlearn package in R (Scutari (2010)), in which many algorithms for learning Bayesian networks, including structure learning, are implemented. Algorithms of the different types discussed at the beginning of this chapter, such as constraint-based algorithms, score-and-search algorithms and hybrid algorithms, can be found there. The choice of specific algorithms was made empirically, i.e. we selected the ones performing best on the chosen networks. We took networks with continuous data and a medium, large and very large number of nodes and arcs. We refer to medium, large and very large sizes as 20-50 nodes, 50-100 nodes and 100-1000 nodes, respectively, adopting this classification from the authors of the bnlearn package.

We chose the medium-size network ECOLI70 with 46 nodes and 70 arcs (Schäfer and Strimmer (2005)), the large network MAGIC-IRRI* with 64 nodes and 102 arcs, and the very large network ARTH150 with 107 nodes and 150 arcs (Opgen-Rhein and Strimmer (2007)). The algorithms chosen for comparison are the hill-climbing (hc) algorithm, tabu search (tabu), max-min hill-climbing (mmhc) and the Hybrid HPC (h2pc) algorithm. Hill-climbing (Scutari et al. (2018)) is a greedy search algorithm that explores the space of directed acyclic graphs (DAGs) by the addition, removal or reversal of a single edge, and uses random restarts to avoid local optima. Tabu search (Russell and Norvig (2010)) is a modified hill-climbing method which is able to escape local optima by selecting a network that minimally decreases the score function. Both methods use the score-and-search approach. The max-min hill-climbing algorithm (Tsamardinos et al. (2006)) is a hybrid method combining a constraint-based algorithm called max-min parents and children with hill-climbing. The H2PC (Hybrid HPC, Gasse et al. (2014)) algorithm is a hybrid algorithm combining an ensemble of weak PC learners (Spirtes and Glymour (1991)) with hill-climbing. For more comparisons of the methods in the bnlearn package see Scutari et al. (2018).

*The model MAGIC-IRRI was developed as an example of multiple trait modelling in plant genetics for the invited talk "Bayesian Networks, MAGIC Populations and Multiple Trait Prediction" delivered by Marco Scutari, the author of the bnlearn package, at the 5th International Conference on Quantitative Genetics (ICQG 2016).

For each network we used two sizes of the data set, with m = 300 and m = 1000 observations. In the tables with results we denote them by the first 2-3 letters of the name of the network followed by the number of observations, so that no confusion arises. For each algorithm we ran the experiment 100 times, each time with a new set of m observations, and averaged the results in terms of three performance measures (a small sketch of these measures follows the list):

• power, i.e. the proportion of correctly discovered edges;
• false discovery rate (FDR), i.e. the fraction of incorrectly selected edges among all selected edges;
• structural Hamming distance (SHD), i.e. the smallest number of operations (such as adding or removing an edge, or changing the direction of an arrow) required to match the true DAG and the learned one.
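A minimal sketch of these measures on adjacency matrices is given below. The SHD variant here counts insertions, deletions and reversals as one operation each, which is one common convention; details may differ from the exact implementation used for the tables.

```python
import numpy as np

def metrics(A_true, A_hat):
    """A_true, A_hat: boolean (d, d) adjacency matrices, A[u, v] = u -> v."""
    true_e = {(u, v) for u, v in zip(*np.nonzero(A_true))}
    hat_e = {(u, v) for u, v in zip(*np.nonzero(A_hat))}
    power = len(true_e & hat_e) / len(true_e)
    fdr = len(hat_e - true_e) / max(len(hat_e), 1)
    # SHD: a reversed edge costs one operation, every other mismatch one each.
    reversed_e = {(u, v) for (u, v) in hat_e - true_e
                  if (v, u) in true_e - hat_e}
    shd = (len(hat_e - true_e) - len(reversed_e)) \
        + (len(true_e - hat_e) - len(reversed_e)) + len(reversed_e)
    return power, fdr, shd
```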
In Tables 4.1-4.3 we provide the results of the experiments for the data sets and methods mentioned above, including ours. In terms of power, our algorithm performs right in the middle between the score-and-search and the hybrid methods for the ECOLI70 data set, and similarly to the hybrid methods for the small MAGIC-IRRI data sets. We note that for the data sets of 1000 observations it performs worse than the other methods; however, as the number of observations grows, the algorithm's power grows as well. With 10000 observations it grows up to 0.5, performing as well as the other score-based methods, without any increase in FDR. We observe the same situation for the ARTH150 data set.

In terms of FDR, our algorithm consistently performs best, giving very low rates of false discoveries. This is especially important when the cost of a false discovery is high, and it makes the obtained discoveries more certain. With 10000 observations we consistently obtain values in the range 0.2-0.4% for all data sets. When it comes to the structural Hamming distance (SHD), our algorithm again achieves the best, or close to the best, numbers. With a growing number of observations the SHD decreases, due to the increasing power and consistently low FDR. For 10000 observations it outperforms the other algorithms, or has SHDs close to those of the hybrid methods, reaching around 28, 62 and 86 for the ECOLI70, MAGIC-IRRI and ARTH150 data sets, respectively.

We also checked our method on a discrete binary network called ASIA, introduced in Chapter 3. It is a small network of 8 nodes and 8 edges. Our algorithm recognizes 6 arrows and makes 2 false discoveries, discovering 8 arrows in total. However, after a closer look we noticed that it could not recognize 2 arrows due to an incorrect assignment of layers. Finally, one false discovery was an arrow of the opposite direction to the true one, and the other was an arrow from the start to the end of a causal trail (we obtained an additional arrow X → W for the trail X → Y → Z → W); hence in both cases the dependencies are still recovered.

Method        | EC300 | EC1000 | MAG300 | MAG1000 | AR300 | AR1000
hc            | 0.57  | 0.65   | 0.28   | 0.45    | 0.58  | 0.67
tabu          | 0.6   | 0.7    | 0.31   | 0.5     | 0.59  | 0.68
mmhc          | 0.39  | 0.45   | 0.25   | 0.46    | 0.48  | 0.58
h2pc          | 0.4   | 0.49   | 0.23   | 0.45    | 0.5   | 0.61
MCMC + LASSO  | 0.49  | 0.55   | 0.24   | 0.38    | 0.38  | 0.46

Table 4.1: Average power for the ECOLI70, MAGIC-IRRI and ARTH150 networks for 300 and 1000 observations.

Method        | EC300 | EC1000 | MAG300 | MAG1000 | AR300 | AR1000
hc            | 0.049 | 0.044  | 0.04   | 0.037   | 0.028 | 0.19
tabu          | 0.047 | 0.036  | 0.04   | 0.036   | 0.028 | 0.018
mmhc          | 0.021 | 0.024  | 0.022  | 0.022   | 0.01  | 0.008
h2pc          | 0.02  | 0.023  | 0.14   | 0.019   | 0.006 | 0.006
MCMC + LASSO  | 0.004 | 0.004  | 0.004  | 0.006   | 0.004 | 0.003

Table 4.2: Average FDR for the ECOLI70, MAGIC-IRRI and ARTH150 networks for 300 and 1000 observations.

Method        | EC300 | EC1000 | MAG300 | MAG1000 | AR300 | AR1000
hc            | 65.6  | 51.8   | 129.5  | 96.5    | 214.2 | 151.1
tabu          | 64.8  | 46.9   | 127.1  | 93.1    | 215   | 150.9
mmhc          | 48.1  | 39.2   | 101.7  | 72.2    | 127.3 | 104.1
h2pc          | 45.9  | 37.9   | 91.6   | 67.21   | 103.7 | 90.3
MCMC + LASSO  | 39.2  | 35.1   | 85.9   | 79.3    | 103   | 98.7

Table 4.3: Average SHD for the ECOLI70, MAGIC-IRRI and ARTH150 networks for 300 and 1000 observations.

Chapter 5

Structure learning for CTBNs for complete data

In this chapter we consider continuous time Bayesian networks (CTBNs), introduced and defined in Section 2.5. First we consider the fully observed case, where we observe the behaviour of the network at each moment of time.

5.1 Notation and preliminaries

In this section we describe the proposed method, using the notation introduced in Section 2.5. First, we consider the full graph $G = (V, E)$, namely we assume that $\mathrm{pa}_G(w) = \mathrm{pa}(w) = -w$ for each $w \in V$. Then we remove unnecessary edges using the penalized likelihood technique. We start by introducing a new parametrization of the model. For simplicity, in the main part of this chapter we consider the binary graph, i.e. $\mathcal{X}_w = \{0, 1\}$ for each $w \in V$. The extension of our results to the more general case is described in Section 5.5.

Let $d$ be the number of nodes in the graph. Consider a fixed order $(w_1, w_2, \ldots, w_d)$ of the nodes of the graph. Using this order we define a $(2d) \times d$-dimensional matrix
\[
\beta = \left( \beta_{0,1}^{w_1}, \beta_{1,0}^{w_1}, \beta_{0,1}^{w_2}, \beta_{1,0}^{w_2}, \ldots, \beta_{0,1}^{w_d}, \beta_{1,0}^{w_d} \right)^\top,
\tag{5.1}
\]
whose rows are vectors $\beta_{s,s'}^w \in \mathbb{R}^d$ for all $w \in V$ and $s, s' \in \{0, 1\}$ such that $s \neq s'$. Obviously, the matrix $\beta$ can easily be transformed into a $2d^2$-dimensional vector in a standard way. In this chapter we assume that for all $w \in V$, $c \in \mathcal{X}_{-w}$ and $s, s' \in \{0, 1\}$, $s \neq s'$, the conditional intensity matrices satisfy
\[
\log\left( Q_w(c, s, s') \right) = \beta_{s,s'}^{w\top} Z^w(c),
\tag{5.2}
\]
where $Z^w : \mathcal{X}_{-w} \to \{0, 1\}^d$ is a binary deterministic function described below. With a slight abuse of notation, by $Z$ we will denote the set of all functions $Z^{w_1}, \ldots, Z^{w_d}$.
In (5.2) the conditional intensity matrix $Q_w(\cdot, s, s')$ is modeled analogously to the regression function in generalized linear models (GLM), and the functions $Z^w(\cdot)$ play the role of explanatory variables (covariates). In our setting the link function is logarithmic. An analogous approach can be found in Andersen and Gill (1982); Huang et al. (2013), where the Cox model is considered; the relation between the intensity and the covariates in those papers is similar to (5.2). Since the considered CTBNs do not contain explanatory variables, we introduce them artificially, as arbitrary representations of the parents' states. Thus, for every $w \in V$ these explanatory variables are dummy variables encoding all possible configurations in $\mathrm{pa}(w) = -w$. To make this more transparent, we consider the following example.

Example 5.1. We consider a CTBN with three nodes $A$, $B$ and $C$. For the node $A$ we define the function $Z^A$ as
\[
Z^A(b, c) = \left[ 1, \mathbb{I}(b = 1), \mathbb{I}(c = 1) \right]^\top
\]
for each $b, c \in \{0, 1\}$, where $\mathbb{I}(\cdot)$ is the indicator function. Therefore, for each configuration of the parents' states (i.e. the values at the nodes $B$ and $C$) the value of the function $Z^A(\cdot, \cdot)$ is a three-dimensional binary vector whose coordinates correspond to the intercept, the value at the parent $B$ and the value at the parent $C$, respectively. Analogously, we define the representations for the remaining nodes: $Z^B(a, c) = [1, \mathbb{I}(a = 1), \mathbb{I}(c = 1)]^\top$ and $Z^C(a, b) = [1, \mathbb{I}(a = 1), \mathbb{I}(b = 1)]^\top$ for each $a, b, c \in \{0, 1\}$. In this example the parameter vector (5.1) is defined as $\beta = \left( \beta_{0,1}^A, \beta_{1,0}^A, \beta_{0,1}^B, \beta_{1,0}^B, \beta_{0,1}^C, \beta_{1,0}^C \right)^\top$. With a slight abuse of notation, the vector $\beta_{0,1}^A$ is given as $\beta_{0,1}^A = \left( \beta_{0,1}^A(1), \beta_{0,1}^A(B), \beta_{0,1}^A(C) \right)^\top$, and we interpret (5.2) as follows: $\beta_{0,1}^A(B) = 0$ means that the intensity of the change from the state 0 to 1 at the node $A$ does not depend on the state at the node $B$. Similarly, $\beta_{0,1}^A(C)$ describes the dependence between the above intensity and the state at the node $C$, and $\beta_{0,1}^A(1)$ corresponds to the intercept. For the node $B$, the coordinates of the vector $\beta_{0,1}^B = \left( \beta_{0,1}^B(1), \beta_{0,1}^B(A), \beta_{0,1}^B(C) \right)$ relate the intensity of the jump from the state 0 to 1 at the node $B$ to the intercept and to the states at the nodes $A$ and $C$, respectively.

Now, what if $Z = \{Z^A, Z^B, Z^C\}$ were defined differently? The function $Z^A$ can be defined in 3 more ways, for example $Z^A(b, c) = [1, \mathbb{I}(b = 0), \mathbb{I}(c = 1)]^\top$. The same applies to the functions $Z^B$ and $Z^C$. Having defined a new set $\bar Z = \{\bar Z^A, \bar Z^B, \bar Z^C\}$, we obtain a new vector of parameters $\bar\beta$. Then, for instance, we have $\bar\beta_{0,1}^A = \left( \bar\beta_{0,1}^A(1), \bar\beta_{0,1}^A(B), \bar\beta_{0,1}^A(C) \right)$ and so on. Note that both sets $Z$ and $\bar Z$ fully describe the state configuration of the network, and both $\beta_{0,1}^A$ and $\bar\beta_{0,1}^A$ correspond to the same dependencies as above. In particular, it is easy to check that, for instance, $\beta_{0,1}^A(B) = \beta_{1,0}^A(B) = 0$ if and only if $\bar\beta_{0,1}^A(B) = \bar\beta_{1,0}^A(B) = 0$.

Analogously to Example 5.1, for $w \in V$, $u \neq w$ and $s, s' \in \{0, 1\}$, $s \neq s'$, we define the coordinate of the function $Z^w$ corresponding to the node $u$ as the indicator of its state being equal to either 0 or 1. Moreover, we denote the coordinate of $\beta_{s,s'}^w$ corresponding to the node $u$ by $\beta_{s,s'}^w(u)$. We interpret $\beta_{s,s'}^w(u)$ as the parameter describing the dependence of the intensity of the jump from the state $s$ to $s'$ at the node $w$ on the state at the node $u$.
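A small sketch of this parametrization for Example 5.1 (with arbitrary illustrative parameter values) shows how $Z^w(c)$ and (5.2) produce the conditional intensities.

```python
import numpy as np

def Z_A(b, c):
    # Representation of the parents (B, C) of node A, as in Example 5.1.
    return np.array([1.0, float(b == 1), float(c == 1)])

# Hypothetical parameters (intercept, coefficient of B, coefficient of C)
# for the 0 -> 1 jump at node A; a zero coefficient of B would mean that
# this intensity ignores the state at B.
beta_A_01 = np.array([-0.5, 0.8, 0.0])

for b in (0, 1):
    for c in (0, 1):
        q = np.exp(beta_A_01 @ Z_A(b, c))   # Q_A((b, c), 0, 1) by (5.2)
        print(f"B={b}, C={c}: intensity {q:.3f}")
```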
Our goal is to find the edges of a directed graph $(V, E)$. We define the relation between the parameters and the edges of $(V, E)$ in the following way:
\[
\beta_{0,1}^w(u) \neq 0 \;\text{ or }\; \beta_{1,0}^w(u) \neq 0 \;\Longleftrightarrow\; \text{the edge } u \to w \text{ exists},
\tag{5.3}
\]
which makes the parameters compatible with the considered CTBNs. Roughly speaking, the node $u$ being a parent of $w$ means that the intensity of switching the state at $w$ depends on the state at $u$. Therefore, the problem of finding the edges of the graph is reformulated as the problem of estimating the parameter $\beta$.

Remark 5.2. As we mentioned in the example above, the set $Z$ fully describes the parents' state configuration, and the relation above does not depend on the choice of $Z$. More precisely, assume we have two different properly defined sets $Z$ and $\bar Z$ and the corresponding vectors of parameters
\[
\beta = \left( \beta_{0,1}^{w_1\top}, \beta_{1,0}^{w_1\top}, \beta_{0,1}^{w_2\top}, \beta_{1,0}^{w_2\top}, \ldots, \beta_{0,1}^{w_d\top}, \beta_{1,0}^{w_d\top} \right)^\top, \qquad
\bar\beta = \left( \bar\beta_{0,1}^{w_1\top}, \bar\beta_{1,0}^{w_1\top}, \bar\beta_{0,1}^{w_2\top}, \bar\beta_{1,0}^{w_2\top}, \ldots, \bar\beta_{0,1}^{w_d\top}, \bar\beta_{1,0}^{w_d\top} \right)^\top.
\]
Then the following is true:
\[
\beta_{0,1}^w(u) = 0 \;\wedge\; \beta_{1,0}^w(u) = 0 \;\Longleftrightarrow\; \bar\beta_{0,1}^w(u) = 0 \;\wedge\; \bar\beta_{1,0}^w(u) = 0.
\]
This means that no matter how we define the explanatory functions $Z^w$, we will obtain the same arrows in the underlying CTBN.

Remark 5.3. For simplicity, in the rest of the thesis we omit the first coordinate $\beta_{s,s'}^w(1)$ of the vector $\beta_{s,s'}^w$ for all $w$ and $s \neq s'$, because it corresponds to the intercept and is not involved in the recognition of the edges of the graph. The first coordinates of the representations $Z^w(c)$ are discarded as well.

Remark 5.4. The Markov equivalence/identifiability/non-uniqueness problem is challenging for directed graphical models. However, this problem does not appear here for CTBNs. This is a consequence of our assumption (5.2), which states that we restrict ourselves to models having a conditional intensity in the GLM form. Moreover, under this assumption $\beta$ is uniquely determined, and this uniquely defined $\beta$ determines uniquely the structure of the graph by (5.3). In fact, our main result (Theorem 5.5 below) shows consistency of the estimator of $\beta$, which is a much stronger property than identifiability. Finally, in assumption (5.2) we require that the conditional intensity of a variable be a linear function of the states of its parents. This condition can easily be extended to a polynomial dependence, so it can cover a quite general dependence structure.

Our method is based on estimating the parameter $\beta$ using the penalized likelihood method. In the rest of the thesis the symbol $\beta$ is reserved for the true value of the parameter; other quantities are denoted by $\theta$. First, we consider the function
\[
\ell(\theta) = \frac{1}{T} \sum_{w \in V} \sum_{c \in \mathcal{X}_{-w}} \sum_{s \neq s'} \left[ -n_w(c; s, s')\, \theta_{s,s'}^{w\top} Z^w(c) + t_w(c; s) \exp\left( \theta_{s,s'}^{w\top} Z^w(c) \right) \right],
\tag{5.4}
\]
where the third sum in (5.4) is over all $s, s' \in \mathcal{X}_w$ such that $s \neq s'$. Recall that $n_w(c; s, s')$ and $t_w(c; s)$ were introduced in Section 2.5 to denote, respectively, the number of jumps from a state $s$ to $s'$ at the node $w$ and the total time spent in the state $s$ at the node $w$, while the parents' configuration equals $c$. Notice that the function (5.4) is the negative log-likelihood. Indeed, we just apply the negative logarithm to the density (2.8) combined with (2.9) and (5.2), where $\mathrm{pa}(w) = -w$ for each $w \in V$; then we divide by $T$ and omit the term corresponding to the initial distribution $\nu$, because $\nu$ does not depend on $\beta$. We define an estimator of $\beta$ as
\[
\hat\beta = \operatorname*{argmin}_{\theta \in \mathbb{R}^{2d(d-1)}} \left\{ \ell(\theta) + \lambda\|\theta\|_1 \right\},
\tag{5.5}
\]
where $\|\theta\|_1 = \sum_{w \in V} \sum_{s \neq s'} \sum_{u \in -w} |\theta_{s,s'}^w(u)|$ is the $l_1$-norm of $\theta$. The tuning parameter $\lambda > 0$ characterizes the balance between minimizing the negative log-likelihood and the penalty function. As we have mentioned, the form of the penalty is crucial, because its singularity at the origin implies that some coordinates of the minimizer $\hat\beta$ are exactly equal to 0 if $\lambda$ is sufficiently large. Thus, starting from the full graph, we simultaneously remove irrelevant edges and estimate the parameters of the existing ones. The function $\ell(\theta)$ and the penalty are convex, so (5.5) is a convex minimization problem, which is an important fact from both the practical and the theoretical point of view.
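The following sketch evaluates the negative log-likelihood (5.4) from the sufficient statistics $n_w(c; s, s')$ and $t_w(c; s)$. The statistics are stored sparsely (only configurations actually visited), which is what makes the computation feasible, as discussed in the next paragraph; the data layout and names are illustrative assumptions.

```python
import numpy as np

def neg_log_lik(theta, stats, Z, T):
    """Negative log-likelihood (5.4), up to the terms omitted there.

    theta: dict (w, s, s_prime) -> parameter vector theta^w_{s,s'}
    stats: dict (w, c, s, s_prime) -> (n, t), with n = n_w(c; s, s') and
           t = t_w(c; s); only configurations c that occur are stored.
    Z:     dict w -> function mapping a parents' configuration c to Z^w(c)
    """
    total = 0.0
    for (w, c, s, s_prime), (n, t) in stats.items():
        z = Z[w](c)
        eta = float(theta[(w, s, s_prime)] @ z)   # theta^w_{s,s'}^T Z^w(c)
        total += -n * eta + t * np.exp(eta)
    return total / T
```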
At first glance, computing (5.5) seems computationally complex, because the number of summands in (5.4) is $d2^d$. However, the number of nonzero terms of the form $n_w(c; s, s')$ and $t_w(c; s)$ is bounded by the total number of jumps, which grows linearly with the time $T$. Hence, most of the summands in (5.4) are zero, and the minimizer (5.5) can be calculated efficiently.

Before we state and prove the main results of this chapter, we introduce some additional notation. First, for each $w \in V$ we denote the set of its parents indicated by the true parameter $\beta$ as
\[
S_w = \left\{ u \in -w : \beta_{0,1}^w(u) \neq 0 \;\text{ or }\; \beta_{1,0}^w(u) \neq 0 \right\}.
\]
By $S$ we denote the support of $\beta$, i.e. the set of nonzero coordinates of $\beta$. Moreover, $\beta_{\min}$ is the element of $\beta$ restricted to $S$ which is smallest in absolute value. The set $S^c$ denotes the complement of $S$, that is, the set of zero coordinates of $\beta$. Besides, for each $w \in V$ we define $-S_w = V \setminus (S_w \cup \{w\})$ and denote $\Delta = \max_{s,s' \in \mathcal{X},\, s \neq s'} Q(s, s')$.

Recall that for a vector $a$ we denote its $l_\infty$-norm by $\|a\|_\infty = \max_k |a_k|$. For a subset $I$, the vector $a_I$ denotes the vector such that $(a_I)_i = a_i$ for $i \in I$ and $(a_I)_i = 0$ otherwise. Moreover, $|I|$ denotes the number of elements of $I$.

Let $\pi$ be the stationary distribution of the Markov jump process (MJP) defined by the intensity matrix $Q$. The initial distribution of this process is denoted by $\nu$, and we define $\|\nu\|_2^2 = \sum_{s \in \mathcal{X}} \nu^2(s)/\pi(s)$. Moreover, $\rho_1$ denotes the smallest positive eigenvalue of the matrix $-\frac{1}{2}(Q + Q^*)$, where $Q^*$ is the adjoint matrix of $Q$.

5.2 Main results

In this subsection we state two key results on structure learning for CTBNs from complete data. In the first one (Theorem 5.5) we show that the estimation error of the minimizer $\hat\beta$ given by (5.5) can be controlled with probability close to 1. In the second main result (Corollary 5.6) we state that the thresholded version of (5.5) is able to recognize the structure of the graph with high probability.

First, we introduce the cone invertibility factor (CIF), which plays an important role in the theoretical analysis of the properties of LASSO estimators. Our goal is to show that the estimator $\hat\beta$ is close to the true vector $\beta$. To accomplish this goal we show in Lemma 5.9 that the gradient of the likelihood (5.4) evaluated at $\beta$ is close to 0. However, this is not sufficient, since the likelihood function cannot be too "flat". Namely, its curvature around the local optimum needs to be relatively high, because we want to avoid the situation where the loss difference is small whereas the error is large. In the high-dimensional scenario this is often ensured by imposing the restricted strong convexity (RSC) condition on (5.4), as in Negahban et al. (2009). The CIF, defined below in (5.6), plays a similar role to RSC, but gives sharper consistency results (Ye and Zhang, 2010); therefore, it is used here.
The CIF is defined analogously to Ye and Zhang (2010); Huang and Zhang (2012); Huang et al. (2013), and is closely related to the compatibility factor (van de Geer, 2008) and the restricted eigenvalue condition (Bickel et al., 2009). Recall that in the previous chapter we also used a version of the CIF for Bayesian networks. Thus, for any $\xi > 1$ we define the cone $C(\xi, S) = \{\theta : \|\theta_{S^c}\|_1 \leq \xi\|\theta_S\|_1\}$, where the set $S$ denotes the support of $\beta$, as mentioned above. Then the CIF is defined as
\[
\bar F(\xi) = \inf_{0 \neq \theta \in C(\xi, S)} \frac{\theta^\top \nabla^2\ell(\beta)\,\theta}{\|\theta_S\|_1\|\theta\|_\infty}.
\tag{5.6}
\]
Notice that only the value of the Hessian $\nabla^2\ell(\theta)$ at the true parameter $\beta$ is taken into consideration in (5.6). The main difficulty with the CIF in our case is that it is an infimum of a sum of random terms whose number grows exponentially in $d$. To be able to control this quantity, we bound the CIF from below by a deterministic counterpart with far fewer summands. Namely, in Lemma 5.11 we prove that $\bar F(\xi)$ is bounded from below by the product of $\zeta_0$, given in Theorem 5.5, and
\[
F(\xi) = \inf_{0 \neq \theta \in C(\xi, S)} \frac{ \displaystyle\sum_{w \in V} \sum_{s' \neq s} \sum_{c_{S_w} \in \mathcal{X}_{S_w}} \exp\left( \beta_{s,s'}^{w\top} Z^w(c_{S_w}, 0) \right)\left( \theta_{s,s'}^{w\top} Z^w(c_{S_w}, 0) \right)^2 }{\|\theta_S\|_1\|\theta\|_\infty}
\tag{5.7}
\]
with probability close to 1. Here we have divided each parents' configuration $c = (c_{S_w}, c_{-S_w})$ into two parts: the first corresponds to the true parent nodes and the second to the remaining nodes. Below we will also use a similar notation for any state of the network $s \in \mathcal{X}$, defining it as a triple $s = (c_{S_w}, c_{-S_w}; s)$ of the state of the true parents of the node $w$, the configuration of the nodes from $-S_w$ and the state at the node $w$ itself. Note that we restricted the summation in (5.7) to $c_{S_w} \in \mathcal{X}_{S_w}$ by taking $c_{-S_w} = 0$. This allows us to derive the lower bound on $\bar F(\xi)$ without considering exponentially many random summands. Our argumentation remains valid also when we choose some nonzero values in $c_{-S_w}$, provided these values do not depend on $w$ and $c_{S_w}$. Next, we state the two main results of this chapter.

Theorem 5.5. Fix arbitrary $\varepsilon \in (0, 1)$ and $\xi > 1$. Suppose that
\[
T > \frac{36\left[ \left( \max_{w \in V} |S_w| + 1 \right)\log 2 + \log\left( d\|\nu\|_2/\varepsilon \right) \right]}{\rho_1 \min\limits_{w \in V,\, s \in \mathcal{X}_w,\, c_{S_w} \in \mathcal{X}_{S_w}} \pi^2(c_{S_w}, 0; s)}.
\tag{5.8}
\]
We also assume that $T\Delta \geq 2$ and we choose $\lambda$ such that
\[
2\,\frac{\xi + 1}{\xi - 1}\log(K/\varepsilon)\sqrt{\frac{\Delta}{T}} \;\leq\; \lambda \;\leq\; \frac{2\zeta_0 F(\xi)}{e(\xi + 1)|S|},
\tag{5.9}
\]
where $K = 2(2 + e^2)d(d-1)$ and
\[
\zeta_0 = \min_{w \in V,\, s \in \mathcal{X}_w,\, c_{S_w} \in \mathcal{X}_{S_w}} \pi(c_{S_w}, 0; s)/2.
\]
Then with probability at least $1 - 2\varepsilon$ we have
\[
\|\hat\beta - \beta\|_\infty \leq \frac{2e\xi\lambda}{(\xi + 1)\zeta_0 F(\xi)}.
\tag{5.10}
\]

Now consider the Thresholded LASSO estimator with the set of nonzero coordinates $\hat S$. The set $\hat S$ contains only those coefficients of the LASSO estimator (5.5) which are larger in absolute value than a pre-specified threshold $\delta$.

Corollary 5.6. Suppose that the assumptions of Theorem 5.5 are satisfied, and let $R$ denote the right-hand side of the inequality (5.10). If $R < \beta_{\min}/2$, then for $\delta \in [R, \beta_{\min}/2)$ we have $P(\hat S = S) \geq 1 - 2\varepsilon$.

These two results will be proven in the next section; here we give some comments on their meaning and significance. They describe the properties of the proposed estimator (5.5) in recognizing the structure of the graph. Theorem 5.5 gives conditions under which the estimation error of (5.5) can be controlled. Namely, let us for a moment ignore the constants, $\Delta$ and the parameters of the MJP such as $\nu$, $\pi$, $\rho_1$, $\zeta_0$, etc. in the assumptions. By condition (5.9), if
\[
T \geq \frac{\log^2(d/\varepsilon)\,|S|^2}{F^2(\xi)},
\tag{5.11}
\]
then the estimation error is small. This imposes some restrictions on the number of vertices in the graph, on the sparsity of the graph (i.e. the number of edges has to be small enough) and on the expression (5.7), which is discussed in Lemma 5.7 below.
The condition (5.11) is similar to standard results for LASSO estimators in Ye and Zhang (2010); Bühlmann and van de Geer (2011); Huang and Zhang (2012); Huang et al. (2013). The only difference is that the right-hand side of (5.11) usually depends linearly on $\log(d/\varepsilon)$, while here we have $\log^2(d/\varepsilon)$. The square in the logarithm could be omitted if we imposed some additional restrictions on the observation time $T$ in the crucial auxiliary result (Lemma 5.9), where we use a Bernstein-type inequality for the Poisson random variable. Obviously, this would reduce the applicability of the main result. In our opinion the gain (having $\log(d/\varepsilon)$ instead of $\log^2(d/\varepsilon)$) is smaller than the price (additional assumptions), so we do not pursue it.

The next assumption of Theorem 5.5, that $T\Delta \geq 2$, is quite natural, since the observation time has to increase when the maximal intensity of transitions decreases. Moreover, the conditions (5.8) and (5.9) also depend on the parameters of the MJP. More precisely, they depend on the stationary distribution $\pi$ and the spectral gap $\rho_1$, which in general decrease exponentially with $d$; however, in some specific cases it can be proved that they decrease polynomially.

Corollary 5.6 states that the LASSO estimator after thresholding is able to recognize the structure of the graph with probability close to 1, if the nonzero coefficients of $\beta$ are not too close to zero and the threshold $\delta$ is appropriately chosen. However, Corollary 5.6 does not give a way of choosing the threshold $\delta$, because both endpoints of the interval $[R, \beta_{\min}/2)$ are unknown. This is not a surprising fact and has been observed before, for instance in linear models (Ye and Zhang, 2010, Theorem 8). In the experimental subsection of this chapter we propose a method of choosing the threshold that relies on information criteria; a similar procedure can be found in Pokarowski and Mielniczuk (2015); Miasojedow and Rejchel (2018).

Now we state a lower bound for (5.7), which has an intuitive interpretation.

Lemma 5.7. Define
\[
A_\beta = \sum_{w \in V} \sum_{s' \neq s} \prod_{u:\, \beta_{s,s'}^w(u) \neq 0} \exp\left( -\beta_{s,s'}^w(u) \right).
\]
Then for every $\xi > 1$ we have $F(\xi) \geq (\xi A_\beta)^{-1}$.

Notice that the term $A_\beta$ is larger, and in turn $F(\xi)$ smaller, when the negative coefficients of $\beta$ dominate the positive ones in absolute value. Note that the more these negative coefficients dominate, the more our process "gets stuck", i.e. it tends to stay in the same state, because the intensities in this case tend to be close to zero (recall (5.2)). Such behaviour is natural in the context of MJPs, because multiplying the intensity matrix $Q$ by a constant $\kappa$ is equivalent to considering the time $T/\kappa$ instead of $T$. Since $F(\xi)$ appears in the lower bound (5.11) on $T$, such a dependence on $\beta$ is expected.

5.3 Proofs of the main results

This subsection contains the proofs of all the statements made in the previous subsection. The proofs of the theorem and of the corollary are based on a number of auxiliary results. Some of these results are well-known facts about LASSO estimators, and some of them are new (Lemmas 5.9 and 5.11). The main novelty and difficulty of the considered model is the continuous time nature of the observed phenomena which we investigate. In Lemma 5.9 we derive a new concentration inequality for MJPs based on the martingale theory. In Lemma 5.11 we give new upper bounds on the occupation time for MJPs.
In the proofs of the subsequent results we use the first and second derivatives of $\ell$ given by (5.4), which can also be expressed in the following form:
\[
\ell(\theta) = \frac{1}{T} \sum_{w \in V} \sum_{s \neq s'} \ell_{s,s'}^w\left( \theta_{s,s'}^w \right),
\tag{5.12}
\]
where
\[
\ell_{s,s'}^w\left( \theta_{s,s'}^w \right) = \sum_{c \in \mathcal{X}_{-w}} \left[ -n_w(c; s, s')\, \theta_{s,s'}^{w\top} Z^w(c) + t_w(c; s)\exp\left( \theta_{s,s'}^{w\top} Z^w(c) \right) \right].
\]
Therefore, we can calculate the partial derivatives
\[
\nabla\ell_{s,s'}^w\left( \theta_{s,s'}^w \right) = \sum_{c \in \mathcal{X}_{-w}} \left[ -n_w(c; s, s') + t_w(c; s)\exp\left( \theta_{s,s'}^{w\top} Z^w(c) \right) \right] Z^w(c).
\tag{5.13}
\]
By Remark 5.3 the matrix $\theta$ of all parameters has $2d$ rows and $d-1$ columns. It can also be considered as a $2d(d-1)$-dimensional vector
\[
\theta = \left( \theta_{0,1}^{w_1\top}, \theta_{1,0}^{w_1\top}, \theta_{0,1}^{w_2\top}, \theta_{1,0}^{w_2\top}, \ldots, \theta_{0,1}^{w_d\top}, \theta_{1,0}^{w_d\top} \right)^\top,
\]
where $(w_1, w_2, \ldots, w_d)$ is a fixed order of the nodes of the graph. Using this order we obtain the following representation of the gradient of $\ell$:
\[
\nabla\ell(\theta) = \frac{1}{T}\left( \nabla\ell_{s,s'}^w\left( \theta_{s,s'}^w \right) \right)_{w \in V,\, s \neq s'}.
\tag{5.14}
\]
Analogously, we calculate the second derivatives:
\[
\nabla^2\ell_{s,s'}^w\left( \theta_{s,s'}^w \right) = \sum_{c \in \mathcal{X}_{-w}} t_w(c; s)\exp\left( \theta_{s,s'}^{w\top} Z^w(c) \right) Z^w(c) Z^w(c)^\top.
\]
The second derivative of $\ell(\theta)$ is block-diagonal, with the matrices $\frac{1}{T}\nabla^2\ell_{s,s'}^w(\theta_{s,s'}^w)$ along its diagonal and zeroes elsewhere. Moreover, for any vector $\theta \in \mathbb{R}^{2d(d-1)}$ and the true parameter vector $\beta$ we have
\[
\theta^\top\nabla^2\ell(\beta)\,\theta = \frac{1}{T} \sum_{w \in V} \sum_{c \in \mathcal{X}_{-w}} \sum_{s' \neq s} t_w(c; s)\left( \theta_{s,s'}^{w\top} Z^w(c) \right)^2 \exp\left( \beta_{s,s'}^{w\top} Z^w(c) \right).
\tag{5.15}
\]

Next, we provide an auxiliary proposition needed to prove an important concentration inequality for MJPs in Lemma 5.9.

Proposition 5.8. Let $X(\tau)$ be a Markov jump process with a bounded intensity matrix $Q$. Let $n_{s,s'}^\tau$ be the number of jumps from $s$ to $s'$ on the interval $[0, \tau]$, and let $t_s^\tau$ be the occupation time of the state $s$ on the interval $[0, \tau]$. Then
\[
M_\nu(\tau) = n_{s,s'}^\tau - t_s^\tau Q(s, s')
\]
is a martingale with respect to the natural filtration $\mathcal{F}_\tau$. The notation $M_\nu(\tau)$ indicates that the distribution at time 0 is $\nu$.

Proof. For any $u < \tau$ we have
\[
\mathbb{E}\left( M_\nu(\tau) \mid \mathcal{F}_u \right) = M_\nu(u) + \mathbb{E}\left( M_\nu(\tau) - M_\nu(u) \mid \mathcal{F}_u \right) = M_\nu(u) + \mathbb{E}\left( M_\nu(\tau) - M_\nu(u) \mid X(u) \right) = M_\nu(u) + \mathbb{E}\left( M_{X(u)}(\tau - u) \mid X(u) \right),
\]
where the last equality is a consequence of Proposition 20.3 of Bass (2011). Now it is enough to show that for all $\tau > 0$ and all initial measures $\nu$ we have $\mathbb{E}M_\nu(\tau) = 0$, since applying this with the initial point mass at $X(u)$ gives $\mathbb{E}\left( M_{X(u)}(\tau - u) \mid X(u) \right) = 0$ for $u < \tau$.

For any $n \in \mathbb{N}$ define the sequence $k_i = k_i(n) = \frac{\tau i}{n}$ for $i = 0, \ldots, n$. Since the trajectory of the process is càdlàg, we have
\[
\mathbb{E}M_\nu(\tau) = \mathbb{E}\lim_{n \to \infty} \sum_{i=1}^n \left[ \mathbb{I}\left( X(k_{i-1}) = s,\, X(k_i) = s' \right) - \frac{\tau}{n}Q(s, s')\,\mathbb{I}\left( X(k_{i-1}) = s \right) \right].
\]
We observe that for all $n \in \mathbb{N}$
\[
\left| \sum_{i=1}^n \left[ \mathbb{I}\left( X(k_{i-1}) = s,\, X(k_i) = s' \right) - \frac{\tau}{n}Q(s, s')\,\mathbb{I}\left( X(k_{i-1}) = s \right) \right] \right| \leq N(\tau) + \tau Q(s, s'),
\tag{5.16}
\]
where $N(\tau)$ is the total number of jumps. Since $N(\tau)$ is dominated by a Poisson process with a bounded intensity, the right-hand side of (5.16) is integrable, and by the dominated convergence theorem and the definition of $Q$ we get
\[
\begin{aligned}
\mathbb{E}M_\nu(\tau) &= \lim_{n \to \infty} \mathbb{E}\sum_{i=1}^n \left[ \mathbb{I}\left( X(k_{i-1}) = s, X(k_i) = s' \right) - \frac{\tau}{n}Q(s, s')\,\mathbb{I}\left( X(k_{i-1}) = s \right) \right] \\
&= \lim_{n \to \infty} \mathbb{E}\sum_{i=1}^n \left[ \mathbb{E}\left( \mathbb{I}\left( X(k_{i-1}) = s, X(k_i) = s' \right) \mid X(k_{i-1}) \right) - \frac{\tau}{n}Q(s, s')\,\mathbb{I}\left( X(k_{i-1}) = s \right) \right].
\end{aligned}
\]
Next, for $s \neq s'$ we have
\[
P\left( X(k_{i-1}) = s, X(k_i) = s' \mid X(k_{i-1}) = s \right) = P\left( X(k_i) = s' \mid X(k_{i-1}) = s \right) = \frac{\tau}{n}Q(s, s') + o(1/n),
\]
and for $\sigma \neq s$
\[
P\left( X(k_{i-1}) = s, X(k_i) = s' \mid X(k_{i-1}) = \sigma \right) = 0.
\]
Hence,
\[
\mathbb{E}\left[ \mathbb{I}\left( X(k_{i-1}) = s, X(k_i) = s' \right) \mid X(k_{i-1}) \right] = \left( \frac{\tau}{n}Q(s, s') + o(1/n) \right)\mathbb{I}\left( X(k_{i-1}) = s \right).
\]
Therefore, we further obtain
\[
\begin{aligned}
\mathbb{E}M_\nu(\tau) &= \lim_{n \to \infty} \mathbb{E}\sum_{i=1}^n \left[ \left( \frac{\tau}{n}Q(s, s') + o(1/n) \right)\mathbb{I}\left( X(k_{i-1}) = s \right) - \frac{\tau}{n}Q(s, s')\,\mathbb{I}\left( X(k_{i-1}) = s \right) \right] \\
&= \lim_{n \to \infty} \mathbb{E}\sum_{i=1}^n o(1/n)\,\mathbb{I}\left( X(k_{i-1}) = s \right) = 0,
\end{aligned}
\]
where the $o(1/n)$ term does not depend on $i$.
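A quick simulation sketch (an illustrative two-state chain with assumed rates) supports Proposition 5.8: the average of $n_{s,s'}^\tau - t_s^\tau Q(s, s')$ over many trajectories should be close to 0.

```python
import numpy as np

rng = np.random.default_rng(2)
q01, q10, tau, reps = 1.5, 0.7, 5.0, 20000   # assumed rates and horizon

vals = []
for _ in range(reps):
    t, state = 0.0, 0
    n01, t0 = 0, 0.0                 # jumps 0 -> 1 and time spent in state 0
    while True:
        rate = q01 if state == 0 else q10
        dwell = rng.exponential(1.0 / rate)
        if t + dwell >= tau:
            if state == 0:
                t0 += tau - t
            break
        if state == 0:
            t0 += dwell
            n01 += 1
        t += dwell
        state = 1 - state
    vals.append(n01 - t0 * q01)      # M_nu(tau) for s = 0, s' = 1

print("mean of M(tau):", np.mean(vals))   # close to 0 by Proposition 5.8
```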
+85 + +Therefore, we further obtain +EMν(τ) = lim +n→∞ E +n +� +i=1 +��τ +nQ(s, s′) + o(1/n) +� +I (X(ki−1) = s) − τ +nQ(s, s′)I(X(ki−1) = s) +� += lim +n→∞ E +n +� +i=1 +o(1/n)I(X(ki−1) = s) = 0, +where o(1/n) does not depend on i. +Lemma 5.9. Let ε ∈ (0, 1) and ξ > 1 be arbitrary. Assume that T∆ ≥ 2 and +λ ≥ 2ξ + 1 +ξ − 1 log(K/ε) +� +∆ +T , +where K = 2(2 + e2)d(d − 1). Then we have +P +� +∥∇ℓ(β)∥∞ ≤ ξ − 1 +ξ + 1λ +� +≥ 1 − ε. +Proof. Note that by (5.2), (5.13) and (5.14) we have the following inequality +∥∇ℓ(β)∥∞ ≤ 1 +T +max +w∈V,s̸=s′,1≤k≤d−1 +������ +� +c∈X−w : Zw(c)[k]=1 +[nw(c; s, s′) − tw(c; s)Qw(c; s, s′)] +������ +, +where Zw(c)[k] is the k-th coordinate of Zw(c) for each w ∈ V, c ∈ X−w. The core step of +the proof is to show that for fixed w ∈ V, s ̸= s′, 1 ≤ k ≤ d − 1 and η = 2 log (K/ε) +P +� +� +������ +� +c∈X−w : Zw(c)[k]=1 +[nw(c; s, s′) − tw(c; s)Qw(c; s, s′)] +������ +> η +√ +T∆ +� +� ≤ (2 + e2) exp +� +−η +2 +� +. +(5.17) +Having (5.17) we finish the proof of Lemma 5.9 using union bounds. More precisely, +P +� +∥∇ℓ(β)∥∞ > ξ − 1 +ξ + 1λ +� +≤ +≤ P +� +� 1 +T +max +w∈V,s̸=s′,1≤k≤d−1 +������ +� +c∈X−w : Zw(c)[k]=1 +[nw(c; s, s′) − tw(c; s)Qw(c; s, s′)] +������ +> η +� +∆ +T +� +� ≤ +≤ 2d(d − 1)P +� +� +������ +� +c∈X−w : Zw(c)[k]=1 +[nw(c; s, s′) − tw(c; s)Qw(c; s, s′)] +������ +> η +√ +T∆ +� +� ≤ +≤ 2d(d − 1)(2 + e2) exp(− log(K/ε)) = ε. +Therefore, we focus on proving (5.17). The proof of this inequality is based on the mar- +tingale arguments, so we make the dependence on the time explicit in (5.17), that is +nw(c; s, s′) and tw(c; s) become nT +w(c; s, s′) and tT +w(c; s), respectively. +For τ ∈ [0, T] we define a process +M(τ) = +� +c∈X−w : Zw(c)[k]=1 +[nτ +w(c; s, s′) − tτ +w(c; s)Qw(c; s, s′)] . +(5.18) +86 + +We use the upper index τ in nτ +w(c; s, s′) and tτ +w(c; s) to indicate that these quantities +correspond to the time interval [0, τ]. Using Proposition 5.8 to each summand in (5.18) +we obtain that the process {M(τ) : τ ∈ [0, T]} is a martingale. Let us define its jumps by +∆M(τ) = M(τ) − M(τ−) = +� +c∈X−w : Zw(c)[k]=1 +I [X(τ−) = (s, c), X(τ) = (s′, c)] , +where M(τ−) is the left limit at τ. By Theorem II.37 of Protter (2005) and Theorem +I.4.61 of Jacod and Shiryaev (2003) for any x > −1 the process +Ex(τ) += +exp (xM(τ)) +� +u≤τ +(1 + x∆M(u)) exp(−x∆M(u)) += +exp +� +xM(τ) − (x − log(1 + x))nτ +s,s′ +� +is a local martingale, where nτ +s,s′ = � +c∈X−w : Zw(c)[k]=1 nτ +w(c; s, s′) is computed for a trajec- +tory at the time interval [0, τ]. Therefore, by Markov inequality together with the triangle +inequality we get for any x ∈ (0, 1] +P(|M(T)| > L) +≤ +P(|xM(T) − (x − log(1 + x))nT +s,s′| > xL/2) + ++ +P((x − log(1 + x))nT +s,s′ > xL/2) ≤ +≤ +2 exp +�−xL +2 +� ++ P((x − log(1 + x))nT +s,s′ > xL/2) . +We observe that nT +s,s′ is bounded from above by the total number of jumps up to time T, +which in turn is bounded by a Poisson random variable N(T) with the intensity T∆. +Hence, again by Markov inequality we have +P((x − log(1 + x))nT +s,s′ > xL/2) ≤ exp +�−xL +2 ++ T∆ +� ex +1 + x − 1 +�� +. +Applying an inequality ex ≤ 1/(1 − x) for x < 1 and setting x = 1/ +√ +T∆ we get +P((x − log(1 + x))nT +s,s′ > xL/2) ≤ exp +� +−L +2 +√ +T∆ ++ +T∆ +T∆ − 1 +� +. +We use T∆ ≥ 2 and we plug in L = η +√ +T∆ to conclude the proof. +The next lemma is a direct application of Theorem 3.4 of Lezaud (1998) and it will +be used in the second crucial auxiliary Lemma 5.11. +Lemma 5.10. 
For any w ∈ V, s ∈ Xw, cSw ∈ XSw we have +P +� 1 +T tw(cSw, 0; s) ≤ π(cSw, 0; s)/2 +� +≤ ∥ν∥2 exp +� +− π2(cSw, 0; s)ρ1T +16 + 20π(cSw, 0; s) +� +. +Proof. Fix w ∈ V, s ∈ Xw, cSw ∈ XSw. By the definition we have +tw(cSw, 0; s) = +� T +0 +I [X(t) = (cSw, 0; s)] dt. +Let us define f(X(t)) = π(cSw, 0; s) − I (X(t) = (cSw, 0, s)). Taking γ = π(cSw, 0; s)/2 in +Theorem 3.4 of Lezaud (1998), we conclude the proof. +87 + +Lemma 5.11. Let ε ∈ (0, 1), ξ > 1 be arbitrary. Suppose that F(ξ) defined in (5.7) is +positive and +T > +36 +�� +max +w∈V |Sw| + 1 +� +log 2 + log (d∥ν∥2/ε) +� +ρ1 +min +w∈V, s∈Xw +cSw∈XSw +π2(cSw, 0; s) +, +(5.19) +then +P +� ¯F(ξ) ≥ ζ0F(ξ) +� +≥ 1 − ε, +where ζ0 = +min +w∈V, s∈Xw +cSw∈XSw +π(cSw, 0; s)/2. +Proof. By the definition of ¯F(ξ), the equation (5.7) and the formula for Hessian of ℓ (see +(5.15)) we have +¯F(ξ) +F(ξ) ≥ 1 +T +min +w∈V,s,cSw∈XSw +tw(cSw, 0; s). +(5.20) +We complete the proof by bounding the right-hand side of (5.20) from below. First, we +can calculate that +P +� +min +w∈V,s∈Xw,cSw∈XSw +1 +T tw(cSw, 0; s) ≥ ζ0 +� +≥ +≥ P +� +∀w∈V,s∈Xw,cSw∈XSw +1 +T tw(cSw, 0; s) ≥ π(cSw, 0; s)/2 +� +≥ +≥ 1 − 2d +max +w∈V,s∈Xw,cSw∈XSw +2|Sw|P +� 1 +T tw(cSw, 0; s) < π(cSw, 0; s)/2 +� +. +(5.21) +Using Lemma 5.10 we bound (5.21) from below by +1 − 2d +max +w∈V,s∈Xw,cSw∈XSw +2|Sw|∥ν∥2 exp +� +− π2(cSw, 0; s)ρ1T +16 + 20π(cSw, 0; s) +� +. +Applying (5.19) we conclude the proof. +Next we state and prove three lemmas, where Lemmas 5.12 and 5.14 will be used in +the proof of the main result Theorem 5.5 and Lemma 5.13 is needed to prove Lemma 5.14. +Lemma 5.12. Let ˜β = ˆβ − β, z∗ = ∥∇ℓ(β)∥∞. Then +(λ − z∗)∥˜βSc∥1 ≤ ˜β⊤ � +∇ℓ(ˆβ) − ∇ℓ(β) +� ++ (λ − z∗)∥˜βSc∥1 ≤ (λ + z∗)∥˜βS∥1 . +(5.22) +Besides, for arbitrary ξ > 1 on the event +Ω1 = +� +∥∇ℓ(β)∥∞ ≤ ξ − 1 +ξ + 1λ +� +the random vector ˜β belongs to the cone C(ξ, S). +The proof of Lemma 5.12 is similar to the proof of Lemma 3.1 of Huang et al. (2013) +and is based on convexity of ℓ(θ) and properties of the LASSO penalty. For convenience +of the reader we will provide it here using our notation. +88 + +Proof. Since ℓ(θ) is a convex function, then +˜β⊤ � +∇ℓ(ˆβ) − ∇ℓ(β) +� += ˜β⊤ � +∇ℓ(β + ˜β) − ∇ℓ(β) +� +≥ 0, +which instantly proves the left-hand side inequality in (5.22). As we have already men- +tioned, the minimized target function, given in (5.5), is also convex because of the convex- +ity of the negative log-likelihood ℓ(θ) and ℓ1-penalty functions. Hence ˆβ will be a minimizer +in (5.5) if and only if the following conditions are met +� +� +� +� +� +� +� +� +� +∂ℓ(ˆβ) +∂βj += −λ sgn(βj), +if ˆβj ̸= 0, +����� +∂ℓ(ˆβ) +∂βj +����� ≤ λ, +if ˆβj = 0, +(5.23) +where j ∈ {1, . . . , 2d(d − 1)}. First, we can write +˜β⊤ � +∇ℓ(β + ˜β) − ∇ℓ(β) +� += +� +j∈Sc +˜βj +∂ℓ(β + ˜β) +∂βj ++ +� +j∈S +˜βj +∂ℓ(β + ˜β) +∂βj ++ ˜β⊤(−∇ℓ(β)). +Since ˜βj = ˆβj for j ∈ Sc, then applying the conditions (5.23) we can bound the last +expression from above by +� +j∈Sc +ˆβj(−λ sgn(ˆβj)) + +� +j∈S +|˜βj|λ + ∥˜β∥1z∗ = +� +j∈Sc +−λ|˜βj| + ∥˜βS∥1λ + z∗∥˜βS∥1 + z∗∥˜βSc∥1. +This in turn equals to (z∗ − λ)∥˜βSc∥1 + (λ + z∗)∥˜βS∥1 meaning that the right-hand side +inequality holds as well. Note that we used the fact that ∂ℓ(ˆβ) +∂βj += −λ sgn(βj) only on +the set Sc ∩ {j : ˆβj ̸= 0}, because ˜βj = ˆβj − βj = 0, when j ∈ Sc and ˆβj = 0. Finally, +by (5.22) and the definition of Ω1 we obtain +∥˜βSc∥1 ≤ λ + z∗ +λ − z∗∥˜βS∥1 ≤ ∥˜βS∥1 +proving the last claim of the lemma. +Lemma 5.13. 
For any b ∈ R2d(d−1) we define cb = +max +w∈V, s̸=s′, c∈X−w exp +���bw +s,s′⊤Zw(c) +��� +. +Then we have +c−1 +b b⊤∇2ℓ(β)b ≤ b⊤[∇ℓ(β + b) − ∇ℓ(β)] ≤ cbb⊤∇2ℓ(β)b +(5.24) +and +c−1 +b ∇2ℓ(β) ≤ ∇2ℓ(β + b) ≤ cb∇2ℓ(β), +(5.25) +where for two symmetric matrices A, B the expression A ≤ B means that B −A is a non- +negative definite matrix. +89 + +Proof. First we prove the inequality (5.24). By (5.14) we have +b⊤[∇ℓ(β + b) − ∇ℓ(β)] = += 1 +T +� +w∈V +� +c∈X−w +� +s′̸=s +tw(c; s) +� +bw +s,s′⊤Zw(c) +� +exp(βw +s,s′⊤Zw(c)) +� +exp(bw +s,s′⊤Zw(c)) − 1 +� +. +(5.26) +Moreover, as in (5.15), we have +b⊤∇2ℓ(β)b = 1 +T +� +w∈V +� +c∈X−w +� +s′̸=s +tw(c; s) +� +bw +s,s′⊤Zw(c) +�2 exp(βw +s,s′⊤Zw(c)). +(5.27) +Let us consider an arbitrary summand in (5.26) and the corresponding one in (5.27). We +can focus only on cases where tw(c; s) > 0 and bw +s,s′⊤Zw(c) ̸= 0. From the mean value +theorem we obtain for all non-zero x ∈ R +e−|x| ≤ ex − 1 +x +≤ e|x|. +(5.28) +So using (5.28) we can write +exp(−|bw +s,s′⊤Zw(c)|) ≤ exp(bw +s,s′⊤Zw(c)) − 1 +bw +s,s′⊤Zw(c) +≤ exp(|bw +s,s′⊤Zw(c)|). +Finally, we multiply each side by the expression tw(c; s)(bw +s,s′⊤Zw(c))2 exp(βw +s,s′⊤Zw(c)) to +conclude the proof of (5.24). +Similarly we can prove (5.25). +Finally, for any vector +x ∈ R2d(d−1) we have +x⊤∇2ℓ(β)x = 1 +T +� +w∈V +� +c∈X−w +� +s′̸=s +tw(c; s) +� +xw +s,s′⊤Zw(c) +�2 exp(βw +s,s′⊤Zw(c)) +and +x⊤∇2ℓ(β + b)x = 1 +T +� +w∈V +� +c∈X−w +� +s′̸=s +tw(c; s) +� +xw +s,s′⊤Zw(c) +�2 exp(βw +s,s′⊤Zw(c)) exp(bw +s,s′⊤Zw(c)). +Comparing each summand separately and taking into account the definition of cb and +the inequality (5.28) we finish the proof. +Lemma 5.14. Let ξ > 1 be arbitrary and assume that ¯F(ξ) > 0. Moreover, let us denote +τ = (ξ + 1)|S|λ +2 ¯F(ξ) +and the event Ω2 = {τ < e−1} . Then Ω1 ∩ Ω2 ⊂ A, where +A = +� +∥ˆβ − β∥∞ ≤ +2ξeηλ +(ξ + 1) ¯F(ξ) +� +and η < 1 is the smaller solution of the equation ηe−η = τ. +Proof. The proof is similar to Theorem 3.1 of Huang et al. (2013) or Lemma 6 of Miaso- +jedow and Rejchel (2018). Suppose we are on the event Ω1 ∩ Ω2. Denote again ˜β = ˆβ − β, +so by the previous lemma we have θ = +˜β +∥˜β∥1 +∈ C(ξ, S). Let us consider the function +g(t) = θ⊤∇ℓ(β + tθ) − θ⊤∇ℓ(β), +t ≥ 0. +90 + +This function is non-decreasing, because the negative log-likelihood function is convex. +Hence, for every t ∈ (0, ∥˜β∥1) we have g(t) ≤ g(∥˜β∥1). By Lemma 5.12 on the event Ω1 +we obtain +θ⊤[∇ℓ(β + tθ) − ∇ℓ(β)] + +2λ +ξ + 1∥θSc∥1 ≤ 2λξ +ξ + 1∥θS∥1. +(5.29) +Using Lemma 5.13 with b = tθ and (5.24) we obtain +tθ⊤[∇ℓ(β + tθ) − ∇ℓ(β)] ≥ t2 exp(−t)θ⊤∇2ℓ(β)θ, +(5.30) +in this case cb = ctθ ≤ exp(t). Now using the definition of CIF ¯F(ξ), the fact that θ +belongs to the cone C(ξ, S) and applying the bounds (5.29), (5.30) we get +t exp(−t) +¯F(ξ)∥θS∥2 +1 +|S| +≤ t exp(−t)θ⊤∇2ℓ(β)θ ≤ θ⊤[∇ℓ(β + tθ) − ∇ℓ(β)] ≤ +≤ 2λξ +ξ + 1∥θS∥1 − +2λ +ξ + 1∥θSc∥1 = += 2λ∥θS∥1 − +2λ +ξ + 1 ≤ λ(ξ + 1)∥θS∥2 +1/2. +This means that for any t satisfying (5.29) we have +t exp(−t) ≤ (ξ + 1)|S|λ +2 ¯F(ξ) += τ. +(5.31) +Since, as we mentioned, the function g(t) is non-decreasing, the set of all non-negative t +satisfying (5.29) is a closed interval [0, ˜t] for some ˜t > 0. Hence, (5.31) implies ˜t ≤ η, +where η is the smallest solution of the equation ηeη = τ. Now from (5.29) and (5.30) we +obtain +∥˜β∥1e−η ≤ ˜te−˜t ≤ +˜t exp(−˜t)θ⊤∇2ℓ(β)θ +¯F(ξ)∥θT∥1∥θ∥∞ +≤ θ⊤[∇ℓ(β + ˜tθ) − ∇ℓ(β)] +¯F(ξ)∥θT∥1∥θ∥∞ +≤ +≤ +2λξ +(ξ + 1) ¯F(ξ)∥θ∥∞ += +2λξ∥˜β∥1 +(ξ + 1) ¯F(ξ)∥˜β∥∞ +, +which finishes the proof. +Proof of Theorem 5.5. 
Fix arbitrary ε > 0 and ξ > 1. Then F(ξ) is positive by Lemma 5.7. Thus, from Lemma 5.11 we know that P(F̄(ξ) ≥ ζ0F(ξ)) ≥ 1 − ε. Combining this with the right-hand side of (5.9) we obtain that P(Ω2) ≥ 1 − ε. Moreover, from Lemma 5.9 we have that P(Ω1) ≥ 1 − ε. Therefore, Lemmas 5.12 and 5.14 (bounding e^η by e, which is valid since η < 1) imply the inequality

    P( ∥β̂ − β∥∞ ≤ 2ξeλ / ((ξ + 1)F̄(ξ)) ) ≥ 1 − 2ε.

Finally, we bound F̄(ξ) from below by ζ0F(ξ).

Proof of Corollary 5.6. The proof is a simple consequence of the uniform bound (5.10) obtained in Theorem 5.5. Indeed, for arbitrary w ∈ V, s ≠ s′ and the j-th coordinate of the vector β^w_{s,s′} such that β^w_{s,s′}(j) = 0 we obtain

    |β̂^w_{s,s′}(j)| = |β̂^w_{s,s′}(j) − β^w_{s,s′}(j)| ≤ ∥β̂ − β∥∞ ≤ δ.

Analogously, for each w ∈ V, s ≠ s′ and the j-th coordinate such that β^w_{s,s′}(j) ≠ 0 we have

    |β̂^w_{s,s′}(j)| ≥ |β^w_{s,s′}(j)| − |β̂^w_{s,s′}(j) − β^w_{s,s′}(j)| ≥ βmin − ∥β̂ − β∥∞ > 2δ − R ≥ δ,

which concludes the proof.

Proof of Lemma 5.7. Fix ξ > 1. For each w and c_{S_w} we have Z_w(c_{S_w}, 0) = (c_{S_w}, 0), so

    F(ξ) = inf_{0≠θ∈C(ξ,S)} [ Σ_{w∈V} Σ_{s′≠s} Σ_{c_{S_w}∈X_{S_w}} exp((β^w_{s,s′})^⊤_{S_w} c_{S_w}) ((θ^w_{s,s′})^⊤_{S_w} c_{S_w})² ] / ( ∥θ_S∥1 ∥θ∥∞ ),

where (β^w_{s,s′})_{S_w} and (θ^w_{s,s′})_{S_w} are the restrictions of β^w_{s,s′} and θ^w_{s,s′} to the coordinates from S_w, respectively. Therefore, we need to bound from below the expression

    [ Σ_{w∈V} Σ_{s′≠s} Σ_{c_{S_w}∈X_{S_w}} exp((β^w_{s,s′})^⊤_{S_w} c_{S_w}) ((θ^w_{s,s′})^⊤_{S_w} c_{S_w})² ] / ( ∥θ_S∥1 ∥θ∥∞ )    (5.32)

for each θ ∈ C(ξ, S) with θ ≠ 0. First, we restrict the third sum in the numerator of (5.32) to the summands corresponding only to the vectors e_u ∈ X_{S_w} having 1 on the coordinate of the node u ∈ S_w and 0 elsewhere. This reduces the numerator of (5.32) to the form

    Σ_{w∈V} Σ_{s′≠s} Σ_{u∈S_w} exp(β^w_{s,s′}(u)) (θ^w_{s,s′}(u))².    (5.33)

Recall that S_w = {u ∈ −w : β^w_{0,1}(u) ≠ 0 or β^w_{1,0}(u) ≠ 0}. Therefore, if β^w_{s,s′}(u) ≠ 0, then u ∈ S_w, so the sum (5.33) can be bounded from below by

    Σ_{w∈V} Σ_{s′≠s} Σ_{u: β^w_{s,s′}(u)≠0} exp(β^w_{s,s′}(u)) (θ^w_{s,s′}(u))²,    (5.34)

because (5.33) has more summands and all summands are nonnegative. Using the reverse Hölder inequality we bound (5.34) from below by

    A_β^{−1} ( Σ_{w∈V} Σ_{s′≠s} Σ_{u: β^w_{s,s′}(u)≠0} |θ^w_{s,s′}(u)| )²,    (5.35)

where

    A_β = Σ_{w∈V} Σ_{s′≠s} Σ_{u: β^w_{s,s′}(u)≠0} exp(−β^w_{s,s′}(u)).

Next, recall that S is the set of nonzero coordinates of β, so (5.35) is just ∥θ_S∥1²/A_β. Summarizing, the expression (5.32) is bounded from below by

    ∥θ_S∥1 / ( A_β ∥θ∥∞ )    (5.36)

for each θ ∈ C(ξ, S) with θ ≠ 0. The vector θ belongs to the cone C(ξ, S), which implies that ∥θ_{S^c}∥∞ ≤ ∥θ_{S^c}∥1 ≤ ξ∥θ_S∥1 and

    ∥θ∥∞ = max(∥θ_S∥∞, ∥θ_{S^c}∥∞) ≤ max(∥θ_S∥1, ξ∥θ_S∥1) = ξ∥θ_S∥1,

since ξ > 1. Applying this in (5.36), we finish the proof.

5.4 Numerical examples

In this section we describe the details of the algorithm implementation as well as the results of the experimental studies.

5.4.1 Details of implementation

We now describe the practical implementation of the proposed algorithm in detail. The solution of (5.5) depends on the choice of λ. Finding the "optimal" parameter λ and the threshold δ is difficult in practice. Here we solve this problem using information criteria (Xue et al., 2012; Pokarowski and Mielniczuk, 2015; Miasojedow and Rejchel, 2018).

First, using (5.12) we write the minimized function in (5.5) as the sum

    ℓ(θ) + λ∥θ∥1 = Σ_{w∈V} Σ_{s≠s′} [ (1/T) ℓ^w_{s,s′}(θ^w_{s,s′}) + λ Σ_{u∈−w} |θ^w_{s,s′}(u)| ],

where s, s′ ∈ {0, 1}.
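In code, a single summand of this decomposition and its gradient (cf. (5.13)) can be sketched as follows. This is a minimal illustration which assumes that the sufficient statistics n_w(c; s, s′), t_w(c; s) and the binary vectors Z_w(c) have already been extracted from the observed trajectory; all names are ours.

```python
import numpy as np

def neg_loglik_edge(theta, Z, n_jumps, t_occ, T):
    """(1/T) * l^w_{s,s'}(theta). Z has one row Z_w(c) per configuration c;
    n_jumps[c] = n_w(c; s, s'), t_occ[c] = t_w(c; s)."""
    lin = Z @ theta                                   # theta^T Z_w(c) for each c
    return (-(n_jumps * lin).sum() + (t_occ * np.exp(lin)).sum()) / T

def grad_edge(theta, Z, n_jumps, t_occ, T):
    """Gradient (5.13), scaled by 1/T."""
    resid = -n_jumps + t_occ * np.exp(Z @ theta)
    return (Z.T @ resid) / T

# Tiny synthetic check: 4 parent configurations, p = 3 covariates.
rng = np.random.default_rng(0)
Z = rng.integers(0, 2, size=(4, 3)).astype(float)
print(grad_edge(np.zeros(3), Z,
                n_jumps=np.array([3.0, 1.0, 0.0, 2.0]),
                t_occ=np.array([2.0, 1.0, 4.0, 3.0]), T=10.0))
```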
Therefore, for fixed w ∈ V and s, s′ ∈ {0, 1} with s ̸= s′, the cor- +responding summand is a function which depends on the vector θ restricted only to its +coordinate vector θw +s,s′ (see notation (5.1)). +So, for each triple w and s ̸= s′ we can +solve the problem separately. In our implementation we use the following scheme. We +start with computing a sequence of minimizers on the grid, i.e. for any triple w ∈ V, +s ̸= s′ we create a finite sequence {λi}N +i=1 uniformly spaced on the log scale, starting from +the largest λi, which corresponds to the empty model. Next, for each value λi we compute +the estimator ˆβw +s,s′[i] of the vector βw +s,s′ +ˆβw +s,s′[i] = argmin +θw +s,s′ +� +ℓw +s,s′(θw +s,s′) + λi∥θw +s,s′∥1 +� +, +(5.37) +where as in (5.12) +ℓw +s,s′(θw +s,s′) = 1 +T +� +c∈X−w +� +−nw(c; s, s′)θw +s,s′⊤Zw(c) + tw(c; s) exp +� +θw +s,s′⊤Zw(c) +�� +. +The notation ˆβw +s,s′[i] should not be confused with βw +s,s′(u) introduced before. +Namely, +ˆβw +s,s′[i] is the i-th approximation of βw +s,s′, while βw +s,s′(u) is the coordinate of βw +s,s′ corres- +ponding to the node u. To solve (5.37) numerically for a given λi we use the FISTA +algorithm with backtracking from Beck and Teboulle (2009). The final LASSO estimator +ˆβw +s,s′ := ˆβw +s,s′[i∗] is chosen using the Bayesian Information Criterion (BIC), which is a po- +pular method of choosing the value of λ in the literature (Xue et al., 2012; Miasojedow +and Rejchel, 2018), i.e. +i∗ = argmin +1≤i≤100 +� +nℓw +s,s′(ˆβw +s,s′[i]) + log(n)∥ˆβw +s,s′[i]∥0 +� +. +Here ∥ˆβw +s,s′[i]∥0 denotes the number of non-zero elements of ˆβw +s,s′[i] and n is the number of +observed jumps of the process. In our simulations we use N = 100. +93 + +Finally, the threshold δ is obtained using the Generalized Information Criterion (GIC). +The similar way of choosing a threshold was used previously in Pokarowski and Mielniczuk +(2015); Miasojedow and Rejchel (2018). For a prespecified sequence of thresholds D we +calculate +δ∗ = argmin +δ∈D +� +nℓw +s,s′(ˆβw,δ +s,s′) + log(2d(d − 1))∥ˆβw,δ +s,s′∥0 +� +, +where ˆβw,δ +s,s′ is the LASSO estimator ˆβw +s,s′ after thresholding with the level δ. +5.4.2 +Simulated data +We consider three models defined as follows. For shortness we denote these models later +on as M1, M2 and M3, respectively. +Model 1. All vertices have the “chain structure”, i.e. for any node, except for the first +one, its set of parents contains only a previous node. Namely, we put V = {1, . . . , d} +and pa(k) = {k − 1}, if k > 1 and pa(1) = ∅. We construct CIM in the following way. +For the first node the intensities of leaving both states are equal to 5. For the rest of +the nodes k = 2, . . . , d, we choose randomly a ∈ {0, 1} and we define Qk(c, s, s′) = 9, if +s ̸= |c−a| and 1 otherwise. In other words, we choose randomly whether the node prefers +to be at the same state as its parent (a = 0) or not (a = 1). Say, the node k prefers to +be at the same state as the node k − 1. Then if these two states coincide, the intensity +of leaving the current state is 1, otherwise it is 9. The intensity is defined analogously, +when the node k does not prefer to be at the same state as the node k − 1. +Model 2. The first 5 vertices are correlated, while the remaining vertices are independent. +We sample 10 arrows between first 5 nodes by choosing randomly 2 parents for each node. +In order to define the intensity matrix, consider the node w ∈ V with pa(w) ̸= ∅ and +a configuration c ∈ Xpa(w) of the parents states. 
We denote |c| = 1 if all the parents of w are in the state 1, and |c| = 0 otherwise. Next we define the intensities as follows:

    Q_w(c, s, s′) = 5  if pa(w) = ∅,
                    9  if pa(w) ≠ ∅, s is the preferred state and |c| = 1,
                    1  if pa(w) ≠ ∅, s is the preferred state and |c| = 0,
                    9  if pa(w) ≠ ∅, s is not the preferred state and |c| = 0,
                    1  if pa(w) ≠ ∅, s is not the preferred state and |c| = 1.

As in the previous model, the preferred state is chosen randomly from {0, 1}. In words, for every node w with pa(w) ≠ ∅ we choose one state at random, say 0. In this case, if all parents are 1 the process prefers to be in 1, and if some of the parents are 0 the process prefers to be in 0.

Model 3. All vertices form a "binary tree" structure with arrows from the leaves to the root. So, the leaves have no parents, while the inner nodes have two parents, with the exception that one node has only a single parent if d is even. If a node has no parents or its parents are in different states, then the intensity of leaving a state is 5. Otherwise, i.e. if a node has only one parent or both parents are in the same state, the intensities of leaving a state are computed as in Model 1.

The model M1 has a simple structure, which involves all vertices and satisfies our assumption (5.2). The model M2 corresponds to a dense structure on a small subset of vertices and does not satisfy assumption (5.2). Another potential difficulty is related to possible feedback loops, which are usually hard to recognize. Therefore, we also consider the model M2+, which looks like M2 but contains the interaction terms and fulfills (5.2). The model M3 has a slightly more complex structure than M1, but it also satisfies our assumption (5.2).

We consider two cases, d = 20 and d = 50, for all four models. So, the number of possible parameters of the model (the size of β) is 2d² = 800 or 5000, respectively. For the model with interactions, the number of possible parameters is d²(d + 1) = 8400 or 127500. We use T = 10 and 50 for all models and we replicate the simulations 100 times for each scenario. In Table 5.1 we present the averaged results of the simulations in terms of three quality measures:
• power, which is the proportion of correctly selected edges;
• false discovery rate (FDR), which is the fraction of incorrectly selected edges among all selected edges;
• model dimension (MD), which is the number of selected edges.

We observe that in the models M1 and M3 the results of the experiments confirm that the proposed method works in a satisfactory way. For T = 10 the algorithm has high power and its FDR is not large. The final model selected by our procedure is slightly too small (it misses a few existing edges). When we increase the observation time (T = 50), our estimator behaves almost perfectly.

The model M2 is much more difficult, and this fact has a direct impact on the simulation results. Namely, for T = 10 the power of the algorithm is relatively low, with the FDR also being rather small. The procedure performs slightly better when we take T = 50. However, for both observation times the estimator cannot find the true edges in the graph. One of the reasons for such behaviour is that in M2 the dependence structure in the CIM is not additive in the parents. This fact, combined with possible feedback loops, leads to recovering the existing edges but with directions opposite to the true ones.
Looking deeper into the results for a few examples chosen from our experiments, we confirm this claim: the edges between nodes are correctly selected, but their directions are wrong. Therefore, we can conclude that in the complex model M2 our estimator is at least able to recognize interactions between nodes, which is important in many practical problems on its own. The results for M2+ confirm that the performance of our method improves when we consider the more complex parametrization with interaction terms.

Table 5.1: Results for simulated data. In the models M1 and M3 the true dimension is 19 for d = 20 and 49 for d = 50. In the model M2 the true model dimension is 10.

    Model    d    T    Power   FDR     MD
    M1       20   10   0.86    0.03    16.8
    M1       20   50   1       0.02    19.3
    M1       50   10   0.61    0.01    30.3
    M1       50   50   1       0.01    49.3
    M2       20   10   0.16    0.2     2
    M2       20   50   0.78    0.04    8.1
    M2       50   10   0.10    0.15    1.28
    M2       50   50   0.62    0.02    6.4
    M2+      20   10   0.35    0.1     3.9
    M2+      20   50   0.9     0.02    9.2
    M2+      50   10   0.17    0.08    2
    M2+      50   50   0.68    0.01    6.9
    M3       20   10   0.17    0.1     3.7
    M3       20   50   0.97    0.01    18.7
    M3       50   10   0.6     0.09    3.2
    M3       50   50   0.88    0.003   43

5.5 Extension of the results

In this chapter we proposed a method for structure learning of CTBNs and confirmed its good performance both theoretically and experimentally. To simplify the notation and help the reader follow our reasoning, we restricted ourselves to graphs with only two possible states per node. However, our results generalize in a straightforward way to arbitrary finite graphs by extending β to the other possible jumps and the other possible values of the parents. In terms of the explanatory variables, this is equivalent to the standard encoding of qualitative variables in linear or generalized linear models. To demonstrate the generalization more clearly, we present an example similar to Example 5.1 from the very beginning of this chapter.

Example 5.15. We consider a CTBN with three nodes A, B and C. Let their state spaces be X_A = {0, 1, 2}, X_B = {0, 1, 2, 3} and X_C = {0, 1, 2}, respectively. Then for the node A we define the function Z_A as

    Z_A(b, c) = [1, I(b = 1), I(b = 2), I(b = 3), I(c = 1), I(c = 2)]^⊤

for each b ∈ {0, 1, 2, 3} and c ∈ {0, 1, 2}. Analogously, we can define the representations for the remaining nodes:

    Z_B(a, c) = [1, I(a = 1), I(a = 2), I(c = 1), I(c = 2)]^⊤

for each a ∈ {0, 1, 2} and c ∈ {0, 1, 2}, and

    Z_C(a, b) = [1, I(a = 1), I(a = 2), I(b = 1), I(b = 2), I(b = 3)]^⊤

for each a ∈ {0, 1, 2} and b ∈ {0, 1, 2, 3}.

Therefore, for each node w and each configuration of the parents' states (e.g. for the node A and the values at the nodes B and C) the value of the function Z_w(·, ·) is still a binary vector, whose dimension equals the sum of the numbers of states of all the remaining nodes, minus the number of those nodes, plus one for the intercept. In this example the parameter vector (5.1) is defined as

    β = ( β^A_{0,1}, β^A_{1,0}, β^A_{0,2}, β^A_{2,0}, β^A_{1,2}, β^A_{2,1}, β^B_{0,1}, β^B_{1,0}, β^B_{0,2}, . . . , β^B_{3,1}, β^B_{2,3}, β^B_{3,2}, β^C_{0,1}, . . . , β^C_{2,1} )^⊤.

With a slight abuse of notation, the vector β^A_{0,1} is given as

    β^A_{0,1} = ( β^A_{0,1}(1), β^A_{0,1}(B = 1), β^A_{0,1}(B = 2), β^A_{0,1}(B = 3), β^A_{0,1}(C = 1), β^A_{0,1}(C = 2) )^⊤,

and we interpret it as follows: if β^A_{0,1}(B = 1), β^A_{0,1}(B = 2) and β^A_{0,1}(B = 3) are all equal to 0, then the intensity of the change from the state 0 to 1 at the node A does not depend on the state at the node B.
Similarly, the coordinates βA +0,1(C = 1) and βA +0,1(C = 2) +describe the dependence between the above intensity and the state at the node C, and +βA +0,1(1) corresponds to the intercept. For the node B the coordinates of the vector βB +1,3 = +� +βB +1,3(1), βB +1,3(A = 1), βB +1,3(A = 2), βB +1,3(C = 1), βB +1,3(C = 2) +� +describe the relation between +the intensity of the jump from the state 1 to 3 at the node B to the intercept, states at +the nodes A and C, respectively. +Our results can be also easily generalized to the case, where we consider not only +additive effect in (5.2), but also interactions between parents. Let us again use an example. +Example 5.16. As previously we consider CTBN with three nodes A, B and C with +corresponding state spaces XA = {0, 1}, XB = {0, 1, 2} and XC = {0, 1}. In a linear +model we have the following Zw functions: +ZA(b, c) = [1, I(b = 1), I(b = 2), I(c = 1)]⊤, +ZB(a, c) = [1, I(a = 1), I(c = 1)]⊤, +ZC(a, b) = [1, I(a = 1), I(b = 1)]⊤ +for each a, c ∈ {0, 1} and b ∈ {0, 1, 2}. +Then after we add pairwise interactions to +the linear model the functions above take the form +ZA(b, c) = [1, I(b = 1), I(b = 2), I(c = 1), I(b = 1, c = 1), I(b = 2, c = 1)]⊤, +ZB(a, c) = [1, I(a = 1), I(c = 1), I(a = 1, c = 1)]⊤, +ZC(a, b) = [1, I(a = 1), I(b = 1), I(a = 1, b = 1)]⊤. +For models with more nodes we can also take into account more complex interactions. +97 + +Chapter 6 +Structure learning for CTBNs for +incomplete data +In the previous chapter we considered the case when we observe CTBN at each moment of +time. Under this assumption we introduced a novel method of structure learning. In this +chapter we show that our method can be adapted to partially observed and noisy data. +In the case of partial observations we need to introduce the observation and the likelihood +of the observed data given a hidden trajectory of a process. We can again parametrize +CIM by (5.2). However, in this case the problem (5.5) becomes more challenging and +leads to the following two problems. First, the theoretical analysis becomes more chal- +lenging because the loss function is not convex. Second, the likelihood function can not be +calculated explicitly, hence, it is difficult to obtain from the computational perspective. +In our solution we formulate the EM algorithm for this case, where the expectation step +is standard and concerns the calculation of the expected log-likelihood. The maximization +step is performed in the same way as for the complete data. Since the density belongs to +the exponential family, the E-step requires to compute the expected values of sufficient +statistics, which is done with the MCMC algorithm developed in Rao and Teh (2013). In +addition, the results from Majewski et al. (2018) combined with Miasojedow and Niemiro +(2017) are used in the analysis of the Monte Carlo scheme. +6.1 +Introduction and notation +Let t = (t0, t1, ..., tn) with 0 = t0 < t1 < ... < tn and S = (S0, S1, ..., Sn) describe +the full trajectory X of the process on the interval [0, T] (t denotes times of jumps, S is +a skeleton, where Si ∈ X is a state at the moment ti). Let X denote the set of all possible +trajectories of the process. Let us consider the case when instead of observing the full +trajectory X we have access only to the partial and noisy data Y with the conditional +density p(Y | X). More precisely, we assume that Y is represented by the observation +of X at times tobs +1 , . . . , tobs +k +with the likelihoods gj(Sjt) for j = 1, . . . 
, k, where +jt = max{i : ti ≤ tobs +j }. +98 + +We assume that 0 < C < gj < ˜C for 1 ≤ j ≤ k. In this case the full density is given by +pβ(X, Y ) = p(Y | X)pβ(X), +where pβ(X) is given by (2.8). Observe that the dependence of pβ(X) on β is mediated +through our assumption (5.2), which can be inserted into (2.9). For the clarity of pre- +sentation we assume that p(Y | X) is known, however, the adaptation of our method to +the case where p(Y | X) depends also on some unknown parameters is straightforward. +The negative of the log-likelihood function in this case is given by +ℓ(β) = − log +�� +X +pβ(X, Y )dX +� +, +where symbol dX means the summation first over all possible numbers of jumps of the tra- +jectory X, then over all possible jumps, and the integration with respect to times of jumps. +More precisely, +� +f(X)dX = +∞ +� +n=0 +� +S1∈X +· · · +� +Sn∈X +t2 +� +0 +t3 +� +t1 +· · · +T +� +tn−1 +f(n, t1, . . . , tn, S1, . . . , Sn)dt1 . . . dtn. +Again we can define the estimator of the parameter vector β, as previously, +ˆβ = argmin +θ∈R2d(d−1) {ℓ(θ) + λ∥θ∥1} . +(6.1) +Since we are not able to compute ℓ(θ) analytically, we need to propose an efficient algo- +rithm for finding ˆβ. One of the efficient algorithms of solving complex optimization prob- +lems of the form (6.1) is the projected Proximal Gradient Descent (p-PGD) algorithm +(see for example Beck and Teboulle (2009) and Majewski et al. (2018)). For a closed +compact convex set K by � +K(a) we denote the projection of a onto K. Then p-PGD is +defined iteratively by +βk+1 = +� +K +� +proxγk,λ∥·∥1(βk − γk∇ℓ(βk)) +� +, +(6.2) +where {γk}k≥0 is a sequence of step-sizes, and “ prox ” denotes the proximal operator +defined for any convex function g by +proxγ,g(x) = argmin +y +� +g(y) + 1 +2γ ∥y − x∥2 +� +. +In the case of ℓ1 penalty, i.e. g = λ∥ · ∥1, the proximal operator is just a soft-thresholding +operator. Element-wise soft-thresholding operator Sλ : Rn → Rn is defined as +Sλ(xi) = [|xi| − λ]+ sgn(xi), +where [·]+ denotes the positive part. +In our case we are not able to evaluate the gradient ∇ℓ explicitly and we will use +stochastic version of the projected proximal gradient algorithm (p-SPGD), where ∇ℓ +99 + +is replaced by its Monte Carlo approximation. +Under the regularity conditions given +for example in Assumption AD.1 in Douc et al. (2014) we derive that the gradient of +the negative log-likelihood is given by +∇ℓ(β) = −∇ log +�� +pβ(X, Y )dX +� += −∇ +� +pβ(X, Y )dX +� +pβ(X, Y )dX += += − +� +pβ(X, Y )∇ log(pβ(X, Y ))dX +� +pβ(X, Y )dX += += − +� ∇ log(pβ(X, Y ))pβ(X, Y ) +� +pβ(X, Y )dX +dX = += − +� +∇ log(pβ(X, Y ))pβ(X | Y )dX = += −E(∇ log(pβ(X, Y )) | Y ). +(6.3) +This equation is sometimes referred as Fisher’s identity. +Now based on (6.3) we can +approximate ∇ℓ by +Φ(β, X1, . . . , Xm) = − 1 +m +m +� +i=1 +∇ log(pβ(Xi, Y )), +(6.4) +where X1, . . . , Xm is a set of subsequent states of the Markov chain with the stationary +distribution πβ = pβ(X | Y ) ∝ pβ(X, Y ), where in particular each of X1, . . . , Xm is +a trajectory of the process X. To generate this Markov chain we will use the procedure +described below. +6.2 +Sampling the Markov chain with Rao and Teh’s +algorithm +Consider the set M of all possible intensity matrices of Markov jump processes with +the state space X equipped with some matrix distance. Therefore, for any Q ∈ M and +s ∈ X each element Q(s, s) on the diagonal of Q is nonpositive, and otherwise it is non- +negative. 
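The following small sketch (ours) checks these sign constraints and computes a dominating rate of the kind chosen in the next step:

```python
import numpy as np

def is_intensity_matrix(Q, tol=1e-9):
    """Membership in M: nonpositive diagonal, nonnegative off-diagonal
    entries and zero row sums."""
    diag = np.diag(Q)
    off = Q - np.diag(diag)
    return bool((diag <= tol).all()
                and (off >= -tol).all()
                and np.allclose(Q.sum(axis=1), 0.0))

def dominating_rate(Q, slack=2.0):
    """A rate eta > max_s Q(s), where Q(s) = -Q(s, s); taking a strict
    multiple (slack > 1) of the largest leaving rate is one simple choice."""
    return slack * float(-np.diag(Q).min())

Q = np.array([[-1.0, 1.0],
              [3.0, -3.0]])
assert is_intensity_matrix(Q)
print(dominating_rate(Q))  # 6.0, strictly above the maximal leaving rate 3.0
```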
Let L ⊂ M be a compact set and choose η > sup +Q∈L +max +s∈X Q(s), where we used +the notation Q(s) = −Q(s, s) introduced in Section 2.5. To generate the Markov chain +parametrized by Q ∈ L we will use Rao and Teh’s algorithm, which uses the idea of +uniformization and the notion of virtual jumps (Rao and Teh (2013)). A virtual jump in +a trajectory described by a pair of time points t and a corresponding skeleton S means +that for two subsequent time points ti and ti+1 we have Si = Si+1. Simply put, it means +that the process can jump from a certain state back to the same state. Here we provide +a comprehensive description of a single iteration of the algorithm. Given an arbitrary +trajectory (t, S) of X, such that Y (tobs +i ) = X(tobs +i ) for 1 ≤ i ≤ k, we generate another +trajectory (¯t, ¯S) with this property using the following procedure: +1. We generate times of virtual jumps v from piecewise homogeneous Poisson process +with the intensity η − Q(X(t)), which means that for every interval [ti, ti+1) we +100 + +sample a number ki of virtual jumps from the Poisson distribution with the para- +meter equal to (η − Q(Si))(ti+1 − ti); then the times of virtual jumps are uniformly +distributed on [ti−1, ti). +2. We add virtual jumps to the trajectory in a correct order and we obtain new times +of jumps t′ = t ∪ v and a new corresponding skeleton S′ = (S′ +0, ..., S′ +n′), where n′ is +the number of elements of t′. Therefore S′ = (S0, . . . , S0, S1, . . . , S1, . . . , Sn, . . . , Sn) +with k0 instances of S0, k1 instances of S1, etc. In other words, this skeleton con- +tains the same jumps at the same time points as S and for every interval [ti, ti+1) +the states in S′ are equal to Si−1. +3. We sample a new skeleton ¯S of the size n′ using the standard Forward-Filtering +Backward-Sampling algorithm (FFBS), which for completeness is provided at the end +of this chapter in Section 6.5. Thus, the resulting distribution of the skeleton will be +ν(S0) �n′ +i=1 P(S′ +i−1, S′ +i) �k +j=1 gj(S′ +jt′) +� +S ν(S0) �n′ +i=1 P(Si−1, Si) �k +j=1 gj(Sjt′) +, +where ν is the initial distribution (which does not depend on Q), and +P(s, s′) = +� +� +� +� +� +Q(s, s′) +η +, +if s ̸= s′, +1 − Q(s) +η +, +if s = s′, +(6.5) +where s, s′ ∈ X. The summation in the denominator is taken with respect to all +possible skeletons of size n′ containing virtual jumps. +4. From the trajectory (t′, ¯S) we discard newly acquired virtual jumps (i.e. we re- +move ¯Si such that ¯Si = ¯Si−1) and we obtain a new set ¯t of times of jumps and +the resulting trajectory (¯t, ¯S), which describes the desired Markov chain. +The procedure above describes one step of the algorithm and Rao and Teh (2013) showed +that both trajectories (t, S) and (¯t, ¯S) describe the same MJP. Simply put, the Poisson +rate η dominates the leaving rates of all states of the MJP and the new skeleton will +contain more events than there are jumps in the MJP path. The corresponding trajectory +is regarded as a redundant representation of a pure-jump process that always jumps to +a new state. Note, that our new stochastic matrix P defined in (6.5) allows self-transitions +(we refer to them as virtual jumps), and as η increases their number grows as well. These +self-transitions will be discarded in the final step of the algorithm, which compensates for +an increased number of events. +The step of the algorithm is the composition of two Markov kernels. 
First we add +101 + +virtual jumps according to the kernel M J +Q defined by +M J +Q((t, S), (˜t, ˜S)) = I(¯t = t ∪ v) +n−1 +� +i=0 +� +[(η − Q(si))(ti+1 − ti)]ki e−(η−Q(si))(ti+1−ti)I(ti < vi,1 < · · · < vi,ki < ti+1) +ji+1 +� +l=ji +I(˜Sl = Si) +� +. +(6.6) +Next we draw a skeleton according to the kernel M S +Q given by +M S +Q((˜t, ˜S), (¯t, ¯S)) = I(¯t = ˜t) +ν(¯S0) �n +i=1 P(¯Si−1, ¯Si) �k +j=1 gj(¯Sjt) +� +˜S +� +ν(˜S0) �n +i=1 P(˜Si−1, ˜Si) �k +j=1 gj(˜Sj˜t) +�, +(6.7) +where n denotes the length of the vector ˜t. The dependence of M S +Q on Q is hidden in +the definition of P. +Note that adding virtual jump times v defines the new skeleton uniquely and we denote +it by Sv. Therefore, since sampling a new skeleton does not change times of jumps, the full +kernel MQ = M S +QM J +Q is given by +MQ((t, S), (¯t, ¯S)) = M J +Q((t, S), (¯t, Sv))M S +Q((¯t, Sv), (¯t, ¯S)). +(6.8) +The kernel MQ acts on a function f(t, S) as follows +MQf(t, S) = E +� +f(¯t, ¯S) | (t, S) +� += += +∞ +� +k0=0 +· · · +∞ +� +kn−1=0 +� +n−1 +� +i=0 +I{ti < vi,1 < · · · < vi,ki < ti+1} +� +¯S +f(¯t, ¯S)MQ((t, S), (¯t, ¯S))dv. +where the inside sum is taken with respect to all possible skeletons of the same length +as ¯t. Further, when it does not cause any confusion, for convenience we often denote any +single trajectory (t, S) as X, and as Xi − the trajectory obtained after i-th iteration of +the algorithm with the starting trajectory X0. Then for any two adjacent trajectories Xi +and Xi+1 the function MQf(Xi) stands for E [f(Xi+1) | Xi]. Moreover, for any trajec- +tory X let V (X) = V (t, S) = n + 1 (recall that n denotes the number of jumps on the +trajectory X described by t and S). +6.3 +Structure learning via penalized maximum likeli- +hood function +Recall the assumption (5.2) introduced in the previous chapter +log(Qw(c, s, s′)) = βw +s,s′⊤Zw(c) . +For a given parameter vector β ∈ R2d(d−1) this defines the intensity matrix Q and this +mapping can be regarded as an isometry. So, if β belongs to a compact set K ∈ R2d(d−1), +102 + +then Q belongs to some compact set L ∈ M and the construction above is still valid. +In this case, we will frequently write Mβ instead of MQ. Now we introduce the main +theoretical result regarding the convergence of our algorithm. +Theorem 6.1. Let K ∈ R2d(d−1) be some compact convex set. +Denote as NK(β) the +normal cone to the set K at the point β +NK(β) = {a ∈ R2d(d−1) : ⟨a, z − β⟩ for all z ∈ K}. +Moreover, denote +S = {β ∈ K: 0 ∈ ∇ℓ(β) + λ∂∥β∥1 − NK}, +where ∂∥β∥1 denotes the subgradient. Suppose that (ℓ+λ∥·∥1)(S) has non-empty interior. +Assume also EV 2(X0) < ∞. Let the sequence {γk, k ∈ N} satisfy γk > 0, lim +k→∞ γk = 0, +and +∞ +� +k=1 +γk = ∞, +∞ +� +k=1 +γ2 +k < ∞, +∞ +� +k=1 +|γk − γk+1| < ∞. +Let {βk} be a sequence generated by the projected stochastic proximal gradient descent as +in (6.2). Then +dist(βk, S ∩ K) +k→∞ +→ 0 +a.s. +Remark 6.2. +(1) Obviously, lim +k→∞ γk = 0 follows from the convergence �∞ +k=1 γ2 +k < ∞. +(2) The symbol (ℓ + λ∥ · ∥1)(S) should be understood as the image of the set S under +the function h(θ) = ℓ(θ) + λ∥θ∥1. +The theorem is a consequence of Theorem 5.4 of Majewski et al. (2018). It states that +the sequence of parameter vectors βk generated by the projected SPGD algorithm con- +verges almost surely to a stationary point of the function being minimized, where instead +of the gradient of the negative log-likelihood we used its Markov chain approximation. 
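To fix ideas, the iteration (6.2) can be sketched as follows; the box-shaped set K, the step sizes γ_k = γ_0 k^{−3/4} (which satisfy the conditions of Theorem 6.1) and the placeholder mcmc_gradient, standing for the Monte Carlo approximation (6.4), are our own illustrative choices.

```python
import numpy as np

def soft_threshold(x, a):
    """Proximal operator of a * ||.||_1, i.e. element-wise soft-thresholding S_a."""
    return np.sign(x) * np.maximum(np.abs(x) - a, 0.0)

def p_spgd(beta0, mcmc_gradient, lam, box=10.0, n_iter=1000, gamma0=0.5):
    """Projected stochastic proximal gradient descent, cf. (6.2).
    mcmc_gradient(beta) should return the estimate (6.4) of grad l(beta)."""
    beta = beta0.copy()
    for k in range(1, n_iter + 1):
        gamma = gamma0 * k ** (-0.75)      # sum of gammas diverges, sum of squares converges
        beta = soft_threshold(beta - gamma * mcmc_gradient(beta), gamma * lam)
        beta = np.clip(beta, -box, box)    # projection onto K = [-box, box]^p
    return beta

# Sanity check with an exact quadratic gradient in place of the MCMC estimate.
beta_hat = p_spgd(np.zeros(3), lambda b: b - np.array([2.0, 0.05, -1.0]), lam=0.1)
print(beta_hat)  # approx. [1.9, 0.0, -0.9]: the small coefficient is zeroed out
```

With the exact gradient the scheme reduces to deterministic proximal gradient descent, whose limit here is the soft-thresholded target; with the MCMC estimate plugged in, Theorem 6.1 guarantees convergence to the set S of stationary points under its assumptions.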
+Before proving the theorem we need a few auxiliary results and some additional no- +tation. For any function f of the trajectory X and any signed-measure µ, we define its +V -variation by +∥µ∥V = sup +|f|≤V +|µ(f)|, +(6.9) +where +µ(f) = +� +X +fdµ, +and the integration is over all possible trajectories of the process. Also we denote +|f|V = sup +X∈X +|f(X)| +V (X) , +(6.10) +where supremum is taken with respect to all possible trajectories. +First we prove three auxiliary lemmas concerning the kernels MQ defined by (6.8). +103 + +Lemma 6.3. Fix a compact set L ∈ M. Then there exist constants ρ1, ρ2 ∈ (0, 1) and +b1, b2 < ∞ such that for any trajectory X we have +sup +Q∈L +MQV (X) ≤ ρ1V (X) + b1 +and +sup +Q∈L +MQV 2(X) ≤ ρ2V 2(X) + b2. +Proof. The proof is a simple extension of proofs of Lemma 5 and Proposition 6 in Miaso- +jedow and Niemiro (2017). First we note that in the first step of Rao and Teh’s algorithm +we do not add any new jumps. This implies that in order to obtain the desired bounds +on MβV (X) we simply need to bound the expectation EV (X′) of the jumps for the tra- +jectory X′ obtained on the Step 3. Analogously, instead of bounding MβV 2(X) we bound +EV 2(X′). Indeed we have +MβV 2(X) = E[V 2(X′) | X] = E[E(V 2(X′) +�� |X′| = n′ + 1) | X] +and E(|X′| = n′ +1 | X) ≤ n+1+ηT with V (X) = n+1, where |X′| denotes the number +of states in the trajectory X′. For the trajectory X′ = (¯t, S′) we have +V (X′) = 1 + +n′−1 +� +i=0 +I(S′ +i ̸= S′ +i+1). +Therefore, we get +EV 2(X′) = 1 + 2E +�n′−1 +� +i=0 +I(S′ +i ̸= S′ +i+1) +� ++ E +�� +i̸=j +I(S′ +i ̸= S′ +i+1)I(S′ +j ̸= S′ +j+1) +� +. +(6.11) +Applying Lemma 2 of Miasojedow and Niemiro (2017) together with the definition (6.5), +the definition of η and assumptions on likelihoods gj(Sjt), for each i = 0, . . . , n′ − 1 we +obtain +P +� +S′ +i = s | S′ +i+1 = s +� +≥ δi > 0. +This is a lower bound for the backward transition probability used by the FFBS algorithm. +An analogous inequality for the forward transition probability is also true. Hence, +E +� +I(S′ +i ̸= S′ +i+1) +� += E +� +E(I(S′ +i ̸= S′ +i+1) | S′ +i+1) +� += += P +� +S′ +i ̸= s | S′ +i+1 = s +� +≤ 1 − δi, +(6.12) +which means that we can bound the second term on the RHS of (6.11) from above +by 2 �n′−1 +i=0 (1 − δi). Also, for each i ̸= j we have +I(S′ +i ̸= S′ +i+1)I(S′ +j ̸= S′ +j+1) ≤ I(S′ +i ̸= S′ +i+1). +(6.13) +104 + +Thus, using (6.12) and (6.13), the third term on the RHS of (6.11) can be bounded by +E +� +�� +n′−1 +� +j=0 +n′−1 +� +i=0 +i̸=j +I(S′ +i ̸= S′ +i+1) +� +�� = +n′−1 +� +j=0 +n′−1 +� +i=0, +i̸=j +E +� +E(I(S′ +i ̸= S′ +i+1) | S′ +i+1) +� += += +n′−1 +� +j=0 +n′−1 +� +i=0, +i̸=j +P(S′ +i ̸= s | S′ +i+1 = s) ≤ +≤ +n′−1 +� +j=0 +n′−1 +� +i=0, +i̸=j +(1 − δi). +Combining both bounds we obtain +EV 2(X′) ≤ 1 + 2n′ − 2 +n′−1 +� +i=0 +δi + n′(n′ − 1) − +n′−1 +� +j=0 +n′−1 +� +i=0, +i̸=j +δi ≤ +≤ (1 − δ)(n′ + 1)2 + 1 ≤ (1 − δ)(n + 1) + b, +where δ = min +i {δi} ∈ (0, 1) and b is some finite constant. +This finishes the proof of +the second part of the lemma. +The first inequality can be shown either analogously to the second one or by applying +Jensen’s inequality. Indeed, +(MQV (X))2 ≤ MQV 2(X) ≤ ρ2V 2(X) + b2, +which in turn implies that +MQV (X) ≤ +� +ρ2V 2(X) + b2 ≤ √ρ2V (X) + +� +b2, +which concludes the proof. +Lemma 6.4. If E[V (X0)] < ∞, then sup +n≥1 +MQV (Xn) < ∞. If in addition E[V 2(X0)] < ∞, +then sup +n≥1 +MQV 2(Xn) < ∞. +Proof. 
Recall that Xn is the trajectory obtained after n-th iteration of Rao and Teh’s +algorithm starting from the trajectory X0. +As previously we can consider EV (Xn+1) +instead of MQV (Xn). Hence, by the previous lemma we have +E[V (Xn+1)] = E[E[V (Xn+1) | Xn]] = E[MQV (Xn)] ≤ ρEV (Xn) + b, +where ρ ∈ (0, 1) and b < ∞. Then, by iterating this majorization procedure recursively +we get +E[V (Xn+1)] ≤ ρn+1EV (X0) + b +n+1 +� +i=1 +ρi ≤ ρEV (X0) + +b +1 − ρ . +Since the RHS of this inequality does not depend on n, then E[V (Xn+1)] is bounded by +a finite constant. This concludes the proof for the first bound. The second inequality can +be shown analogously using the bound for supQ∈L MQV 2(X) in Lemma 6.3. +105 + +Lemma 6.5. For any compact set L ⊂ M there exists C ∈ (0, ∞) such that for +any Q, ˜Q ∈ L and all trajectories X we have +∥MQ(X, ·) − M ˜Q(X, ·)∥V ≤ CV (X)∥Q − ˜Q∥. +Proof. For any Q, ˜Q ∈ L the expression of interest (see the definitions (6.6)-(6.9)) can be +bounded by the sum of two terms as follows +sup +|f|≤V +��MQf(t, S) − M ˜Qf(t, S) +�� = sup +|f|≤V +���M S +QM J +Qf(t, S) − M S +˜QM J +˜Qf(t, S) +��� +≤ sup +|f|≤V +���M S +QM J +Qf(t, S) − M S +QM J +˜Qf(t, S) +��� + sup +|f|≤V +���M S +QM J +˜Qf(t, S) − M S +˜QM J +˜Qf(t, S) +��� +:= I1 + I2. +We can bound I1 by +∞ +� +k0=0 +· · · +∞ +� +kn−1=0 +� +n−1 +� +i=0 +I{ti < vi,1 < · · · < vi,ki < ti+1} +� +¯S +|f(¯t, ¯S)|× +× +���M J +Q((t, S), (¯t, Sv))M S +Q((¯t, Sv), (¯t, ¯S)) − M J +˜Q((t, S), (¯t, Sv))M S +Q((¯t, Sv), (¯t, ¯S)) +��� dv. +Recall that ki denotes the number of virtual jumps on the interval [ti, ti+1). Since |f| ≤ V +and for any possible ¯S we have V (¯t, ¯S) ≤ 1+n+�n−1 +i=0 ki, and � +¯S +M S +Q +� +(¯t, Sv), (¯t, ¯S) +� += 1 +(see (6.7)), then we can further bound I1 by +∞ +� +k0=0 +· · · +∞ +� +kn−1=0 +� +n−1 +� +i=0 +I{ti < vi,1 < · · · < vi,ki < ti+1}× +× +� +1 + n + +n−1 +� +i=0 +ki +� ���M J +Q ((t, S), (¯t, Sv)) − M J +˜Q ((t, S), (¯t, Sv)) +��� dv, +(6.14) +Next, let U denote the set of indices u such that only u-th rows of Q and ˜Q differ. Let +Q1 = Q, Q|U| = ˜Q and for u ∈ U we define the matrix Qu+1 as Qu with the u-th row +replaced by the corresponding row of ˜Q. In particular, for u ∈ U we have Qu(˜s) ̸= Qu+1(˜s) +for a certain state ˜s and for all states s ̸= ˜s we have Qu(s) = Qu+1(s). Then the expression +under the integral can be bounded by +���M J +Q ((t, S), (¯t, Sv)) − M J +˜Q ((t, S), (¯t, Sv)) +��� ≤ +≤ +� +u +��M J +Qu ((t, S), (¯t, Sv)) − M J +Qu+1 ((t, S), (¯t, Sv)) +�� . +Each term in the last sum can be expressed in the form +106 + +��M J +Qu((t, S), (¯t, Sv)) − M J +Qu+1((t, S), (¯t, Sv)) +�� = += +����� +n−1 +� +i=0 +[(η − Qu(Si)) (ti+1 − ti)]ki e−(η−Qu(Si))(ti+1−ti) − +− +n−1 +� +i=0 +[(η − Qu+1(Si)) (ti+1 − ti)]ki e−(η−Qu+1(Si))(ti+1−ti) +����� = += +n−1 +� +i=0 +(ti+1 − ti)ki +n−1 +� +i=0 +Si̸=˜s +(η − Qu(Si))ki e−(η−Qu(Si))(ti+1−ti)× +× +����(η − Qu(˜s)) +� +Si=˜s +ki +e +−(η−Qu(˜s)) � +Si=˜s +(ti+1−ti) +− (η − Qu+1(˜s)) +� +Si=˜s +ki +e +−(η−Qu+1(˜s)) � +Si=˜s +(ti+1−ti)���� . +Now let x = η − Qu(˜s), y = η − Qu+1(˜s), a = � +Si=˜s +ki and b = � +Si=˜s +(ti+1 − ti). Let +also r = max(x, y). Hence, we need to bound the expression |xae−bx − yae−by| for some +x, y, a, b > 0. From Lagrange’s mean value theorem we have +��xae−bx − yae−by�� ≤ sup +z∈(x,y) +���� +d +dz(zae−bz) +���� |x − y| = sup +z∈(x,y) +��aza−1e−bz − bzae−bz�� |x − y|. +Next we can bound the above supremum by +sup +z∈(x,y) +��aza−1e−bz − bzae−bz�� ≤ sup +z∈(x,y) +max(aza−1e−bz, bzae−bz). 
+(6.15) +Let us assume first that the first expression of (6.15) is the maximum, then we obtain +aza−1e−bz ≤ a˜ηa−1e−b˜η, +(6.16) +where ˜η = min(r, a−1 +b ). +In the case where the second expression is the maximum we +obtain analogously bzae−bz ≤ b˜ηae−b˜η, where ˜η = min(r, a +b). Using the first assumption +with the corresponding inequality (6.16), the fact that a ≤ +n−1 +� +i=0 +ki and the fact that the set +of indices U is finite, we can bound I1 by +� +u +∞ +� +k0=0 +· · · +∞ +� +kn−1=0 +� +n−1 +� +i=0 +I{ti < vi,1 < · · · < vi,ki < ti+1} +n−1 +� +i=0 +Si̸=˜s +(η − Qu(Si))ki e−(η−Qu(Si))(ti+1−ti) +× +n−1 +� +i=0 +Si=˜s +˜ηkie−˜η(ti+1−ti) · +n−1 +� +i=0 +ki +� +1 + n + +n−1 +� +i=0 +ki +� +|Qu+1(˜s) − Qu(˜s)| dv ≤ +≤ +� +u +|Qu+1(˜s) − Qu(˜s)| · E +�n−1 +� +i=0 +ki +� +1 + n + +n−1 +� +i=0 +ki +�� +≤ +≤ |U|∥Q − ˜Q∥(ηT + nηT + ηT + (ηT)2) = += |U|∥Q − ˜Q∥(ηT(n + 2) + (ηT)2) = C1(n)∥Q − ˜Q∥, +107 + +where C1(n) is a certain linear function of n. +Using the same technique and the fact that b ≤ T we can bound I1 by +� +u +|Qu+1(˜s) − Qu(˜s)| · E +� +T +� +1 + n + +n−1 +� +i=0 +ki +�� +≤ T|U|∥Q − ˜Q∥ · E +� +1 + n + +n−1 +� +i=0 +ki +� += += T|U|∥Q − ˜Q∥(1 + n + ηT) = C2(n)∥Q − ˜Q∥, +where C2(n) is a certain linear function of n. +Now we can bound I2 in a similar way as we did for I1 by an expression similar +to (6.14), namely by +∞ +� +k0=0 +· · · +∞ +� +kn−1=0 +� +n−1 +� +i=0 +I{ti < vi,1 < · · · < vi,ki < ti+1} +� +1 + n + +n−1 +� +i=0 +ki +� +× +× M J +˜Q((t, S), (¯t, Sv)) +� +¯S +���M S +Q((¯t, Sv), (¯t, ¯S)) − M S +˜Q((¯t, Sv), (¯t, ¯S)) +��� dv. +Before we continue, for any possible skeleton S let us denote a few auxiliary functions +LQ(S) = ν(S0) +n +� +i=1 +P(Si−1, Si) +k +� +j=1 +gj(Skj), +RQ = +� +S +LQ(S), +HQ(S) = LQ(S) +RQ +. +Therefore, we need to obtain the bound for +� +S +��HQ(S) − H ˜Q(S) +�� = +� +S +����� +LQ(S) +RQ +− L ˜Q(S) +R ˜Q +����� ≤ +≤ +� +S +��LQ(S) − L ˜Q(S) +�� +RQ ++ +� +S +L ˜Q(S) +����� +1 +RQ +− 1 +R ˜Q +����� . +(6.17) +The initial distribution ν and likelihoods gj for all j are the same for different intensity +matrices Q. Using this fact and “triangle” inequality for two products of positive numbers +����� +n +� +j=1 +xj − +n +� +j=1 +yj +����� ≤ +n +� +j=1 +|xj − yj| +j−1 +� +i=1 +xi +n +� +i=j+1 +yi, +with xi = P(Si−1, Si) and yi = ˜P(Si−1, Si), where ˜P is defined by ˜Q in the same way +as P defined by Q (see (6.5)), we obtain the inequality +� +S +��LQ(S) − L ˜Q(S) +�� ≤ +� +S +˜Ck +η ∥Q − ˜Q∥ +n +� +j=1 +j−1 +� +i=1 +P(Si−1, Si) +n +� +i=j+1 +˜P(Si−1, Si) ≤ +≤ +˜Ck +η ∥Q − ˜Q∥ +n +� +j=1 +� +Sj +� +Sj +j−1 +� +i=1 +P(Si−1, Si) +n +� +i=j+1 +˜P(Si−1, Si) ≤ +˜Ck +η ∥Q − ˜Q∥ · n · |S|. +108 + +Here we used the assumption that all likelihoods are bounded C ≤ gj ≤ ˜C for 1 ≤ j ≤ k, +and we locally denoted the number of all possible states of the process as |S|. Note, that +the sum over all possible skeletons S was divided into three sums: the first one is over +all possible states of Sj, the second - over all possible states of S1, . . . , Sj−1 and the last +one is over all possible states Sj+1, . . . , Sn. Applying again bounds on the likelihoods we +easily obtain that for any Q +1 +˜Ck ≤ +1 +RQ +≤ 1 +Ck , +which leads to the fact that the first expression in (6.17) is bounded from above by +C3(n)∥Q − ˜Q∥, where C3(n) is some linear function of n. The second expression in (6.17) +is bounded by +˜Ck +����� +1 +RQ +− 1 +R ˜Q +����� ≤ ˜Ck +� +S +��LQ(S) − L ˜Q(S) +�� +RQR ˜Q +. 
+Applying two previously obtained inequalities we derive the bound C4(n)∥Q − ˜Q∥, where +C4(n) is some linear function of n. Now combining all obtained bounds for I1 and I2 we +conclude the proof. +Lemma 6.6. For the measurable function V : X → [1, +∞) let us denote by +DV (β, β′) = sup +X +∥Mβ(X, ·) − Mβ′(X, ·)∥V +V (X) +the V -variation of the kernels Mβ and Mβ′ and let Fβ : X → R+ be the function such that +supβ∈K |Fβ|V < ∞. Moreover, define +ˆFβ = +� +n≥0 +M n +β (Fβ − πβ(Fβ)). +Then +|Mβ ˆFβ − Mβ′ ˆFβ′|V ≤ C{DV (β, β′) + |Fβ − Fβ′|V }. +Proof. The proof follows the same arguments as the proof of the Lemma 4.2 in Fort et al. +(2011) in the supplement materials to the paper. In addition, some references to the first +papers using similar argumentation can be found there. +First, we use the following decomposition of M k +βf − M k +β′f for any k ≥ 1 +M k +βf − M k +β′f = +k−1 +� +j=0 +M j +β (Mβ − Mβ′) +� +M k−j−1 +β′ +f − πβ′(f) +� +. +By the Proposition 7 in Miasojedow and Niemiro (2017) the sets {X : |V (X)| < h} are +the small sets for any h ∈ R. Therefore, combining it with Lemma (6.3) we have by +Theorem 9 in Roberts and Rosenthal (2004) that there exist constants Cβ and ρβ ∈ (0, 1) +such that +∥M k +β(X, ·) − πβ∥V ≤ Cβρk +βV (X). +109 + +This property is called geometric ergodicity of the kernel Mβ with invariant distribution πβ. +Hence, for any k ≥ 1 and any trajectory X⋆ +∥πβ − πβ′∥V +≤ ∥πβ − M k +β(X⋆, ·)∥V + ∥M k +β(X⋆, ·) − M k +β′(X⋆, ·)∥V + ∥M k +β′(X⋆, ·) − πβ′∥V +≤ +� +Cβρk +β + Cβ′ρk +β′ +� +V (X⋆) ++ sup +|f|V ≤1 +����� +k−1 +� +j=0 +M j +β (Mβ − Mβ′) +� +M k−j−1 +β′ +f − πβ′(f) +� +(X⋆) +����� . +(6.18) +We can bound each summand from the sum on the RHS by +sup +|f|V ≤1 +M j +β +���(Mβ − Mβ′)(M k−j−1 +β′ +f − πβ′(f))(X⋆) +��� . +Now let us denote H = [M k−j−1 +β′ +f − πβ′(f)]. Then the expression within the absolute +value operator is bounded by +sup +|f|V ≤1 +sup +|g|≤|H| +|(Mβ − Mβ′)g(X⋆)| ≤ sup +|f|V ≤1 +|H|V sup +|g|≤V +|(Mβ − Mβ′)g(X⋆)| = += sup +|f|V ≤1 +sup +X +|M k−j−1 +β′ +f(X) − πβ′(f)(X)| +V (X) +· ∥Mβ(X⋆, ·) − Mβ′(X⋆, ·)∥V ≤ +≤ Cβ′ρk−j−1 +β′ +· DV (β, β′)V (X⋆). +Thus the last term in the (6.18) is bounded by +Cβ′ DV (β, β′) +k−1 +� +j=0 +ρk−j−1 +β′ +M j +βV (X⋆) ≤ +≤ Cβ′ DV (β, β′) +k−1 +� +j=0 +ρk−j−1 +β′ +� +πβ(V ) + Cβρj +βV (X⋆) +� +≤ +≤ +Cβ′ +1 − ρβ′ DV (β, β′) (πβ(V ) + CβV (X⋆)) . +Taking the limit as k → +∞ in the first term in (6.18) we obtain +∥πβ − πβ′∥V ≤ +Cβ′ +1 − ρβ′ DV (β, β′) (πβ(V ) + CβV (X⋆)) . +(6.19) +Now from (7) in Fort et al. (2011) we have +Mβ ˆFβ − Mβ′ ˆFβ′ = +� +n≥1 +n−1 +� +j=0 +� +M j +β − πβ +� +(Mβ − Mβ′) +� +M n−j−1 +β′ +Fβ − πβ′(Fβ) +� +− +� +n≥1 +{M n +β′(Fβ′ − Fβ) − πβ′(Fβ′ − Fβ)} − +� +n≥1 +πβ{M n +β′Fβ − πβ′(Fβ)}. +(6.20) +110 + +Let us consider the first term. Similarly to the previous step by G we denote the operator +G = [M n−j−1 +β′ +Fβ − πβ′(Fβ)]. Then we can bound +��� +M j +β − πβ +� +(Mβ − Mβ′) +� +M n−j−1 +β′ +Fβ − πβ′(Fβ) +� +(X) +�� ≤ +≤ sup +|g|≤|G| +��� +M j +β − πβ +� +(Mβ − Mβ′) g(X) +�� ≤ +≤ |G|V sup +|g|≤V +��� +M j +β − πβ +� +(Mβ − Mβ′) g(X) +�� ≤ +≤ |G|V +sup +|h|≤∥Mβ−Mβ′∥V +��� +M j +β − πβ +� +h(X) +�� ≤ +≤ |G|V DV (β, β′) sup +|h|≤V +��� +M j +β − πβ +� +h(X) +�� ≤ +≤ Cβ′ρn−j−1 +β′ +|Fβ|V DV (β, β′) · Cβρj +βV (X). +For the second and third terms in (6.20) we obtain the bounds +��M n +β′(Fβ′ − Fβ)(X) − πβ′(Fβ′ − Fβ) +�� ≤ Cβ′ρn +β′V (X)|Fβ′ − Fβ|V +and +|πβ{M n +β′Fβ − πβ′(Fβ)}(X)| = |(πβ − πβ′){M n +β′Fβ − πβ′(Fβ)}(X)| ≤ +≤ ∥πβ − πβ′∥V |M n +β′Fβ(X) − πβ′(Fβ)|V ≤ +≤ ∥πβ − πβ′∥V Cβ′ρn +β′|Fβ|V . 
+(6.21) +Therefore, combining the inequalities (6.19) – (6.21) we get +|Mβ ˆFβ(X) − Mβ′ ˆFβ′(X)| ≤ +Cβ′Cβ +(1 − ρβ′)(1 − ρβ)|Fβ|V DV (β, β′)V (X)+ ++ +Cβ′ +1 − ρβ′ V (X)|Fβ′ − Fβ|V + ++ +Cβ′ +(1 − ρβ′)|Fβ|V DV (β, β′) (πβ(V ) + CβV (X)) . +Thus, since supβ∈K |Fβ|V < ∞, there exists a positive constant Lβ,β′ for which we have +|Mβ ˆFβ(X) − Mβ′ ˆFβ′(X)| ≤ Lβ,β′V (X)(DV (β, β′) + |Fβ′ − Fβ|V ). +This concludes the proof. +The proof of the main Theorem 6.1 is based on Theorem 6.7 which is obtained by com- +bining Theorem 5.4 and Proposition 5.5 of Majewski et al. (2018) with a slight adjustment +of the notation to our context. For any compact convex set K by +NK(x) = {a ∈ Rd : ⟨a, z − x⟩ for all z ∈ K} +we denote the normal cone to K at the point x. We consider an open set B ∈ Rd and +functions f, g : B → R. We assume that f is a continuously differentiable function and +also for all β ∈ K its gradient satisfies +∇f(β) = +� +X +Φ(β, X)πβ(dX) +111 + +for some probability measure πβ and an integrable function Φ(β, X). +By {βk, k ∈ N} we denote the sequence generated by the projected SPGD: +βk ∈ +� +K +� +proxγk,g(βk−1 − γkΦ(βk−1, ξk)) +� +, +(6.22) +where ξk is a random variable with πβk−1 distribution. Moreover, by {δk, n ∈ N} we denote +the gradient perturbation sequence defined by +δk = Φ(βk−1, ξk) − ∇ℓ(βk−1). +Moreover, for any measurable function W : X → [1, +∞) recall the definitions of +∥µ∥W and |f|W given in (6.9) and (6.10). Then we define W-variation of the kernels Mβ +and Mβ′ by +DW(β, β′) = sup +X +∥Mβ(X, ·) − Mβ′(X, ·)∥W +W(X) +. +Theorem 6.7. Denote +S = {β ∈ K : 0 ∈ ∇f(β) + ∂g(β) − NK(β)}, +where ∂g is a subgradient of g : B → R (see e.g. Rockafellar (1970)). Suppose that the set +(f +g)(S) has empty interior and sup +k∈N +∥δk∥ ≤ ∞. We also make the following assumptions. +(1) The function g is convex, Lipschitz and bounded from below. +(2) The sequence of step sizes {γk} satisfies γk > 0 and limk→∞ γk = 0 and +∞ +� +k=1 +γk = ∞, +∞ +� +k=1 +|γk − γk−1| < ∞, +∞ +� +k=1 +γ2 +k < ∞. +(3) There exist constants ρ ∈ [0, 1) and b < ∞ and a measurable function W : X → +[1, +∞) such that +sup +β∈K +|Φ(β, ·)|W 1/2 < ∞, +sup +β∈K +MβW ≤ λW + b. +In addition, for any l ∈ (0, 1] there exists C < ∞ and ρ ∈ (0, 1) such that for +any X ∈ X +sup +β∈K +∥M n +β (X, ·) − πβ∥W l ≤ CρnW l(X). +(4) The kernels Mβ and the stationary distributions πβ are locally Lipschitz with respect +to β, i.e. for any compact set K and any β, β′ ∈ K there exists C < ∞ such that +sup +β∈K +∥Φ(β, ·) − Φ(β′, ·)∥W 1/2 + DW 1/2(β, β′) ≤ C∥β − β′∥. +(5) E[W(ξ1)] < ∞. +Then the sequence {xk, k ∈ N} generated by iterations (6.22) converges to S. +112 + +Proof of Theorem 6.1. For better transparency of the proof we will use m = 1 in (6.4), +the generalization of the reasoning to the case of m > 1 is straightforward. In our case +the role of the function f plays the negative log-likelihood ℓ(β) and the function g is the +ℓ1-penalty. Both functions satisfy the assumptions of Theorem 6.7. +Then, by the formula (5.14) for the gradient of the negative log-likelihood function +and the Fisher identity in (6.3) the function Φ(βk, X) in our case takes the form +Φ(βk, X) = +� +w∈V +� +c∈X−w +� +s̸=s′ +� +−nw(c; s, s′) + tw(c; s) exp(βw +k,s,s′⊤Zw(c)) +� +Zw(c), +(6.23) +where we take βk from the k-th iteration of the p-SPGD algorithm as the parameter +vector. Other components such as nw(c; s, s′), tw(c; s) and Z(c) correspond to a single +trajectory X of the Markov jump process. 
+Integrating this function over all possible +trajectories with respect to πβ = pβ(Y | X) gives us the desired gradient ∇ℓ(β) of +the negative log-likelihood. +In place of the function W in the assumptions of Theorem 6.7 we take the function V 2. +Note that the original Theorem 5.4 of Majewski et al. (2018) on the convergence of +the algorithm does not use the function W, instead it has an additional assumption on +the gradient perturbation sequence +δk = Φ(βk−1, Xk) − ∇ℓ(βk−1), +k ∈ N. +That assumption states that the sequence {δk, k ∈ N} can be decomposed as δk = eδ +k +rδ +k, +where {eδ +k, k ∈ N} and {rδ +k, k ∈ N} are two sequences satisfying limk→∞ ∥rδ +k∥ = 0 and the +series �∞ +k=1 γkeδ +k converges. However, Proposition 5.5 in Majewski et al. (2018) implies +that by introducing Assumptions (3)–(5) we obtain the required decomposition of δk. +Therefore let us check the rest of the assumptions of Theorem 6.7. +Assumption (2) on step-sizes is automatically satisfied. First we review Assumption (3) +with W = V 2, which consists of three conditions. The first condition that sup +βk∈K +|Φ(βk, ·)|V +is bounded is easy to check because for any trajectory X the sum of the terms nw(c; s, s′) +is bounded by the total number of jumps V (X), the sum of the terms tw(c; s) is bounded +by the total observation time T and vectors βk come from the compact set K, which +means that exponent is bounded by some constant. The second condition follows directly +from Lemma 6.3. +The last condition representing geometric ergodicity was shown in +Lemma 6.6. +In our setting Assumption (4) takes the form +DV (β, β′) + |ˆΦ(β, ·) − ˆΦ(β′, ·)|V ≤ C∥β − β′∥ +for some constant C. We obtain it by combining Lemma 6.5 and the trivial fact that +|Φ(β, ·) − Φ(β′, ·)|V ≤ C∥β − β′∥ for some constant C. +In the course of the proof of the mentioned above decomposition Majewski et al. (2018) +used the following property of the function W, which needs to be checked as well. For any +trajectory ξk under the assumption EW(ξ0) < ∞ there holds sup +k≥1 +E[W(ξk)] < ∞. In our +113 + +case we can obtain the same property. Assuming EV 2(X0) < ∞ by Lemma 6.4 we have +that sup +k≥1 +E[V 2(Xk)] < ∞. This concludes the proof of the theorem. +6.4 +Numerical results +In this section we describe the details of implementation of the proposed algorithm as +well as the results of experimental studies. +6.4.1 +Details of implementation +We provide in details implementation of the proposed algorithm in practice. +Recall +that the optimization problem (6.1) is solved by the iterative algorithm called projected +stochastic proximal gradient descent given in (6.2). Instead of the gradient of the ne- +gative log-likelihood ∇ℓ(β) we use its MCMC approximation Φ(β, X1, . . . , Xm), where +X1, . . . , Xm is a set of trajectories generated by Rao and Teh’s scheme given in Sec- +tion 6.2. The solution of (6.1) depends on the choice of λ. As we mentioned in previous +chapters, finding the „optimal” parameter λ and the threshold δ is difficult in practice. In +this case we also solve it using the same information criteria as in Chapter 5, where again +instead of the gradient of the negative log-likelihood we use its MCMC approximation. +The function Φ(θ, X1, . . . , Xm) is an average of the functions Φ(θ, Xi) introduced +in (6.23) (recall that we use the symbol β only for the true parameter vector and θ +otherwise). Now, in the analogous way as we divided the optimization problem (5.5) in +Subsection 5.4.1 we can divide the current one. 
Namely, for fixed $w \in \mathcal{V}$ and $s, s' \in \{0, 1\}$ with $s \neq s'$, the corresponding summand in $\Phi(\theta, X_1, \ldots, X_m)$ is a function which depends on the vector $\theta$ restricted only to its coordinate vector $\theta^w_{s,s'}$ (see notation (5.1)). So, for each triple $w$ and $s \neq s'$ we can solve the problem separately. Let us denote these summands of $\Phi(\theta, X_1, \ldots, X_m)$ by $\Phi^w_{s,s'}(\theta^w_{s,s'})$.

Hence, in the current implementation we can use the scheme from Subsection 5.4.1. Namely, we start by computing a sequence of minimizers on a grid: for any triple $w \in \mathcal{V}$, $s \neq s'$ we create a finite sequence $\{\lambda_i\}_{i=1}^N$ uniformly spaced on the log scale, starting from the largest $\lambda_i$, which corresponds to the empty model. Next, for each value $\lambda_i$ we compute the estimator $\hat\beta^w_{s,s'}[i]$ of the vector $\beta^w_{s,s'}$:
\[
\hat\beta^w_{s,s'}[i] = \operatorname*{argmin}_{\theta^w_{s,s'}} \Big\{ \Phi^w_{s,s'}(\theta^w_{s,s'}) + \lambda_i \|\theta^w_{s,s'}\|_1 \Big\}.
\tag{6.24}
\]
The notation $\hat\beta^w_{s,s'}[i]$ means the $i$-th approximation of $\beta^w_{s,s'}$. To solve (6.24) numerically for a given $\lambda_i$ we use the SPGD algorithm without the projection onto the compact set; in practice the algorithm still converges well, so we did not use the projection. The final LASSO estimator $\hat\beta^w_{s,s'} := \hat\beta^w_{s,s'}[i^*]$ is chosen using the Bayesian Information Criterion (BIC) applied to the MCMC approximation of the gradient of the negative log-likelihood, i.e.
\[
i^* = \operatorname*{argmin}_{1\le i\le N} \Big\{ n\,\Phi^w_{s,s'}(\hat\beta^w_{s,s'}[i]) + \log(n)\,\|\hat\beta^w_{s,s'}[i]\|_0 \Big\}.
\]
Here $\|\hat\beta^w_{s,s'}[i]\|_0$ denotes the number of non-zero elements of $\hat\beta^w_{s,s'}[i]$ and $n$ is the number of jumps in the trajectory generated by Rao and Teh's algorithm. In our simulations we use $N = 100$.

Finally, the threshold $\delta$ is obtained using the Generalized Information Criterion (GIC) as in Subsection 5.4.1, also applied to the MCMC approximation of the gradient of the negative log-likelihood. For a prespecified sequence of thresholds $D$ we calculate
\[
\delta^* = \operatorname*{argmin}_{\delta\in D} \Big\{ n\,\Phi^w_{s,s'}(\hat\beta^{w,\delta}_{s,s'}) + \log(2d(d-1))\,\|\hat\beta^{w,\delta}_{s,s'}\|_0 \Big\},
\]
where $\hat\beta^{w,\delta}_{s,s'}$ is the LASSO estimator $\hat\beta^w_{s,s'}$ after thresholding at the level $\delta$. A sketch of this model-selection scheme is given below.
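The following minimal Python sketch illustrates the lambda-grid search with BIC selection and GIC thresholding. It assumes a user-supplied solver lasso_solve(lam) implementing (6.24) (e.g. SPGD without the projection) and an objective evaluator Phi_val(beta); these names, the span of the grid and the threshold grid are illustrative placeholders, not prescriptions from the thesis.

import numpy as np

def select_model(lasso_solve, Phi_val, n, d, lam_max, N=100,
                 thresholds=(0.0, 0.05, 0.1, 0.2, 0.5)):
    # Grid of N lambdas, uniformly spaced on the log scale, starting
    # from the largest value lam_max (the empty model).
    lams = np.logspace(np.log10(lam_max), np.log10(lam_max) - 4, N)

    # BIC over the LASSO path: n * Phi(beta) + log(n) * ||beta||_0.
    path = [lasso_solve(lam) for lam in lams]
    bic = [n * Phi_val(b) + np.log(n) * np.count_nonzero(b) for b in path]
    beta_hat = path[int(np.argmin(bic))]

    # GIC over thresholded versions, with penalty log(2d(d-1)) per
    # non-zero coordinate.
    def gic(delta):
        b = np.where(np.abs(beta_hat) > delta, beta_hat, 0.0)
        return n * Phi_val(b) + np.log(2 * d * (d - 1)) * np.count_nonzero(b)

    delta_star = min(thresholds, key=gic)
    return np.where(np.abs(beta_hat) > delta_star, beta_hat, 0.0)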
6.4.2 Simulated data

We consider the chain model analogous to the model M1 in Subsection 5.4.2. All vertices have the "chain structure", i.e. for any node except the first one, its set of parents contains only the previous node. Namely, we put $\mathcal{V} = \{1, \ldots, d\}$ and $\mathrm{pa}(k) = \{k-1\}$ if $k > 1$, and $\mathrm{pa}(1) = \emptyset$. We construct the CIM in the same way as in Subsection 5.4.2. Namely, for the first node the intensities of leaving both states are equal to 5. For the remaining nodes $k = 2, \ldots, d$ we choose randomly $a \in \{0, 1\}$ and define $Q_k(c, s, s') = 9$ if $s \neq |c - a|$, and $1$ otherwise. In other words, we choose randomly whether the node prefers to be in the same state as its parent ($a = 0$) or not ($a = 1$).

We consider two cases, with the number of nodes equal to $d = 5$ and $d = 10$, so the number of possible parameters of the model (the size of $\beta$) is $2d^2 = 50$ or $200$, respectively. We use $T = 10$ for 5 nodes and $T = 20$ for 10 nodes. We replicate the simulations 100 times for each scenario. As the partial observation we take 100, 200 and 400 equally spaced points for 5 nodes, and 200, 400 and 800 points for 10 nodes.

In Figure 6.1 we present the averaged results of the simulations in terms of three quality measures:

• power, the proportion of correctly selected edges;
• false discovery rate (FDR), the fraction of incorrectly selected edges among all selected edges;
• true model (TM), an indicator of whether the algorithm selected the true model without any errors.

In Figure 6.2 we provide the results of simulations for the same models in the case of complete trajectories. The results of the experiments confirm that the proposed method works in a satisfactory way. With an increasing number of observation points the results approach those obtained for complete data: the larger the number of points, the higher the power of the algorithm, which tends to 1. The FDR is quite low in all cases. In about half of the simulations in the case of 10 nodes and time $T = 20$, the algorithm discovers the true model when the number of observation points is chosen large enough.

Figure 6.1: Results of simulations for partially observed data. Panels show Power, FDR and TM against the number of observations per interval [0, 1], for 5 nodes (T = 10) and 10 nodes (T = 20).

Figure 6.2: Results of simulations for fully observed data. Panels show Power, FDR and TM against the observation time, for 5, 10 and 20 nodes.

6.5 FFBS Algorithm

For completeness of the proposed scheme we provide a description of the forward-filtering backward-sampling algorithm for discrete-time Markov chains, taken from Rao and Teh (2013) with slightly changed notation. Earlier references for the FFBS algorithm can be found there as well.

Let $(S_0, \ldots, S_n)$ be a discrete-time Markov chain with a discrete state space $\mathcal{X} = \{1, \ldots, N\}$. Let $P$ be a transition matrix, $P(s, s') = p(S_{j+1} = s' \mid S_j = s)$. Let $\nu$ be an initial distribution over states at time point 0 and let $Y = (Y_0, \ldots, Y_n)$ be a sequence of noisy observations with likelihoods $g_j(s) = p(Y_j \mid S_j = s)$. Given a set of observations $Y = (Y_0, \ldots, Y_n)$, FFBS returns an independent posterior sample of the state vector.

Define $a_j(s) = p(Y_0, \ldots, Y_{j-1}, S_j = s)$. From the Markov property, we have the following recursion:
\[
a_{j+1}(s') = \sum_s a_j(s)\, g_j(s)\, P(s, s').
\]
We calculate this for all possible states $s' \in \mathcal{X}$, performing a forward pass. At the end of the forward pass we obtain the distribution
\[
b_n(s) = g_n(s)\, a_n(s) = p(Y, S_n = s) \propto p(S_n = s \mid Y)
\]
and sample $S_n$ from it. Next, note that
\[
p(S_j = s \mid S_{j+1} = s', Y) \propto p(S_j = s, S_{j+1} = s', Y)
= a_j(s)\, g_j(s)\, P(s, s')\, p(Y_{j+1}, \ldots, Y_n \mid S_{j+1} = s')
\propto a_j(s)\, g_j(s)\, P(s, s'),
\]
where the second equality follows from the Markov property. This is also an easy distribution to sample from, and the backward pass of FFBS successively samples new elements of the Markov chain from $S_{n-1}$ down to $S_0$. The pseudocode for the algorithm is given below.

Algorithm 1: The forward-filtering backward-sampling algorithm
Input: an initial distribution over states $\nu$, a transition matrix $P$, a sequence of noisy observations $Y = (Y_0, \ldots, Y_n)$ with likelihoods $g_j(s) = p(Y_j \mid S_j = s)$.
Output: a realization of the Markov chain $(S_0, \ldots, S_n)$.
Initialize $a_0(s) = \nu(s)$.
for $j = 0$ to $n - 1$:
    $a_{j+1}(s') = \sum_s a_j(s)\, g_j(s)\, P(s, s')$ for all $s' \in \mathcal{X}$.
Sample $S_n \sim b_n(\cdot)$, where $b_n(s) = g_n(s)\, a_n(s)$.
for $j = n - 1$ down to $0$:
    define $b_j(s) = a_j(s)\, g_j(s)\, P(s, S_{j+1})$;
    sample $S_j \sim b_j(\cdot)$.
return $(S_0, \ldots, S_n)$
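For illustration, a self-contained Python sketch of this FFBS pseudocode follows; the renormalization of the forward messages is a standard numerical-stability detail that we add here and that is not spelled out in the pseudocode.

import numpy as np

def ffbs(nu, P, G, rng=None):
    # nu: (N,) initial distribution; P: (N, N) transition matrix;
    # G: (n+1, N) observation likelihoods, G[j, s] = p(Y_j | S_j = s).
    # Returns one posterior sample (S_0, ..., S_n) given the observations.
    rng = np.random.default_rng() if rng is None else rng
    m, N = G.shape
    a = np.empty((m, N))
    a[0] = nu
    # Forward pass: a_{j+1}(s') = sum_s a_j(s) g_j(s) P(s, s').
    for j in range(m - 1):
        a[j + 1] = (a[j] * G[j]) @ P
        a[j + 1] /= a[j + 1].sum()   # renormalize for numerical stability
    S = np.empty(m, dtype=int)
    # Sample S_n from b_n(s) proportional to g_n(s) a_n(s).
    b = G[-1] * a[-1]
    S[-1] = rng.choice(N, p=b / b.sum())
    # Backward pass: S_j ~ b_j(s) proportional to a_j(s) g_j(s) P(s, S_{j+1}).
    for j in range(m - 2, -1, -1):
        b = a[j] * G[j] * P[:, S[j + 1]]
        S[j] = rng.choice(N, p=b / b.sum())
    return S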
Chapter 7

Conclusions and discussion

In this thesis we explored two types of probabilistic graphical models (PGMs): Bayesian networks (BNs) and continuous time Bayesian networks (CTBNs). First, we explained the concept of PGMs and the motivation to study them, with a few examples of successful applications. Then we discussed the PGMs of interest more thoroughly, describing the problems within both frameworks, and provided the necessary preliminaries. In terms of contributions we focused on structure learning, which is one of the most challenging tasks in the process of exploring PGMs and is interesting in itself. We also discussed other types of problems and reviewed some previously known results concerning them to provide context.

The problem of structure learning for BNs is difficult due to the superexponential growth of the space of directed acyclic graphs (DAGs) with the number of variables, and also because the underlying graph needs to be acyclic. We solve this problem by dividing it into two tasks. First, we use a known method called partition MCMC to slice the set of variables into layers, where any variable in any layer can have parents only from the previous layers and has at least one parent from the adjacent previous layer. Second, we find the arrows using the knowledge about the layers. In the case of continuous data we use the assumption that our network is a Gaussian Bayesian network, and hence each variable is a linear combination of its parents. Thus, we solve the problem of finding arrows by finding the non-zero coefficients in the linear combination of all the variables from the previous layers using the Thresholded LASSO estimator. In the case of discrete binary data we use the assumption that the probability of each variable being equal to 1 is a sigmoid function of a linear combination of its parents. Hence, again we solve the problem of finding arrows by finding the non-zero coefficients in the linear combination of all the variables from the previous layers, using the Thresholded LASSO estimator for logistic regression. Finally, for discrete data where each variable has a finite state space we use a softmax function instead of the sigmoid function. We demonstrated theoretical consistency of the LASSO and Thresholded LASSO estimators for the continuous model and showed their effectiveness on benchmark Bayesian networks of different sizes and structures, comparing the proposed method to several existing methods for structure learning.

The problem of structure learning for CTBNs in the case of complete data is also reduced to solving an optimization problem for the $\ell_1$-penalized maximum likelihood function. We assumed that a conditional intensity of a variable is a linear function of the states of its parents, which can easily be extended to a polynomial dependence. Starting from the full graph, the LASSO estimator removes irrelevant edges and estimates the parameters of the existing ones simultaneously. The thresholded version of this LASSO estimator learns only the structure. We proved the consistency of the proposed estimators and demonstrated the coherence of the theoretical results with numerical results from simulated data.

The last problem considered in the thesis was structure learning for CTBNs in the case of incomplete data.
The optimization problem takes the same form as for complete data, but we can no longer write the likelihood function explicitly. Instead of the negative log-likelihood function we used its Markov chain Monte Carlo approximation, where the Markov chain was generated using Rao and Teh's algorithm. The optimization problem itself was solved by the projected stochastic proximal gradient descent algorithm. We proved the convergence of this algorithm to the set of stationary points of the minimized function. We used the same assumption on conditional intensities as in the case of complete data. In practice, to discover the arrows we used the thresholded version of the obtained estimator. We showed on a small simulated example that the quality of the proposed method is similar to that for complete data and increases with the number of observed points per interval.

As for future research, we want to obtain theoretical results for Bayesian networks in the case of discrete data similar to those we obtained for continuous data. We also intend to perform more experiments and comparisons with existing approaches for all the proposed methods. For some methods there are no open implementations, or the implementations are in different programming languages, which makes comparison difficult. The main goal was to show the theoretical value of the proposed methods and that the results of the experiments are consistent with the theory, which in our opinion was achieved.

Bibliography

Andersen, P. K. and Gill, R. D. (1982). Cox's regression model for counting processes: A large sample study. Ann. Statist., 10:1100–1120.

Baraniuk, R., Duarte, M., and Hegde, C. (2011). Introduction to compressive sensing. Connexions e-textbook.

Bass, R. F. (2011). Stochastic Processes. Cambridge Series in Statistical and Probabilistic Mathematics. Cambridge University Press.

Bathla Taneja, S., Douglas, G., Cooper, G., Michaels, M., Druzdzel, M., and Visweswaran, S. (2021). Bayesian network models with decision tree analysis for management of childhood malaria in Malawi. BMC Medical Informatics and Decision Making, 21.

Beck, A. and Teboulle, M. (2009). A fast iterative shrinkage-thresholding algorithm for linear inverse problems. SIAM Journal on Imaging Sciences, 2:183–202.

Bickel, P. J., Ritov, Y., and Tsybakov, A. B. (2009). Simultaneous analysis of Lasso and Dantzig selector. The Annals of Statistics, 37:1705–1732.

Boudali, H. and Dugan, J. B. (2006). A continuous-time Bayesian network reliability modeling and analysis framework. IEEE Transactions on Reliability, 55(1):86–97.

Bühlmann, P. and van de Geer, S. (2011). Statistics for High-Dimensional Data: Methods, Theory and Applications. Springer Series in Statistics, New York: Springer.

Casella, G. and George, E. I. (1992). Explaining the Gibbs sampler. The American Statistician, 46(3):167–174.

Chen, X. and Xuan, J. (2020). Bayesian inference of gene regulatory network. In Tang, N., editor, Bayesian Inference on Complicated Data, chapter 5. IntechOpen, Rijeka.

Chung, K. and Walsh, J. (2005). Markov Processes, Brownian Motion, and Time Symmetry. 2nd ed. Springer New York, NY.

Colombo, D. and Maathuis, M. H. (2014). Order-independent constraint-based causal structure learning. J. Mach. Learn. Res., 15(1):3741–3782.

Cooper, G. F. (1990). The computational complexity of probabilistic inference using Bayesian belief networks. Artificial Intelligence, 42(2):393–405.
Daly, R., Shen, Q., and Aitken, S. (2011). Learning Bayesian networks: approaches and issues. The Knowledge Engineering Review, 26(2):99–157.

Dempster, A. P., Laird, N. M., and Rubin, D. B. (1977). Maximum likelihood from incomplete data via the EM algorithm. Journal of the Royal Statistical Society: Series B (Methodological), 39(1):1–22.

Douc, R., Moulines, E., and Stoffer, D. (2014). Nonlinear Time Series: Theory, Methods and Applications with R Examples. CRC Press.

Eaton, D. and Murphy, K. (2007). Bayesian structure learning using dynamic programming and MCMC. UAI.

Fan, Y. and Shelton, C. R. (2012). Learning continuous-time social network dynamics. arXiv:1205.2648.

Fort, G., Moulines, E., and Priouret, P. (2011). Convergence of adaptive and interacting Markov chain Monte Carlo algorithms. Ann. Statist., 39:3262–3289.

Frey, B. and Jojic, N. (2005). A comparison of algorithms for inference and learning in probabilistic graphical models. IEEE Transactions on Pattern Analysis and Machine Intelligence, 27(9):1392–1416.

Friedman, N. and Koller, D. (2001). Being Bayesian about network structure: A Bayesian approach to structure discovery in Bayesian networks. Mach. Learn., 50.

Gasse, M., Aussem, A., and Elghazel, H. (2014). A hybrid algorithm for Bayesian network structure learning with application to multi-label learning. Expert Syst. Appl., 41(15):6755–6772.

Gatti, E., Luciani, D., and Stella, F. (2012). A continuous time Bayesian network model for cardiogenic heart failure. Flexible Services and Manufacturing Journal, 24(4):496–515.

Gelman, A. and Shirley, K. (2012). Inference from simulations and monitoring convergence. Handbook of Markov Chain Monte Carlo.

Geman, S. and Geman, D. (1984). Stochastic relaxation, Gibbs distributions, and the Bayesian restoration of images. IEEE Transactions on Pattern Analysis and Machine Intelligence, PAMI-6(6):721–741.

Geyer, C. (2011). Introduction to Markov Chain Monte Carlo, pages 3–48. CRC Press.

Giudici, P. and Castelo, R. (2003). Improving Markov Chain Monte Carlo model search for data mining. Machine Learning, 50:127–158.

Grzegorczyk, M. and Husmeier, D. (2008). Improving the structure MCMC sampler for Bayesian networks by introducing a new edge reversal move. Machine Learning, 71(2-3):265–305.

Gupta, A., Slater, J., Boyne, D., Mitsakakis, N., Beliveau, A., Druzdzel, M., Brenner, D., Hussain, S., and Arora, P. (2019). Probabilistic graphical modeling for estimating risk of coronary artery disease: Applications of a flexible machine-learning method. Medical Decision Making, 39:1032–1044.

Hastie, T., Tibshirani, R., and Wainwright, M. (2015). Statistical Learning with Sparsity: The Lasso and Generalizations. Chapman & Hall/CRC.

Hastings, W. K. (1970). Monte Carlo sampling methods using Markov chains and their applications. Biometrika, 57(1):97–109.

Heckerman, D. (2021). A tutorial on learning with Bayesian networks. arXiv:2002.00269.

Huang, J., Sun, T., Ying, Z., Yu, Y., and Zhang, C.-H. (2013). Oracle inequalities for the lasso in the Cox model. Annals of Statistics, 41(3):1142–1165.

Huang, J. and Zhang, C.-H. (2012). Estimation and selection via absolute penalized convex minimization and its multistage adaptive applications. Journal of Machine Learning Research, 13:1839–1864.

Jacod, J. and Shiryaev, A. N. (2003). Limit Theorems for Stochastic Processes. Springer Berlin Heidelberg.
Jorge, P., Abrantes, A., Lemos, J., and Marques, J. (2007). Long term tracking of pedestrians with groups and occlusions. Bayesian Network Technologies: Applications and Graphical Models, pages 151–175.

Jorge, P., Abrantes, A., and Marques, J. (2004). On-line object tracking with Bayesian networks. https://www.researchgate.net/publication/251372022.

Koivisto, M. and Sood, K. (2004). Exact Bayesian structure discovery in Bayesian networks. J. Mach. Learn. Res., 5:549–573.

Koller, D. and Friedman, N. (2009). Probabilistic Graphical Models: Principles and Techniques. Adaptive Computation and Machine Learning. MIT Press.

Komodakis, N., Paragios, N., and Tziritas, G. (2007). MRF optimization via dual decomposition: Message-passing revisited. In 2007 IEEE 11th International Conference on Computer Vision, pages 1–8.

Kuipers, J. and Moffa, G. (2017). Partition MCMC for inference on acyclic digraphs. Journal of the American Statistical Association, 112(517):282–299.

Lauritzen, S. L. and Spiegelhalter, D. J. (1988). Local computations with probabilities on graphical structures and their application to expert systems. Journal of the Royal Statistical Society. Series B (Methodological), 50(2):157–224.

Lezaud, P. (1998). Chernoff-type bound for finite Markov chains. The Annals of Applied Probability, 8(3):849–867.

MacKay, D. J. C. (2003). Information Theory, Inference, and Learning Algorithms. Cambridge University Press.

Madigan, D., York, J., and Allard, D. (1995). Bayesian graphical models for discrete data. International Statistical Review / Revue Internationale de Statistique, 63(2):215–232.

Majewski, S., Miasojedow, B., and Moulines, E. (2018). Analysis of nonsmooth stochastic approximation: the differential inclusion approach. arXiv:1805.01916v1.

Meng, X.-L. and Rubin, D. B. (1991). Using EM to obtain asymptotic variance-covariance matrices: The SEM algorithm. Journal of the American Statistical Association, 86(416):899–909.

Miasojedow, B. and Niemiro, W. (2017). Geometric ergodicity of Rao and Teh's algorithm for Markov jump processes and CTBNs. Electronic Journal of Statistics, 11(2):4629–4648.

Miasojedow, B. and Rejchel, W. (2018). Sparse estimation in Ising model via penalized Monte Carlo methods. Journal of Machine Learning Research, 19(75):1–26.

Minka, T. P. (2001). Expectation propagation for approximate Bayesian inference. In Proceedings of the 17th Conference in Uncertainty in Artificial Intelligence, UAI '01, pages 362–369, San Francisco, CA, USA. Morgan Kaufmann Publishers Inc.

Negahban, S., Yu, B., Wainwright, M. J., and Ravikumar, P. K. (2009). A unified framework for high-dimensional analysis of M-estimators with decomposable regularizers. In Advances in Neural Information Processing Systems, pages 1348–1356.

Nodelman, U. (2007). Continuous Time Bayesian Networks. PhD thesis, Department of Computer Science, Stanford University.

Nodelman, U. and Horvitz, E. (2004). Continuous time Bayesian networks for inferring users' presence and activities with extensions for modeling and evaluation. https://www.researchgate.net/publication/228686433.

Nodelman, U., Koller, D., and Shelton, C. (2005). Expectation propagation for continuous time Bayesian networks. In Proceedings of the Twenty-first Conference on Uncertainty in AI (UAI), pages 431–440, Edinburgh, Scotland, UK.

Nodelman, U., Shelton, C., and Koller, D. (2002). Continuous time Bayesian networks. In Proceedings of the Eighteenth Conference on Uncertainty in Artificial Intelligence (UAI), pages 378–387.
Nodelman, U., Shelton, C. R., and Koller, D. (2012). Expectation maximization and complex duration distributions for continuous time Bayesian networks. arXiv:1207.1402.

Opgen-Rhein, R. and Strimmer, K. (2007). From correlation to causation networks: a simple approximate learning algorithm and its application to high-dimensional plant gene expression data. BMC Systems Biology, 1(37).

Pearl, J. (1985). Bayesian networks: A model of self-activated memory for evidential reasoning. In Proc. of Cognitive Science Society (CSS-7).

Pearl, J. (2000). Causality: Models, Reasoning and Inference. Cambridge University Press.

Pokarowski, P. and Mielniczuk, J. (2015). Combined l1 and greedy l0 penalized least squares for linear model selection. J. Mach. Learn. Res., 16:961–992.

Protter, P. E. (2005). Stochastic Integration and Differential Equations. Springer Berlin Heidelberg.

Rabiner, L. and Juang, B. (1986). An introduction to hidden Markov models. IEEE ASSP Magazine, 3(1):4–16.

Raftery, A. E. (1995). Bayesian model selection in social research. Sociological Methodology, 25:111–163.

Rao, V. and Teh, Y. W. (2013). Fast MCMC sampling for Markov jump processes and extensions. Journal of Machine Learning Research, 14:3207–3232.

Roberts, G. O. and Rosenthal, J. S. (2004). General state space Markov chains and MCMC algorithms. Probability Surveys, 1:20–71.

Rockafellar, R. T. (1970). Convex Analysis. Princeton Mathematical Series. Princeton University Press, Princeton, N.J.

Russell, S. and Norvig, P. (2010). Artificial Intelligence: A Modern Approach. Prentice Hall, 3rd edition.

Sachs, K., Perez, O., Pe'er, D., Lauffenburger, D. A., and Nolan, G. P. (2005). Causal protein-signaling networks derived from multiparameter single-cell data. Science, 308(5721):523–529.

Schäfer, J. and Strimmer, K. (2005). A shrinkage approach to large-scale covariance matrix estimation and implications for functional genomics. Statistical Applications in Genetics and Molecular Biology, 4(1).

Scutari, M. (2010). Learning Bayesian networks with the bnlearn R package. Journal of Statistical Software, 35(3):1–22.

Scutari, M., Graafland, C. E., and Gutiérrez, J. M. (2018). Who learns better Bayesian network structures: Accuracy and speed of structure learning algorithms. arXiv:1205.2648.

Sontag, D. and Jaakkola, T. (2007). New outer bounds on the marginal polytope. In Platt, J., Koller, D., Singer, Y., and Roweis, S., editors, Advances in Neural Information Processing Systems, volume 20. Curran Associates, Inc.

Spiegelhalter, D. J. and Lauritzen, S. L. (1990). Sequential updating of conditional probabilities on directed graphical structures. Networks, 20(5):579–605.

Spirtes, P. and Glymour, C. (1991). An algorithm for fast recovery of sparse causal graphs. Social Science Computer Review, 9:62–72.

Spirtes, P., Glymour, C., and Scheines, R. (2000). Causation, Prediction, and Search, 2nd Edition. Springer New York, NY.

Stella, F., Acerbi, E., Vigano, E., Poidinger, M., Mortellaro, A., and Zelante, T. (2016). Continuous time Bayesian networks identify Prdm1 as a negative regulator of TH17 cell differentiation in humans. Scientific Reports, 6.

Stella, F., Acerbi, E., Zelante, T., and Narang, V. (2014). Gene network inference using continuous time Bayesian networks: A comparative study and application to Th17 cell differentiation. BMC Bioinformatics, 15.

Stella, F. and Amer, Y. (2012). Continuous time Bayesian network classifiers. Journal of Biomedical Informatics, 45(6):1108–1119.
Thiesson, B. (1995). Accelerated quantification of Bayesian networks with incomplete data. In Proceedings of the First International Conference on Knowledge Discovery and Data Mining, KDD'95, pages 306–311. AAAI Press.

Tibshirani, R. (1996). Regression shrinkage and selection via the LASSO. Journal of the Royal Statistical Society: Series B (Methodological), 58(1):267–288.

Tsamardinos, I., Brown, L. E., and Aliferis, C. F. (2006). The max-min hill-climbing Bayesian network structure learning algorithm. Mach. Learn., 65(1):31–78.

van de Geer, S. (2008). High-dimensional generalized linear models and the LASSO. The Annals of Statistics, 36:614–645.

van de Geer, S. (2016). Estimation and Testing Under Sparsity: École d'Été de Probabilités de Saint-Flour XLV – 2015. Springer Publishing Company, Incorporated, 1st edition.

Villa, S. and Stella, F. (2018). Learning continuous time Bayesian networks in non-stationary domains (extended abstract). In Proceedings of the 27th International Joint Conference on Artificial Intelligence, IJCAI'18, pages 5656–5660. AAAI Press.

Wainwright, M. J., Jaakkola, T., and Willsky, A. S. (2005). MAP estimation via agreement on trees: message-passing and linear programming. IEEE Transactions on Information Theory, 51:3697–3717.

Wasyluk, H., Onisko, A., and Druzdzel, M. (2001). Support of diagnosis of liver disorders based on a causal Bayesian network model. Medical Science Monitor, 7 Suppl 1:327–32.

Xu, J. and Shelton, C. R. (2008). Continuous time Bayesian networks for host level network intrusion detection. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 613–627. Springer.

Xue, L., Zou, H., and Cai, T. (2012). Nonconcave penalized composite conditional likelihood estimation of sparse Ising models. The Annals of Statistics, 40:1403–1429.

Yang, S., Khot, T., Kersting, K., and Natarajan, S. (2016). Learning continuous time Bayesian networks in relational domains: A non-parametric approach. In AAAI.

Ye, F. and Zhang, C.-H. (2010). Rate minimaxity of the Lasso and Dantzig selector for the lq loss in lr balls. Journal of Machine Learning Research, 11:3519–3540.

Yedidia, J., Freeman, W., and Weiss, Y. (2001). Generalized belief propagation. Advances in Neural Information Processing Systems 13.

diff --git a/A9FQT4oBgHgl3EQfMzZX/content/tmp_files/load_file.txt b/A9FQT4oBgHgl3EQfMzZX/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cfd216e12f18cda9a02bee731816eb583fe735e3
--- /dev/null
+++ b/A9FQT4oBgHgl3EQfMzZX/content/tmp_files/load_file.txt
@@ -0,0 +1,5157 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf,len=5156
page_content='Maria Curie-Skłodowska University in Lublin, Faculty of Mathematics, Physics and Computer Science. Maryia Shpak: Structure Learning and Parameter Estimation for Graphical Models via Penalized Maximum Likelihood Methods. PhD dissertation. Supervisor: dr hab. Mariusz Bieniek, prof. UMCS, Institute of Mathematics, University of Maria Curie-Sklodowska. April 2022. arXiv:2301.13269v1 [stat.ML] 30 Jan 2023' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'}
page_content='Abstract: Probabilistic graphical models (PGMs) provide a compact and flexible framework to model very complex real-life phenomena. They combine probability theory, which deals with uncertainty, with a logical structure represented by a graph, which makes it possible to cope with computational complexity and also to interpret and communicate the obtained knowledge. In the thesis we consider two different types of PGMs: Bayesian networks (BNs), which are static, and continuous time Bayesian networks (CTBNs), which, as the name suggests, have a temporal component. We are interested in recovering their true structure, which is the first step in learning any PGM. This is a challenging task, which is interesting in itself from the causal point of view, for the purposes of interpretation of the model and for the decision making process. All approaches to structure learning in the thesis are united by the same idea of maximum likelihood estimation with the LASSO penalty. The problem of structure learning is reduced to the problem of finding the non-zero coefficients in the LASSO estimator for a generalized linear model. In the case of CTBNs we consider the problem both for complete and incomplete data. We support the theoretical results with experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'}
page_content='Keywords and phrases: Probabilistic graphical models, PGM, Bayesian networks, BN, continuous time Bayesian networks, CTBN, maximum likelihood, LASSO penalty, structure learning, Markov Jump Process, MJP, Markov chain, Markov chain Monte Carlo, MCMC, Stochastic Proximal Gradient Descent, drift condition, incomplete data, Expectation-Maximization, EM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'}
page_content='Acknowledgements. Throughout the process of writing this thesis I have received a lot of support and assistance, and I wish to express my gratitude. First, I would like to thank my supervisor, Professor Mariusz Bieniek, who was a great support during this challenging process. His curiosity, open-mindedness and extensive knowledge gave me a chance to research things that are outside of his main field of expertise, and his strive for quality and perfection never let me settle for mediocre results. Next, I want to thank my second advisor, Professor Błażej Miasojedow from the University of Warsaw, who introduced us to the field of probabilistic graphical models and to other areas of statistics, stochastic processes and numerical approximation. His great expertise and enormous patience allowed me to gain extensive knowledge and understanding of these fields, even when I did not believe I could. I would also like to thank dr Wojciech Rejchel from Nicolaus Copernicus University in Toruń, whose expertise in model selection was key in the analysis of the theoretical properties of our novel methods for structure learning. I wish to thank mgr Grzegorz Preisbich and mgr Tomasz Cąkała for making many numerical results for our methods possible. I want to thank my university, Maria Curie-Skłodowska University, for an academic leave giving me the opportunity to finish the dissertation, and for some additional funding. Part of the research was also supported by the Polish National Science Center grant: NCN contract number UMO-2018/31/B/ST1/00253. I would also like to show my appreciation to other people from my university who helped me in various ways, among them Professor Maria Nowak, Professor Jarosław Bylina, Professor Tadeusz Kuczumow, Professor Jurij Kozicki and many others. Finally, I would like to thank my parents, Pavel and Natallia, who were always there to guide me and help me through the years of research; without their support this thesis would not be possible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'}
page_content='I also wish to extend my special thanks to my dear friends for their emotional support and for helping me to stay disciplined; I especially thank Elvira Tretiakova and Olga Kostina.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'}
page_content='Contents: Abstract ii; Acknowledgements iii; 1 Introduction 1; 1.1 Motivation 1; 1.2 Probabilistic Graphical Models 3; 1.3 Overview of the thesis and its contributions 4; 2 Preliminaries 6; 2.1 Notation 6; 2.2 Bayesian networks 7; 2.3 Continuous Time Markov Processes 12; 2.4 Conditional Markov Processes 14; 2.5 Continuous time Bayesian networks' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'}
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 15 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='6 The LASSO penalty .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 19 3 Statistical inference for networks with known structure 21 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1 Learning probabilities in BNs .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 21 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='2 Inference in Bayesian networks .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 25 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='3 Learning probabilities in BNs for incomplete data .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 47 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='4 Learning parameters for CTBNs .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 49 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='5 Inference for CTBNs .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 55 4 Structure learning for Bayesian networks 59 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1 Problem of learning structure of Bayesian Networks .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 59 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='2 Partition MCMC method .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 61 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='3 The novel approach to structure learning .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 62 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='4 Discrete case .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 71 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='5 Numerical results .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 73 5 Structure learning for CTBNs for complete data 77 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1 Notation and preliminaries .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 77 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='2 Main results .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' 
5.3 Proofs of the main results
5.4 Numerical examples
5.5 Extension of the results
6 Structure learning for CTBNs for incomplete data
6.1 Introduction and notation
6.2 Sampling the Markov chain with Rao and Teh's algorithm
6.3 Structure learning via penalized maximum likelihood function
6.4 Numerical results
6.5 FFBS Algorithm
7 Conclusions and discussion

Chapter 1
Introduction
1.1 Motivation

It is common knowledge that we live in a world where data plays a crucial role in many areas and applications of great importance for our society, and the importance of data is still growing. The amount of data in the world is now estimated in dozens of zettabytes, and by 2025 the amount of data generated daily is expected to reach hundreds of exabytes. There is a demand for models and algorithms that can deal with these amounts of data effectively, finding useful patterns and providing better insights into the data. On top of that, most environments require reasoning under uncertainty. Probabilistic graphical models (PGMs) provide a framework that allows us to deal with these and many other challenges in various situations. These models combine probability theory, which handles uncertainty in a mathematically consistent way, with a logical structure, represented by a graph encoding certain independence relationships among variables, which makes it possible to cope with the computational complexity. PGMs encode joint distributions over a set of random variables (often a large one) by combining graph theory with probability, which allows many complex real-world phenomena to be represented compactly and overcomes the complexity of the model, which is exponential in the number of variables. These models have further advantages. Because of their clear structure, PGMs enable us to visualize, interpret and communicate the gained knowledge to others, as well as to make decisions. Some models, for example Bayesian networks, have directed graphs at their core and offer ways to establish causality in various cases. Moreover, graphical models allow us not only to fit the observed data but also to elegantly incorporate prior knowledge, e.g. from experts in the domain, into the model. Besides, certain models take a temporal component into account and consider a system's dynamics in time.

Graphical models are successfully applied to a large number of domains such as image processing and object recognition, medical diagnosis, manufacturing, finance, statistical physics, speech recognition, natural language processing and many others. Let us briefly present a few examples of these applications. Bayesian networks, one of the PGMs considered in this thesis, are extensively used in the development of medical decision support systems, helping doctors to diagnose patients more accurately. In the work by Wasyluk et al. (2001) the authors built and described a probabilistic causal model for the diagnosis of liver disorders. In the domain of hepatology, inexperienced clinicians have been found to make a correct diagnosis in jaundiced patients in less than 45% of cases. Moreover, the number of cases of liver disorders is on the rise and, especially at early stages of a disease, the correct diagnosis is difficult yet critical, because in many cases damage to the liver caused by an untreated disorder may be irreversible. As we already mentioned, and as is stressed in the work above, a huge advantage of these models is that they allow existing frequency data to be combined with expert judgement within one framework, and the model can be updated when new data are obtained, for example patients' data within a hospital or a clinic. Also important in medical diagnosis is that PGMs, Bayesian networks in particular, efficiently model the simultaneous presence of multiple disorders, which happens quite often, whereas many classification approaches treat the disorders as mutually exclusive.
The overall model accuracy, as Wasyluk et al. (2001) claim, seems to be better than that of beginning diagnosticians and reaches almost 80%; the model can be used for the diagnosis itself as well as to help new doctors learn the strategy and optimize the diagnostic process. A few other examples of PGM applications in the medical field are the management of childhood malaria in Malawi (Bathla Taneja et al. (2021)) and estimating the risk of coronary artery disease (Gupta et al. (2019)).

The next popular area of application of graphical models is computational biology, for example Gene Regulatory Network (GRN) inference. A GRN consists of genes or parts of genes, regulatory proteins and interactions between them, and plays a key role in mediating cellular functions and signalling pathways in cells. Accurate inference of the GRN for a specific disease returns disease-associated regulatory proteins and genes, serving as potential targets for drug treatment. Chen and Xuan (2020) argued that Bayesian inference is particularly suitable for GRNs as it is very flexible for large-scale data integration, because the main challenge of GRNs is that there exist hundreds of proteins and tens of thousands of genes, with one protein possibly regulating hundreds of genes, and their regulatory relationships may vary across different cell types, tissues, or diseases. Moreover, the estimation is more robust and easier to compare across multiple datasets. Chen and Xuan (2020) demonstrated this by applying their model to breast cancer data and identified genes relevant to breast cancer recurrence. As another example in this area, Sachs et al. (2005) used Bayesian network computational methods for the derivation of causal influences in cellular signalling networks. These methods automatically elucidated most of the traditionally reported signalling relationships and predicted novel inter-pathway network causalities, which were verified experimentally. Reconstruction of such networks might be applied to understanding native-state tissue signalling biology, complex drug actions, and dysfunctional signalling in diseased cells.

The use of probability models is also extensive in computer vision. In their work, Frey and Jojic (2005) advocate the use of PGMs in computer vision problems that require decomposing the data into interacting components, for example methods for automatic scene analysis. They apply different techniques in a vision model of multiple, occluding objects and compare their performance. Occlusion is a very important effect and one of the biggest challenges in computer vision that needs to be taken into account, and PGMs are considered a good tool to handle it. PGMs are also used for tracking different moving objects in video sequences, for example long-term tracking of groups of pedestrians on the street (Jorge et al. (2007)), where the main difficulties concern total occlusions of the tracked objects, as well as group merging and splitting. Another example is on-line object tracking (Jorge et al. (2004)), useful in real-time applications such as video surveillance, where the authors overcame the problem of needing to analyze the whole sequence before labelling trajectories, so that the tracker can be used on-line, as well as the problem of the unboundedly growing complexity of the network.

1.2 Probabilistic Graphical Models

In the previous subsection we described the advantages of PGMs and why one might be interested in studying them. In this work we focus on two types of PGMs: Bayesian networks (BNs) and continuous time Bayesian networks (CTBNs). The first has a rather long history, dating back to the 1980s (Pearl (1985)), whereas the second is relatively modern (Nodelman et al. (2002)). The underlying structure of both models is a directed graph, which can be treated either as a representation of a certain set of independencies or as a skeleton for factorizing a distribution, a standard form of which is recalled below. In some cases the directions of the arrows in the graph can suggest causality under certain conditions, allowing not only inference from the data but also interventions in the model and manipulation of the desired parameters in the future. BNs are static models, i.e. they do not consider a temporal component, while in CTBNs, as the name suggests, we study models in the context of continuous time. The framework of CTBNs is based on homogeneous Markov processes, but utilizes ideas from Bayesian networks to provide a graphical representation language for these systems.
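To make the factorization view concrete, recall the standard Bayesian network factorization (textbook material, see e.g. Koller and Friedman (2009), rather than a contribution of this thesis): a directed acyclic graph G over variables X1, ..., Xn induces the representation

\[
p(x_1, \ldots, x_n) \;=\; \prod_{i=1}^{n} p\big(x_i \mid x_{\mathrm{pa}(i)}\big),
\]

where pa(i) denotes the set of parents of node i in G. The CTBN analogue, with conditional intensity matrices in place of conditional distributions, is given in Chapter 2.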
A broad and comprehensive tutorial on the existing research on learning Bayesian networks and some adjacent models can be found in Daly et al. (2011). The subject of causality is extensively explored in Spirtes et al. (2000) and Pearl (2000); some references are also given in Daly et al. (2011). Several examples of the use of BNs were presented above. In contrast to regular Bayesian networks, CTBNs have not been studied as thoroughly yet. The most extensive work concerning CTBNs is the PhD thesis of Nodelman (2007). Some related works include, for example, learning CTBNs in non-stationary domains (Villa and Stella (2018)) and in relational domains (Yang et al. (2016)), and continuous time Bayesian network classifiers (Stella and Amer (2012)). As examples of applications, CTBNs have been successfully used to model the presence of people at their computers together with their availability (Nodelman and Horvitz (2004)), for reliability modeling and analysis of dynamical systems (Boudali and Dugan (2006)), for network intrusion detection (Xu and Shelton (2008)), to model social networks (Fan and Shelton (2012)), to model cardiogenic heart failure (Gatti et al. (2012)), and for gene network inference (Stella et al. (2014), Stella et al. (2016)).

1.3 Overview of the thesis and its contributions

There are several problems within both the BN and the CTBN framework. Both models have graph structures which need to be discovered, and this is considered one of the main challenges in the field; this thesis is dedicated exclusively to solving this problem in both frameworks. Another problem is learning the parameters of the model: in the case of BNs this is a set of conditional probability distributions, and in the case of CTBNs a set of conditional intensity matrices (for details see Chapter 2). The last problem is statistical inference based on the obtained network (details are in Chapter 3).

The thesis is organized as follows. In Chapter 2 we provide all the preliminaries necessary for a better understanding of the frameworks of Bayesian networks and continuous time Bayesian networks. Next, in Chapter 3, we review known results on learning the networks' parameters, as well as on inference, to fully cover the concepts of interest. Chapter 4 is dedicated to the structure learning problem for BNs, where we provide novel algorithms for both discrete and continuous data. Chapters 5 and 6 cover the problem of structure learning for CTBNs in the cases of complete and incomplete data, respectively. Finally, Chapter 7 concludes the thesis with a summary and a discussion of the obtained results.

The algorithms in Chapters 4 and 5 lean on feature selection in generalized linear models using the LASSO (Least Absolute Shrinkage and Selection Operator) penalty. It relies on the idea of penalizing the parameters of the model, i.e. adding the sum of the absolute values of the parameters, scaled by a hyperparameter, to the negative log-likelihood, in order to better fit the model and to perform variable selection by forcing some parameters to be exactly 0. The term first appeared in Tibshirani (1996); more on LASSO can be found, for example, in Hastie et al. (2015). In Section 2.6 we provide a short description of the concept.
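In generic form (the chapter-specific objectives are stated later in the thesis), the LASSO estimate of a parameter vector θ with log-likelihood ℓ(θ) is

\[
\hat{\theta} \;=\; \operatorname*{arg\,min}_{\theta}\Big\{ -\ell(\theta) \,+\, \lambda \sum_{j} |\theta_j| \Big\},
\]

where λ ≥ 0 is the hyperparameter controlling the strength of the penalty: the larger λ, the more coordinates of the estimate are forced to be exactly zero, which is the variable selection effect exploited for structure learning.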
The main contributions of the thesis are collected in Chapters 4, 5 and 6, and they are as follows:
- we provide a novel algorithm for learning the structure of BNs based on a penalized maximum likelihood function, both for discrete and for continuous data;
- we present and prove consistency results for the algorithm in the case of continuous data;
- we compare the effectiveness of our method with the other most popular methods for structure learning, applied to benchmark networks of continuous data of different sizes;
- we provide a novel algorithm for learning the structure of CTBNs based on a penalized maximum likelihood function for complete data, and present two theoretical consistency results with proofs;
- we provide a novel algorithm for learning the structure of CTBNs based on a penalized maximum likelihood function for incomplete data, where the log-likelihood function is replaced by its Markov chain Monte Carlo (MCMC) approximation, since it cannot be expressed explicitly;
- we present and prove the convergence of the proposed MCMC scheme and the consistency of the learning algorithm;
- for the above-mentioned MCMC approximation, we design an algorithm to produce the necessary samples;
- in both the complete and the incomplete data case, we provide simulation results showing the effectiveness of the proposed algorithms.

Part of the content (Chapter 5) in its early stages has been published on arXiv: Shpak, M., Miasojedow, B., and Rejchel, W., Structure learning for CTBNs via penalized maximum likelihood methods, arXiv e-prints, 2020, https://doi.org/10.48550/arXiv.2006.07648.

Chapter 2
Preliminaries

In this chapter we provide the theoretical background on Bayesian networks (BNs), Markov processes, conditional Markov processes and continuous time Bayesian networks (CTBNs). We start with the notation common to BNs and CTBNs, which we use throughout the whole thesis. Then we provide a few basic definitions needed to define and understand the concepts of BNs and CTBNs, together with their interpretation and examples. Most of the contents of this chapter come from Nodelman et al. (2002), Nodelman (2007), and Koller and Friedman (2009).

2.1 Notation

First, by upper case letters, for example Xi, B, Y, we denote random variables. In the case of CTBNs, upper case letters represent whole collections of random variables indexed by continuous time; hence in this case Xi(t), Y(t) are random variables for particular time points t. Values of variables are denoted by lower case letters, sometimes indexed by numbers or otherwise marked to represent different values of the same random variable, e.g. xi, s, s′. The set of possible values of a variable X is denoted by Val(X), and by |X| we denote the number of its elements. Sets of variables are denoted by bold-face upper case letters, e.g. X, and the corresponding sets of values by bold-face lower case letters, e.g. x; the set of possible values and its size are again denoted by Val(X) and |X|. A pair G = (V, E) denotes a directed graph, where V is the set of nodes and E is the set of edges. The notation u → w means that there exists an edge from the node u to the node w; we also call edges arrows. The set V \ {w} is denoted by −w. Moreover, we define the set of parents of the node w in the graph G by paG(w) = {u ∈ V : u → w}. When there is no confusion, for convenience we sometimes write pa(w) instead of paG(w). Other useful notation, relevant locally, is provided in the corresponding sections.
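As a purely illustrative aside (not part of the thesis), the parent-set notation above maps directly onto a few lines of code; the toy edge list below is hypothetical.

# Illustrative sketch: parent sets paG(w) = {u in V : u -> w} from an edge list.
from collections import defaultdict

def parent_sets(edges):
    """Return a dict mapping each node w to its set of parents."""
    pa = defaultdict(set)
    for u, w in edges:  # an edge (u, w) encodes u -> w
        pa[w].add(u)
    return pa

# Hypothetical toy graph: 1 -> 3, 2 -> 3, 3 -> 4
print(parent_sets([(1, 3), (2, 3), (3, 4)])[3])  # {1, 2}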
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' , Xn}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Even in the simplest case where these variables are binary-valued, the joint distribution requires the specification of 2n − 1 numbers - the probabilities of the 2n different assignments of the values {x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' , xn}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' The explicit representation of the joint distribution is hard to handle from every perspective except for small values of n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Computationally, it is very expensive to manipulate and generally too large to store in computer memory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Cognitively, it is impossible to acquire so many numbers from a human expert;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' moreover, most of the numbers would be very small and would correspond to events that people cannot reasonably consider.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Statistically, if we want to learn the distribution from data, we would need ridiculously large amounts of data to estimate so many parameters robustly (Koller and Friedman (2009)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Bayesian networks help us specify a high-dimensional joint distribution compactly by exploiting its independence properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' The key notion behind the BN representation is conditional independence, which on the one hand allows to reduce amount of estimated parameters significantly and on the other hand, allows to avoid very strong and naive independence assumptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Two random variables X and Y are independent (denoted by X ⊥ Y ) if and only if the equality P(X ∈ A, Y ∈ B) = P(X ∈ A)P(Y ∈ B) holds for all Borel sets A, B ⊆ R.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' For short, we will write it in the form P(X, Y ) = P(X)P(Y ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' There is also another way to think of independence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' If the random variables X and Y are independent, then P(X ∈ · | Y ) = P(X ∈ ·).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Intuitively, this says that having evidence about Y does not change the distribution of our beliefs on the occurrence of X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' If we wish to model a more complex domain represented by some set of variables, it is unlikely that any of the variables will be independent of each other.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Conditional independence is a weaker notion of independence, but it is more common in real-life situations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Two random variables X and Y are conditionally independent given a set of random variables C (symbolically X ⊥ Y | C) if and only if P(X ∈ A, Y ∈ B | C) = P(X ∈ A | C)P(Y ∈ B | C) (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1) holds for all Borel sets A, B ⊆ R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 7 Obviously (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1) implies P(X ∈ A | C, Y ) = P(X ∈ A | C), which can be written shortly as P(X | C, Y ) = P(X | C).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' So intuitively, the influence that X and Y have on each other is mediated through the variables in the set C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' It means that, when we have some evidence about variables from C, having any additional information about Y does not change our beliefs about X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Let us demonstrate this definition on a simplified example.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Let X be a random variable representing the case if a person has lung cancer and Y representing the case if the same person has yellow teeth.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' These variables are not independent as having yellow teeth is one of the secondary symptoms of lung cancer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' However, when we know that the person is a smoker knowing that they have yellow teeth does not give us any additional insight on lung cancer, and vice versa, as we consider smoking to be the reason of both symptoms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' It is easier to believe that in a given domain most variables will not directly affect most other variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Instead, for each variable only a limited set of other variables influence it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' This is the intuition which leads to the notion of a Bayesian network B over a set of random variables B which is a compact representation of a specific joint probability distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' The formal definition is as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' A Bayesian network B over a set of random variables B is formed by a directed acyclic graph (DAG) G whose nodes correspond to the random variables Bi ∈ B, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' , n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' the set of conditional probability distributions (CPDs) for each Bi, specifying the conditional distribution P(Bi | paG(Bi)) of Bi as a function of its parent set in G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' The CPDs form a set of local probability models that can be combined to describe the full joint distribution over the variables B via the chain rule: P(B1, B2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=', Bn) = n � i=1 P(Bi | paG(Bi)).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='2) The graph G of a Bayesian network encodes a set of conditional independence assump- tions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' In particular, a variable B ∈ B is independent of its non-descendants given the set of its parents paG(B).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' See for example Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1 of an Extended Student network taken from Koller and Friedman (2009).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' As it can be seen, each variable is connected only to a small amount of other variables in the network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' In this example according to (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='2) the joint distribution takes the following form: P(C, D, I, G, S, L, J, H) = = P(C)P(D | C)P(I)P(G | D, I)P(S | I)P(L | G)P(J | L, S)P(H | G, J).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 8 Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='1: The Extended Student network This example will be considered in more detail further in the thesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Now we discuss basic structures for BNs including some examples and give the inter- pretation of the structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' BNs represent probability distributions that can be formed via products of smaller, local conditional probability distributions (one for each variable).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' If the joint distribution is expressed in this form, it means that the independence assump- tions for certain variables are introduced into our model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' To understand what types of independencies are described by directed graphs for simplicity let us start from looking at BN B with three nodes: X, Y, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' In this case, B essentially has only three possible structures, each of which leads to different independence assumptions.' 
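To make the chain rule (2.2) concrete, here is a minimal sketch (not part of the thesis) that stores CPDs as plain Python dictionaries and evaluates the joint probability of one full assignment. To keep it short it uses only a three-node fragment of the Student network, Difficulty → Grade ← Intelligence, and all probability values are made up for illustration.

```python
# A minimal sketch of the chain rule (2.2) on a three-node fragment of the
# Student network: Difficulty -> Grade <- Intelligence. All numbers are
# illustrative, not taken from the thesis.

# CPDs stored as dictionaries; rows of P(G | D, I) are indexed by parent values.
p_D = {0: 0.6, 1: 0.4}                  # P(D), 0 = easy, 1 = hard
p_I = {0: 0.7, 1: 0.3}                  # P(I), 0 = average, 1 = smart
p_G = {                                  # P(G | D, I), G in {0, 1, 2}
    (0, 0): [0.30, 0.40, 0.30],
    (0, 1): [0.90, 0.08, 0.02],
    (1, 0): [0.05, 0.25, 0.70],
    (1, 1): [0.50, 0.30, 0.20],
}

def joint(d: int, i: int, g: int) -> float:
    """P(D=d, I=i, G=g) = P(d) * P(i) * P(g | d, i), the chain rule (2.2)."""
    return p_D[d] * p_I[i] * p_G[(d, i)][g]

# The full joint over (D, I, G) has 2*2*3 - 1 = 11 free parameters, while the
# factored form needs only 1 + 1 + 4*2 = 10; the gap grows quickly with n.
total = sum(joint(d, i, g) for d in (0, 1) for i in (0, 1) for g in range(3))
assert abs(total - 1.0) < 1e-12          # a valid joint distribution
print(joint(1, 0, 2))                    # hard course, average student, top grade
```

The saving is negligible for three variables, but for the full eight-node network, or for the 2^n − 1 numbers discussed above, the factored form is what makes the representation feasible.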
Now we discuss basic structures for BNs, including some examples, and give the interpretation of these structures. BNs represent probability distributions that can be formed via products of smaller, local conditional probability distributions (one for each variable). If the joint distribution is expressed in this form, it means that independence assumptions for certain variables are introduced into our model. To understand what types of independencies are described by directed graphs, for simplicity let us start by looking at a BN B with three nodes: X, Y and Z. In this case, B essentially has only three possible structures, each of which leads to different independence assumptions.

- Common parent, also called common cause. If G is of the form X ← Y → Z, and Y is observed, then X ⊥ Z | Y. However, if Y is unobserved, then X ̸⊥ Z. Intuitively this stems from the fact that Y contains all the information that determines the outcomes of X and Z; once it is observed, there is nothing else that affects these variables' outcomes. The case with smoking and lung cancer described above is such an example of a common cause. See illustration (c) in Figure 2.2.

- Cascade, or indirect connection. If G is of the form X → Y → Z, and Y is observed, then again X ⊥ Z | Y. However, if Y is unobserved, then X ̸⊥ Z. Here, the intuition is again that Y holds all the information that determines the outcome of Z; thus, it does not matter what value X takes. Illustrations (a) and (b) in Figure 2.2 show the cases of indirect causal and indirect evidential effects, respectively.

- V-structure or common effect, also known as explaining away. If G is of the form X → Y ← Z, then knowing Y couples X and Z. In other words, X ⊥ Z if Y is unobserved, but X ̸⊥ Z | Y if Y is observed. See case (d) in Figure 2.2.

[Figure 2.2: The four possible two-edge trails from X to Y via Z: (a) an indirect causal effect; (b) an indirect evidential effect; (c) a common cause; (d) a common effect.]

The last case requires additional explanation. Suppose that Y is a Boolean variable that indicates whether our lawn is wet one morning; X and Z are two explanations for it being wet: either it rained (indicated by X), or the sprinkler turned on (indicated by Z). If we know that the grass is wet (Y is true) and the sprinkler did not go on (Z is false), then the probability that X is true must be one, because that is the only other possible explanation. Hence, X and Z are not independent given Y.
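The coupling induced by observing a common effect can be checked numerically. Below is a small sketch (illustrative numbers, not from the thesis) for the rain/sprinkler/wet-lawn example: marginally X and Z are independent, but conditioning on Y = true makes them dependent.

```python
from itertools import product

# Illustrative priors and a noise-free OR for the wet lawn: Y = X or Z.
p_rain, p_sprk = 0.2, 0.3

def joint(x: bool, z: bool, y: bool) -> float:
    """P(X=x, Z=z, Y=y) with Y deterministically equal to (X or Z)."""
    if y != (x or z):
        return 0.0
    px = p_rain if x else 1 - p_rain
    pz = p_sprk if z else 1 - p_sprk
    return px * pz

# Marginally, P(X=1, Z=1) = P(X=1) P(Z=1): the V-structure leaves X and Z independent.
p_xz = sum(joint(True, True, y) for y in (False, True))
print(p_xz, p_rain * p_sprk)                 # 0.06 vs 0.06

# Given Y = true, the equality breaks: observing the common effect couples X and Z.
p_y = sum(joint(x, z, True) for x, z in product((False, True), repeat=2))
p_x_given_y = sum(joint(True, z, True) for z in (False, True)) / p_y
p_x_given_yz0 = joint(True, False, True) / sum(joint(x, False, True) for x in (False, True))
print(p_x_given_y, p_x_given_yz0)            # ~0.45 vs 1.0: explaining away
```

Learning additionally that the sprinkler was off pushes P(X = true) from about 0.45 all the way to 1, exactly the "only other possible explanation" argument above.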
To generalize this to the case of more variables, and to demonstrate the power but also the limitations of Bayesian networks, we will need the notions of d-separation and I-maps. Let Q, W and O be three sets of nodes in a Bayesian network B represented by G, where the variables O are observed. We use the notation I(p) to denote the set of all independencies of the form (Q ⊥ W | O) that hold in a joint distribution p. To extend the structures mentioned above to more general networks we can apply them recursively over any larger graph, which leads to the notion of d-separation.

Recall that we say that there exists an undirected path in G between the nodes u and w if there exists a sequence v1, ..., vn ∈ V such that vi → vi+1 or vi ← vi+1 for each i = 0, 1, ..., n, where v0 = u and vn+1 = w. Moreover, an undirected path in G between Q ∈ Q and W ∈ W is called active given the observed variables O if for every consecutive triple of variables X, Y, Z on the path one of the following holds:

- common cause: X ← Y → Z and Y ∉ O (Y is unobserved);
- causal trail: X → Y → Z and Y ∉ O (Y is unobserved);
- evidential trail: X ← Y ← Z and Y ∉ O (Y is unobserved);
- common effect: X → Y ← Z and Y or any of its descendants are observed.

Finally, we say that Q and W are d-separated given O if there are no active paths between any nodes A ∈ Q and B ∈ W given O. See examples of d-separation in Figure 2.3. In the second example there is no d-separation because there is an active path which passes through the V-structure activated when X6 is observed.

[Figure 2.3: An example of d-separation on a graph with nodes X1, ..., X6: X1 and X6 are d-separated given X2, X3 (left); X2 and X3 are not d-separated given X1, X6 (right).]
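For small graphs this definition can be applied directly: enumerate the undirected paths and test every consecutive triple against the four cases above. The sketch below is our own illustration, not the thesis's code, and since the exact edges of Figure 2.3 are not recoverable from the text, the six-node graph used here is an assumption chosen to reproduce the two stated queries.

```python
# A compact d-separation check for small graphs: enumerate simple undirected
# paths and test each consecutive triple for activeness. Edges are (parent, child).
from itertools import chain

def descendants(node, edges):
    kids = {c for p, c in edges if p == node}
    return kids | set(chain.from_iterable(descendants(k, edges) for k in kids))

def is_active(path, edges, observed):
    for x, y, z in zip(path, path[1:], path[2:]):
        collider = (x, y) in edges and (z, y) in edges          # X -> Y <- Z
        if collider:
            if y not in observed and not (descendants(y, edges) & observed):
                return False                                     # blocked V-structure
        elif y in observed:
            return False                                         # blocked chain or fork
    return True

def d_separated(a, b, observed, edges):
    nodes = {n for e in edges for n in e}
    nbrs = {n: {v for u, v in edges if u == n} | {u for u, v in edges if v == n}
            for n in nodes}
    def paths(cur, path):
        if cur == b:
            yield path
            return
        for nxt in nbrs[cur] - set(path):
            yield from paths(nxt, path + [nxt])
    return not any(is_active(p, edges, observed)
                   for p in paths(a, [a]) if len(p) >= 2)

# Hypothetical graph in the spirit of Figure 2.3 (an assumption, see above).
E = {("X1", "X2"), ("X2", "X4"), ("X3", "X4"),
     ("X1", "X3"), ("X4", "X6"), ("X5", "X6")}
print(d_separated("X1", "X6", {"X2", "X3"}, E))  # True: both chains are blocked
print(d_separated("X2", "X3", {"X1", "X6"}, E))  # False: X6 activates X2 -> X4 <- X3
```

Path enumeration is exponential in general; practical implementations use a linear-time reachability variant ("Bayes ball"), but the brute-force version mirrors the definition most directly.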
The notion of d-separation lets us describe a large fraction of the dependencies that hold in our model. It can be shown that if Q and W are d-separated given O, then Q ⊥ W | O. We will write

I(G) = {(Q ⊥ W | O) : Q, W are d-separated given O}

to denote the set of independencies corresponding to all d-separations in G. If p factorizes over G, then I(G) ⊆ I(p) and p can be constructed easily. In this case, we say that G is an I-map for p. In other words, all the independencies encoded in G are sound: variables that are d-separated in G are conditionally independent with respect to p. However, the converse is not true: a distribution may factorize over G, yet have independencies that are not captured in G. So an interesting question here is whether for a given probability distribution p we can always find a perfect map, that is, a graph G for which I(G) = I(p). The answer is no (see an example in Koller and Friedman (2009)). Another related question is whether perfect maps are unique when they exist. This is not the case either; for example, the DAGs X → Y and X ← Y encode the same independencies, yet form different graphs. In the general case we say that two Bayesian networks B1, B2 are I-equivalent if their DAGs encode the same independencies, I(G1) = I(G2). For the case of three variables we can notice that graphs (a), (b) and (c) in Figure 2.2 encode the same independencies, so as long as we do not turn graphs into V-structures ((d) is the only structure which encodes the dependency X ̸⊥ Y | Z) we can change the directions of edges and obtain I-equivalent graphs. This brings us to the fact that if G1 and G2 have the same skeleton (meaning that if we drop the directionality of the arrows, we obtain the same undirected graph) and the same V-structures, then I(G1) = I(G2). For the full proof of this statement, the other statements made above, and more information about BNs, see Koller and Friedman (2009).

2.3 Continuous Time Markov Processes

In this section we collect auxiliary results on Markov processes with continuous time. We can think of a continuous time random process X as a collection of random variables indexed by time t ∈ [0, ∞). It is sometimes more convenient to view X across all values of t as a single variable whose values are functions of time, also called paths or trajectories.

Definition 2.4. The Markov condition is the assumption that the future of a process is independent of its past given its present. More explicitly, the process X satisfies the Markov property if and only if P(X(t + ∆t) | X(s), 0 ≤ s ≤ t) = P(X(t + ∆t) | X(t)) for all t, ∆t > 0 (Chung and Walsh (2005)).

In this thesis we focus on Markov processes with a finite state space, which are basically defined by an initial distribution and a matrix of transition intensities. The framework of CTBNs is based on the notion of homogeneous Markov processes, in which the transition intensities do not depend on time.

Definition 2.5. Let X be a stochastic process with continuous time. Let the state space of X be Val(X) = {x1, x2, ..., xN}. Then X is a homogeneous Markov process if and only if its behavior can be specified in terms of an initial distribution P_X^0 over Val(X) and a Markovian transition model, usually presented as an intensity matrix

\[
Q_X = \begin{pmatrix}
-q_1 & q_{12} & \cdots & q_{1N} \\
q_{21} & -q_2 & \cdots & q_{2N} \\
\vdots & \vdots & \ddots & \vdots \\
q_{N1} & q_{N2} & \cdots & -q_N
\end{pmatrix},
\tag{2.3}
\]

where q_i = \sum_{j \neq i} q_{ij} and all the entries q_i and q_{ij} are positive.

Intuitively, the intensity qi gives the "instantaneous probability" of leaving the state xi and the intensity qij gives the "instantaneous probability" of the jump from xi to xj. More formally, for i ≠ j, as ∆t → 0,

P(X(t + ∆t) = xj | X(t) = xi) = qij ∆t + O(∆t²),   (2.4)

and for all i = 1, ..., N,

P(X(t + ∆t) = xi | X(t) = xi) = 1 − qi ∆t + O(∆t²).   (2.5)

Therefore, the matrix QX describes the instantaneous behavior of the process X and also makes the process satisfy the Markov assumption, since it is defined solely in terms of the current state. The instantaneous specification of the transition model of X induces a probability distribution over the set of its possible trajectories. To see how the distribution is induced, we must first recall the notion of a matrix function.

Definition 2.6. The matrix exponential for a matrix Q is defined as

\[
\exp Q = \sum_{k=0}^{\infty} \frac{Q^k}{k!}.
\]

Now Equations (2.4) and (2.5) can be written collectively, as ∆t → 0, in the form

P(X(t + ∆t) | X(t)) = exp(QX ∆t) = I + QX ∆t + O(∆t²).   (2.6)

So given the matrix QX we can describe the transient behavior of X(t) as follows. If X(0) = xi, then the process stays in the state xi for an amount of time exponentially distributed with parameter qi. Hence, the probability density function f and the corresponding distribution function F for the time during which X(t) remains equal to xi are given by

f(t) = qi exp(−qi t), t ≥ 0,
F(t) = 1 − exp(−qi t), t ≥ 0.

The expected time of changing the state is 1/qi. Upon transitioning, X jumps to the state xj with probability qij/qi for j ≠ i.

Example 2.7. Assume that we want to model the behavior of the barometric pressure B(t) discretized into three states (b1 = falling, b2 = steady, and b3 = rising). Then, for instance, we could write the intensity matrix as

\[
Q_B = \begin{pmatrix}
-0.21 & 0.2 & 0.01 \\
0.05 & -0.1 & 0.05 \\
0.01 & 0.2 & -0.21
\end{pmatrix}.
\]

If we view units of time as hours, this means that if the pressure is falling, we expect that it will stop falling in a little less than 5 hours (1/0.21 hours). It will then transition to being steady with probability 0.2/0.21 ≈ 0.95 and to rising with probability 0.01/0.21 ≈ 0.0476.
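As a quick numerical check of this example, the following sketch (using NumPy and SciPy; not part of the thesis) computes the expected holding times 1/qi and jump probabilities qij/qi directly from Q_B, and uses the matrix exponential of (2.6) to obtain finite-time transition probabilities.

```python
import numpy as np
from scipy.linalg import expm

# Intensity matrix from Example 2.7; states: 0 = falling, 1 = steady, 2 = rising.
Q = np.array([[-0.21,  0.20,  0.01],
              [ 0.05, -0.10,  0.05],
              [ 0.01,  0.20, -0.21]])

q = -np.diag(Q)                      # leaving intensities q_i
print(1.0 / q)                       # expected holding times: [4.76, 10.0, 4.76] hours

# Jump probabilities q_ij / q_i (zero the diagonal, divide each row by q_i).
jump = (Q - np.diag(np.diag(Q))) / q[:, None]
print(jump[0])                       # from "falling": [0, 0.952, 0.048]

# Finite-time transition matrix P(t) = exp(Q t); each row sums to one.
P6 = expm(Q * 6.0)                   # distribution of B(t + 6h) given B(t)
print(P6[0])                         # starting from "falling"
```

Note that exp(Q t) gives the exact transition probabilities for any horizon t, whereas (2.4) and (2.5) are only the first-order behavior for small ∆t.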
When the transition model is defined solely in terms of an intensity matrix (as above), we refer to it as using a pure intensity parameterization. The parameters for an N-state process are {qi, qij ∈ QX : 1 ≤ i, j ≤ N, i ≠ j}. This is not the only way to parameterize a Markov process. Note that the distribution over transitions of X factors into two pieces: an exponential distribution over when the next transition will occur and a multinomial distribution over where the process jumps. This is called a mixed intensity parameterization.

Definition 2.8. The mixed intensity parameterization for a homogeneous Markov process X with N states is given by two sets of parameters qX = {qi : 1 ≤ i ≤ N} and θX = {θij : 1 ≤ i, j ≤ N, i ≠ j}, where qX is a set of intensities parameterizing the exponential distributions over when the next transition occurs and θX is a set of probabilities parameterizing the distribution over where the process jumps.

To relate these two parameterizations we note the following theorem from Nodelman (2007).

Theorem 2.9. Let X and Y be two Markov processes with the same state space and the same initial distribution. If X is defined by the intensity matrix QX given by (2.3), and Y is the process defined by the mixed intensity parameterization qY = {q′1, ..., q′N} and θY = {θ′ij : i ≠ j}, then X and Y are stochastically equivalent, meaning they have the same state space and transition probabilities, if and only if q′i = qi for all i = 1, ..., N and θ′ij = qij/qi for all 1 ≤ i, j ≤ N, i ≠ j.
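The correspondence in Theorem 2.9 is a one-line computation in each direction. Here is a small sketch (our own illustration, not the thesis's code) converting between the two parameterizations and checking the round trip on the matrix of Example 2.7.

```python
import numpy as np

def pure_to_mixed(Q: np.ndarray):
    """Split an intensity matrix into (q, theta) as in Theorem 2.9."""
    q = -np.diag(Q)                         # q_i: exponential rates of leaving state i
    theta = Q / q[:, None]                  # theta_ij = q_ij / q_i (requires q_i > 0)
    np.fill_diagonal(theta, 0.0)            # the diagonal carries no jump probability
    return q, theta

def mixed_to_pure(q: np.ndarray, theta: np.ndarray) -> np.ndarray:
    """Recombine (q, theta) into an intensity matrix: q_ij = q_i * theta_ij."""
    Q = q[:, None] * theta
    np.fill_diagonal(Q, -q)
    return Q

Q = np.array([[-0.21,  0.20,  0.01],
              [ 0.05, -0.10,  0.05],
              [ 0.01,  0.20, -0.21]])
q, theta = pure_to_mixed(Q)
assert np.allclose(mixed_to_pure(q, theta), Q)   # round trip recovers Q
```

The mixed form is convenient for simulation (sample a holding time from the rate q_i, then a destination from the row theta_i) and, later, for writing likelihoods of observed trajectories.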
2.4 Conditional Markov Processes

In order to compose Markov processes into a larger network, we need to introduce the notion of a conditional Markov process. This is an inhomogeneous Markov process where the intensities vary with time, but not as a direct function of time. Rather, the intensities depend on the current values of a set of other variables, which also evolve as Markov processes.

Let Y be a process with a state space Val(Y) = {y1, y2, ..., ym}. Assume that Y evolves as a Markov process Y(t) whose dynamics are conditioned on a set V of variables, each of which can also evolve over time. Then we have a conditional intensity matrix (CIM), which can be written as

\[
Q_{Y|V} = \begin{pmatrix}
-q_1(V) & q_{12}(V) & \cdots & q_{1m}(V) \\
q_{21}(V) & -q_2(V) & \cdots & q_{2m}(V) \\
\vdots & \vdots & \ddots & \vdots \\
q_{m1}(V) & q_{m2}(V) & \cdots & -q_m(V)
\end{pmatrix}.
\]

Equivalently, we can view a CIM as a set of intensity matrices QY|v, one for each instantiation of values v of the variables V; see Example 2.10. Since the framework of CTBNs which we consider in this thesis has a graph at its core, we will refer to the set of variables V as the set of parents of Y and denote it by paG(Y). Note that if the parent set paG(Y) is empty, then the CIM is simply a standard intensity matrix. Just like a regular intensity matrix, a CIM induces the distribution of the dynamics of Y given the behavior of paG(Y) = V. If V takes the value v on the interval [t, t + ε) for some ε > 0, then as in Equation (2.6), as ∆t → 0,

P(Y(t + ∆t) | Y(t), v) = exp(QY|v ∆t) = I + QY|v ∆t + O(∆t²).

If we specify an initial distribution of Y, then we have defined a Markov process whose behavior depends on the instantiation v of values of paG(Y).

Example 2.10. Consider a variable E(t) which models whether or not a person is eating (e1 = not eating, e2 = eating), conditioned on a variable H(t) which models whether or not the person is hungry (h1 = not hungry, h2 = hungry). Then we can specify an exemplary CIM for E(t) as

\[
Q_{E|h_1} = \begin{pmatrix} -0.01 & 0.01 \\ 10 & -10 \end{pmatrix}, \qquad
Q_{E|h_2} = \begin{pmatrix} -2 & 2 \\ 0.01 & -0.01 \end{pmatrix}.
\]

For instance, given this model, we expect that a person who is hungry and not eating is going to start eating in half an hour. Also, we expect a person who is not hungry and is eating to stop eating in 6 minutes (1/10 hour).
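To see these numbers in action, the following sketch (illustrative code, not from the thesis) samples holding times of E(t) while the parent H is held fixed, using the exponential holding-time decomposition from Section 2.3; the mean time to start eating when hungry comes out near 0.5 hours, as claimed above.

```python
import numpy as np

rng = np.random.default_rng(0)

# CIMs from Example 2.10, keyed by the parent state; E states: 0 = not eating, 1 = eating.
Q_E = {
    "h1": np.array([[-0.01,  0.01], [10.0, -10.0]]),   # not hungry
    "h2": np.array([[-2.0,   2.0 ], [ 0.01, -0.01]]),  # hungry
}

def time_to_leave(h: str, state: int) -> float:
    """Sample the exponential holding time of E in `state` while H = h stays fixed."""
    q_i = -Q_E[h][state, state]
    return rng.exponential(scale=1.0 / q_i)

# Monte Carlo check: hungry and not eating -> starts eating after ~1/2 hour on average.
samples = [time_to_leave("h2", 0) for _ in range(100_000)]
print(np.mean(samples))   # close to 0.5
```

Holding H fixed is exactly the regime the CIM describes; once H itself jumps, E switches to the other intensity matrix, which is what the full CTBN semantics of the next section formalizes.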
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='5 Continuous time Bayesian networks In this section we define the notion of CTBN, which in essence is a probabilistic graphical model with the nodes as variables, the state evolving continuously over time, and where the evolution of each variable depends on the state of its parents in the graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Before the formal definition we recall an example from Nodelman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' (2002).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Con- sider the situation in medical research where some drug has been administered to a patient and we wish to know how much time it takes for the drug to have an effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' The answer to this question will likely depend on various factors, such as how recently the patient ate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' We want to model the temporal process for the effect of the drug and how its dynamics depends on other factors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' In contrast to previously developed methods of approaching such a problem (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' event history analysis, Markov process models) the notion of CTBN introduced by Nodelman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' (2002) allows the specification of models with a large struc- tured state space where some variables do not directly depend on others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' For example, the distribution of how fast the drug takes effect might be mediated through how fast it reaches the bloodstream, which in turn may be affected by how recently the person ate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content='4 shows an exemplary graph structure for CTBN modelling the drug effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/A9FQT4oBgHgl3EQfMzZX/content/2301.13269v1.pdf'} +page_content=' There are nodes for the uptake of the drug and for the resulting concentration of the drug in the bloodstream.' 
The concentration is also affected by how full the patient's stomach is. The drug is supposed to alleviate joint pain, which may be aggravated by falling pressure. The drug may also cause drowsiness. The model contains a cycle, indicating that whether the person is hungry depends on how full their stomach is, which depends on whether or not they are eating.

Let $\mathcal{G} = (\mathcal{V}, \mathcal{E})$ denote a directed graph with possible cycles, where $\mathcal{V}$ is the set of nodes and $\mathcal{E}$ is the set of edges. Further, in the context of probabilistic graphical models we use the terms "nodes" and "random variables" interchangeably. For every $w \in \mathcal{V}$ we consider a corresponding space $\mathcal{X}_w$ of possible states at $w$ and we assume that each space $\mathcal{X}_w$ is finite. We consider a continuous time stochastic process on a product space $\mathcal{X} = \prod_{w\in\mathcal{V}} \mathcal{X}_w$, so a state $\mathbf{s} \in \mathcal{X}$ is a configuration $\mathbf{s} = (s_w)_{w\in\mathcal{V}}$, where $s_w \in \mathcal{X}_w$. If $\mathcal{W} \subseteq \mathcal{V}$, then we write $s_\mathcal{W} = (s_w)_{w\in\mathcal{W}}$ for the configuration $\mathbf{s}$ restricted to the nodes in $\mathcal{W}$. We also use the notation $\mathcal{X}_\mathcal{W} = \prod_{w\in\mathcal{W}} \mathcal{X}_w$, so we can write $s_\mathcal{W} \in \mathcal{X}_\mathcal{W}$. In what follows we use the bold symbol $\mathbf{s}$ to denote configurations belonging to $\mathcal{X}$ only. All restricted configurations will be denoted with the standard font $s$. Now suppose we have a family of functions $Q_w : \mathcal{X}_{\mathrm{pa}_\mathcal{G}(w)} \times (\mathcal{X}_w \times \mathcal{X}_w) \to [0, \infty)$.
For a fixed $c \in \mathcal{X}_{\mathrm{pa}_\mathcal{G}(w)}$ we consider $Q_w(c;\, \cdot, \cdot\,)$ as a conditional intensity matrix (CIM) at the node $w$ (only the off-diagonal elements of this matrix have to be specified; the diagonal ones are irrelevant). The state of the CTBN at time $t$ is a random element $X(t)$ of the space $\mathcal{X}$ of all the configurations. Let $X_w(t)$ denote its $w$-th coordinate. The process $\{(X_w(t))_{w\in\mathcal{V}} : t \geq 0\}$ is assumed to be Markov and its evolution can be described informally as follows: transitions, or jumps, at the node $w$ depend on the current configuration of its parents. If the state of any parent changes, then the node $w$ switches to other transition probabilities. If $s_w \neq s'_w$, where $s_w, s'_w \in \mathcal{X}_w$, then
$$P\left(X_w(t + \mathrm{d}t) = s'_w \mid X_{-w}(t) = s_{-w},\, X_w(t) = s_w\right) = Q_w(s_{\mathrm{pa}_\mathcal{G}(w)}, s_w, s'_w)\, \mathrm{d}t.$$

Definition 2.11. A continuous time Bayesian network $\mathcal{N}$ over a set of random variables $\mathbf{X} = \{X_1, \ldots, X_n\}$ is formed by two components. The first one is an initial distribution $P^0_\mathbf{X}$ specified as a Bayesian network $\mathcal{B}$ over $\mathbf{X}$. The second component is a continuous transition model, specified as: a directed (possibly cyclic) graph $\mathcal{G}$ whose nodes correspond to the random variables $X_i$; and a conditional intensity matrix $Q_{X_i|\mathrm{pa}_\mathcal{G}(X_i)}$ for each variable, specifying the continuous dynamics of each variable $X_i$ given its parents' configuration.
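The dynamics just described can be simulated directly as a race of exponential clocks: in the current global configuration each admissible jump at a node $w$ competes with rate $Q_w(s_{\mathrm{pa}(w)}, s_w, s'_w)$, and exactly one node jumps at a time. A Gillespie-style sketch (the data layout and function names are our own illustration, not code from the thesis):

```python
import numpy as np

def simulate_ctbn(nodes, parents, cims, x0, t_max, rng):
    """Sample one CTBN trajectory on [0, t_max] (Gillespie-style).

    nodes   : list of node names
    parents : dict node -> tuple of parent node names
    cims    : dict node -> dict parent-configuration-tuple -> intensity matrix
    x0      : dict node -> initial state index
    """
    x, t, path = dict(x0), 0.0, [(0.0, dict(x0))]
    while True:
        # Competing transitions: node w jumps s_w -> s'_w with rate
        # Q_w(s_pa(w); s_w, s'_w) under the current parent configuration.
        moves, rates = [], []
        for w in nodes:
            Q = cims[w][tuple(x[p] for p in parents[w])]
            for s_next in range(Q.shape[0]):
                if s_next != x[w] and Q[x[w], s_next] > 0.0:
                    moves.append((w, s_next))
                    rates.append(Q[x[w], s_next])
        total = float(sum(rates))
        if total == 0.0:                 # absorbing configuration
            return path
        t += rng.exponential(1.0 / total)  # exponential holding time
        if t >= t_max:
            return path
        w, s_next = moves[rng.choice(len(moves), p=np.array(rates) / total)]
        x[w] = s_next                    # exactly one node jumps at a time
        path.append((t, dict(x)))
```

For Example 2.10 one would set `parents['E'] = ('H',)` and `cims['E'] = {(0,): Q_E_h1, (1,): Q_E_h2}`, together with some CIM for $H$.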
[Figure 2.4: an exemplary CTBN graph for the drug effect, with nodes Eating, Hungry, Full stomach, Uptake, Barometer, Concentration, Joint pain and Drowsy.]

Essentially, a CTBN is a Markov jump process (MJP) on the state space $\mathcal{X}$ with transition intensities given by
$$Q(\mathbf{s}, \mathbf{s}') = \begin{cases} Q_w(s_{\mathrm{pa}_\mathcal{G}(w)}, s_w, s'_w), & \text{if } s_{-w} = s'_{-w} \text{ and } s_w \neq s'_w \text{ for some } w, \\ 0, & \text{if } s_{-w} \neq s'_{-w} \text{ for all } w, \end{cases} \tag{2.7}$$
for $\mathbf{s} \neq \mathbf{s}'$. Obviously, $Q(\mathbf{s}, \mathbf{s})$ is defined "by subtraction" to ensure that $\sum_{\mathbf{s}'} Q(\mathbf{s}, \mathbf{s}') = 0$. For convenience, we will often write $Q(\mathbf{s}) = -Q(\mathbf{s}, \mathbf{s})$ so that $Q(\mathbf{s}) \geq 0$. In particular, $Q_w(c; s_w) = \sum_{s' \neq s_w} Q_w(c; s_w, s')$ denotes the total intensity of leaving the state $s_w$.

It is important to note that we make a fundamental assumption in the construction of the CTBN model: two variables cannot transition at the same time (a zero in the definition of $Q(\mathbf{s}, \mathbf{s}')$). This can be viewed as a formalization of the view that variables must represent distinct aspects of the world. We should not, therefore, model a domain in which we have two variables that functionally and deterministically change simultaneously. For example, in the drug effect network, we should not add a variable describing the type of food, if any, a person is eating. We could, however, change the value space of the "Eating" variable from a binary "yes/no" to a more descriptive set of possibilities. Further we omit the symbol $\mathcal{G}$ in the indices and write $\mathrm{pa}(w)$ instead of $\mathrm{pa}_\mathcal{G}(w)$.

For a CTBN the density of a sample trajectory $X = X([0, T])$ on a bounded time interval $[0, T]$ decomposes as follows:
$$p(X) = \nu(X(0)) \prod_{w\in\mathcal{V}} p(X_w \,\|\, X_{\mathrm{pa}(w)}), \tag{2.8}$$
where $\nu$ is the initial distribution on $\mathcal{X}$ and $p(X_w \,\|\, X_{\mathrm{pa}(w)})$ is the density of a piecewise homogeneous Markov jump process with the intensity matrix equal to $Q_w(c;\, \cdot, \cdot\,)$ in every time sub-interval such that $X_{\mathrm{pa}(w)} = c$. Below we explicitly write an expression for the density $p(X_w \,\|\, X_{\mathrm{pa}(w)})$ in terms of moments of jumps and the skeleton of the process $(X_w, X_{\mathrm{pa}(w)})$, as in (2.8), where by skeleton we understand the sequence of states of the process corresponding to the sequence of moments of time. Let $T^w = (t^w_0, \ldots, t^w_i, \ldots)$ and $T^{\mathrm{pa}(w)} = (t^{\mathrm{pa}(w)}_0, \ldots, t^{\mathrm{pa}(w)}_j, \ldots)$ denote moments of jumps at the node $w \in \mathcal{V}$ and at the parent nodes, respectively. By convention, put $t^w_0 = t^{\mathrm{pa}(w)}_0 = 0$ and $t^w_{|T^w|+1} = t^{\mathrm{pa}(w)}_{|T^{\mathrm{pa}(w)}|+1} = t_{\max}$. Analogously, $S^w$ and $S^{\mathrm{pa}(w)}$ denote the corresponding skeletons.
Thus we divide the time interval $[0, t_{\max}]$ into disjoint segments $[t^{\mathrm{pa}(w)}_j, t^{\mathrm{pa}(w)}_{j+1})$, $j = 0, 1, \ldots, |T^{\mathrm{pa}(w)}|$, such that $X_{\mathrm{pa}(w)}$ is constant and $X_w$ is homogeneous in each segment. Next we define the sets $I_j = \{i > 0 : t^{\mathrm{pa}(w)}_j < t^w_i < t^{\mathrm{pa}(w)}_{j+1}\}$, with notation $j_{\mathrm{beg}}$ and $j_{\mathrm{end}}$ for the first and the last element of $I_j$. Then we obtain the following formula:
$$
\begin{aligned}
p(X_w \,\|\, X_{\mathrm{pa}(w)}) &= p(T^w, S^w \,\|\, S^{\mathrm{pa}(w)}, T^{\mathrm{pa}(w)}) \\
&= \prod_{j=0}^{|T^{\mathrm{pa}(w)}|} \Bigg[ \mathbb{I}(I_j \neq \emptyset) \Bigg( \prod_{i\in I_j} Q_w(s^{\mathrm{pa}(w)}_j; s^w_{i-1}, s^w_i) \times \prod_{i\in I_j\setminus\{j_{\mathrm{beg}}\}} \exp\!\left(-(t^w_i - t^w_{i-1})\, Q_w(s^{\mathrm{pa}(w)}_j; s^w_{i-1})\right) \\
&\qquad \times \exp\!\left(-(t^w_{j_{\mathrm{beg}}} - t^{\mathrm{pa}(w)}_j)\, Q_w(s^{\mathrm{pa}(w)}_j; s^w_{j_{\mathrm{beg}}-1}) - (t^{\mathrm{pa}(w)}_{j+1} - t^w_{j_{\mathrm{end}}})\, Q_w(s^{\mathrm{pa}(w)}_j; s^w_{j_{\mathrm{end}}})\right) \Bigg) \\
&\qquad + \mathbb{I}(I_j = \emptyset) \exp\!\left(-(t^{\mathrm{pa}(w)}_{j+1} - t^{\mathrm{pa}(w)}_j)\, Q_w(s^{\mathrm{pa}(w)}_j; s^w_{j_{\mathrm{beg}}-1})\right) \Bigg].
\end{aligned}
$$
Below in Figure 2.5 there is an example of a trajectory of the node $w$ with two possible states and of its parent, also with two possible states 0 and 1. In this case the sets of indices are $I_0 = \{2, 3, 4\}$, $I_1 = \emptyset$ and $I_2 = \{7\}$.

Figure 2.5: An exemplary trajectory of a node $w$ and its parents $\mathrm{pa}(w)$.
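In practice this density is most easily evaluated by accumulating, for every parent configuration, the jump counts and the state occupation times of the node; the next display makes this reduction explicit. A sketch of such an evaluation (our own helper, assuming jump moments are given as sorted sequences starting at $t_0 = 0$, no simultaneous jumps, and intensity matrices with the usual negative row sums on their diagonals):

```python
import numpy as np
from collections import defaultdict

def node_log_density(times, states, pa_times, pa_states, Q, t_max):
    """log p(X_w || X_pa(w)) from jump moments and skeletons of one node.

    times, states       : jump moments of the node w and the states entered,
                          with the convention times[0] = 0 (initial state)
    pa_times, pa_states : the same for the joint parent configuration;
                          configurations are hashable tuples
    Q                   : dict mapping configuration c to the matrix Q_w(c; ., .)
    """
    n = defaultdict(int)        # n_w(c; s, s'): jump counts
    t_occ = defaultdict(float)  # t_w(c; s): occupation times
    # Split [0, t_max] at every jump of the node or of its parents.
    events = sorted(set(times) | set(pa_times) | {t_max})
    for a, b in zip(events[:-1], events[1:]):
        s = states[np.searchsorted(times, a, side='right') - 1]
        c = pa_states[np.searchsorted(pa_times, a, side='right') - 1]
        t_occ[(c, s)] += b - a
    for i in range(1, len(times)):  # count jumps under the active configuration
        c = pa_states[np.searchsorted(pa_times, times[i], side='right') - 1]
        n[(c, states[i - 1], states[i])] += 1
    ll = sum(cnt * np.log(Q[c][s, s2]) for (c, s, s2), cnt in n.items())
    ll += sum(Q[c][s, s] * dur for (c, s), dur in t_occ.items())  # -Q_w(c; s) * t
    return ll
```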
In consequence, using the fundamental property of the exponential function, we may write $p(X_w \,\|\, X_{\mathrm{pa}(w)})$ in the form
$$p(X_w \,\|\, X_{\mathrm{pa}(w)}) = \prod_{c\in\mathcal{X}_{\mathrm{pa}(w)}} \prod_{s\in\mathcal{X}_w} \prod_{\substack{s'\in\mathcal{X}_w \\ s'\neq s}} Q_w(c; s, s')^{n^T_w(c;\, s, s')} \exp\!\left(-Q_w(c; s, s')\, t^T_w(c; s)\right), \tag{2.9}$$
where $n^T_w(c; s, s')$ denotes the number of jumps from $s \in \mathcal{X}_w$ to $s' \in \mathcal{X}_w$ at the node $w$ on the time interval $[0, T]$ which occur when the parent configuration is $c \in \mathcal{X}_{\mathrm{pa}(w)}$, and $t^T_w(c; s)$ is the length of time that the node $w$ is in the state $s \in \mathcal{X}_w$ on the time interval $[0, T]$ when the configuration of parents is $c \in \mathcal{X}_{\mathrm{pa}(w)}$. To simplify the notation we omit the upper index $T$ in $n^T_w(c; s, s')$ and $t^T_w(c; s)$ further in the thesis, except for the part where we consider martingales.

2.6 The LASSO penalty

In this section we shortly describe the notions of the LASSO penalty and LASSO estimators, which constitute the base of the novel algorithms for structure learning in the thesis. LASSO is the acronym for Least Absolute Shrinkage and Selection Operator. The term was coined by Tibshirani (1996), though the general concept was introduced even earlier.
Most of the contents of this section come from Hastie et al. (2015). The underlying idea of LASSO estimators is the assumption of sparsity. A sparse statistical model is one in which only a relatively small number of parameters (or predictors) play an important role. Consider a linear regression model with $N$ observations $y_i$ of a target variable and $x_i = (x_{i1}, \ldots, x_{ip})^\top$ of $p$ associated predictor variables, which are also called features. The goal is to predict the target from the predictors for future data and also to discover which predictors are relevant. In the linear regression model we assume that
$$y_i = \beta_0 + \sum_{j=1}^{p} \beta_j x_{ij} + \epsilon_i,$$
where $\beta = (\beta_0, \beta_1, \ldots, \beta_p)$ is the vector of unknown parameters and $\epsilon_i$ is an error term. The standard way to find $\beta$ is to minimize the least-squares function
$$\sum_{i=1}^{N} \Big( y_i - \beta_0 - \sum_{j=1}^{p} \beta_j x_{ij} \Big)^2.$$
Typically all of the estimates turn out to be non-zero, which complicates the interpretability of the model, especially with a high number of possible predictors. Moreover, since the data are noisy, the model will fit the training observations too closely and the parameters will most probably take extreme values. In the case when $p > N$ the estimates are not even unique, so most solutions will overfit the data.
The solution is to regularize the estimation process, i.e. to add some constraints on the parameters. The LASSO estimator uses the $\ell_1$-penalty, which means that we minimize the least-squares function with an additional bound on the $\ell_1$-norm of $\beta$, namely $\|\beta\|_1 = \sum_{j=1}^{p} |\beta_j| \leq t$. The value $t$ is a user-specified parameter usually called a hyperparameter. The motivation to use the $\ell_1$-penalty instead of any other $\ell_q$-penalty comes from the fact that if $t$ is small enough we obtain a sparse solution with only a small number of non-zero parameters. This does not happen for the $\ell_q$-norm if $q > 1$, and if $q < 1$ the solutions are sparse but the problem is not convex. Convexity simplifies the computations as well as the theoretical analysis of the properties of the estimator. This allows for scalable algorithms capable of handling problems with even millions of parameters. Before the optimization process we typically standardize the predictors so that each column is centred, i.e. the mean of each column is 0, and has unit variance, i.e. the mean of squares is equal to 1. We also centre the target column, so as a result we can omit the intercept term $\beta_0$ in the estimation process. The LASSO penalty is used not only in linear regression but in a wide variety of models, for example generalized linear models, where the target and the linear model are connected through some link function.
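In its penalized (Lagrangian) form, introduced below, the LASSO problem can be solved by proximal gradient descent, whose proximal step is coordinate-wise soft-thresholding. A minimal sketch under our own naming (an illustration, not the algorithm proposed in the thesis), assuming `X` is standardized and `y` centred:

```python
import numpy as np

def soft_threshold(z, tau):
    # Proximal operator of tau * ||.||_1: shrink each coordinate toward zero.
    return np.sign(z) * np.maximum(np.abs(z) - tau, 0.0)

def lasso_ista(X, y, lam, n_iter=5000):
    """Minimize (1/2n)||y - X @ beta||^2 + lam * ||beta||_1 by proximal gradient."""
    n, p = X.shape
    beta = np.zeros(p)
    step = n / np.linalg.norm(X, 2) ** 2   # 1 / Lipschitz constant of the smooth part
    for _ in range(n_iter):
        grad = -X.T @ (y - X @ beta) / n
        beta = soft_threshold(beta - step * grad, step * lam)
    return beta

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 20))
beta_true = np.zeros(20)
beta_true[:3] = [2.0, -1.0, 0.5]
y = X @ beta_true + 0.1 * rng.standard_normal(100)
print(np.nonzero(lasso_ista(X, y, lam=0.1))[0])  # typically prints [0 1 2]
```

The demo illustrates the point of the $\ell_1$-penalty: with a sufficiently large $\lambda$, coordinates outside the true support are set exactly to zero rather than merely shrunk.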
Hence, in a more general case we can formulate the optimization problem as
$$\hat{\theta} = \operatorname*{argmin}_{\theta\in\mathbb{R}^p} \left[ L(\theta, D) + \lambda\|\theta\|_1 \right],$$
where $L(\theta, D)$ is an arbitrary loss function for the data $D$ and the parameter vector $\theta$. The tuning hyperparameter $\lambda$ corresponds to the constraining value $t$; there is a one-to-one correspondence between them. This is the so-called Lagrangian form of the LASSO problem described above. In the setting of structure learning for Bayesian networks, both static and continuous, we formulate the problem as an optimization problem for a linear or generalized linear model, where the parameter vectors encode the dependencies between variables in the network. We use the LASSO penalty in all the formulated problems, hence the problem of finding arrows in the graph reduces to recovering certain non-zero parameters in the LASSO estimator. As the loss functions we use the negative log-likelihood function and the residual sum of squares.

Chapter 3

Statistical inference for networks with known structure

There are three main classes of problems concerning Bayesian networks (both static and continuous time). The first one is to discover the structure of the network. Namely, we need to specify the underlying graph of the network, whose nodes are the variables of interest and whose edges encode the dependencies between the variables. This problem will be covered in subsequent chapters. The second problem is to learn the parameters of the network. Namely, knowing the structure of the network, we need to specify the behaviour of the network in any specified node given the states of its parents. In the context of static BNs this behaviour is encoded by conditional probability distributions (CPDs, see (2.2)).
The corresponding parameters in the case of CTBNs are conditional intensity matrices (CIMs, see (2.7)). The third type of problem is to make statistical inference using the network with known structure and parameters. For instance, we may want to predict the state of some node of interest or, knowing the states of some nodes, find which combination of the remaining nodes explains them best. Finally, we may be interested in the prediction of the future dynamics (in time) of some nodes of the network. In this chapter we discuss well-known results concerning the problems of learning the parameters of the network and then the inference based on the fully discovered network. The contents of this chapter are mainly based on Koller and Friedman (2009), Nodelman (2007) and Heckerman (2021), with more detailed references throughout.

3.1 Learning probabilities in BNs

First we discuss the discrete case. We assume that the Bayesian network with the known underlying graph $\mathcal{G}$ includes $n$ nodes, each corresponding to a variable $X_i \in \mathbf{X}$ for $i = 1, \ldots, n$. Also, each variable $X_i$ is discrete, having $r_i$ possible values $x^1_i, x^2_i, \ldots, x^{r_i}_i$.
We denote an observed value of $X_i$ in the $l$-th observation as $X_i[l]$. If each node is observed $m$ times, then we obtain the sample dataset $D = \{D_1, D_2, \ldots, D_m\}$ with the sample $D_l = (X_1[l], X_2[l], \ldots, X_n[l])$ indicating the observed values of all the nodes in the $l$-th sampling. We refer to each $D_l$ as a case. If all cases are complete, i.e. no missing values occurred in the dataset $D$, it is considered complete data; otherwise, it is called incomplete data. Missing values in data can occur for many different reasons; for instance, people filling out a survey may prefer not to answer some questions, or certain measurements might not be available for some patients in a medical setting. There are mainly two categories of methods for parameter estimation in BNs: one for dealing with complete data, and the other for incomplete data. We will provide concise descriptions of two algorithms for the first category, namely maximum likelihood estimation and the Bayesian method, and we will briefly discuss algorithms for the second category.
Assume that, as in (2.2), we can write the joint distribution of the variables in $\mathbf{X}$ as follows:
$$P(X_1, X_2, \ldots, X_n \mid \theta) = \prod_{i=1}^{n} P(X_i \mid \mathrm{pa}_\mathcal{G}(X_i), \theta_i)$$
for some vector of parameters $\theta = (\theta_1, \ldots, \theta_n)$, where $\theta_i$ is the vector of parameters for the local distribution $P(X_i \mid \mathrm{pa}_\mathcal{G}(X_i), \theta_i)$. For shortness, further in this chapter we will write $\mathrm{pa}(X_i)$ instead of $\mathrm{pa}_\mathcal{G}(X_i)$. In the case of discrete and completely observed data the categorical distribution is commonly used. We note that in the literature concerning learning Bayesian networks this type of distribution is often referred to as the multinomial distribution, or in some cases as the unrestricted multinomial distribution (for example Heckerman (2021)), to differentiate it from multinomial distributions that are low-dimensional functions of $\mathrm{pa}(X_i)$. Hence we assume that each local distribution function is a collection of categorical distributions, one distribution for each configuration of its parents, namely
$$P(X_i = x^k_i \mid \mathrm{pa}^j_i, \theta_i) = \theta_{ijk} > 0, \quad \text{for } 1 \leq k \leq r_i,\ 1 \leq j \leq q_i, \tag{3.1}$$
where $q_i = \prod_{X_j\in\mathrm{pa}(X_i)} r_j$ and $\mathrm{pa}^1_i, \mathrm{pa}^2_i, \ldots, \mathrm{pa}^{q_i}_i$ denote all possible configurations of $\mathrm{pa}(X_i)$, and $\theta_i = ((\theta_{ijk})^{r_i}_{k=2})^{q_i}_{j=1}$ are the parameters. Note that the parameter $\theta_{ij1}$ is given by the difference $1 - \sum^{r_i}_{k=2} \theta_{ijk}$.
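Enumerating the $q_i$ parent configurations is a purely mechanical step; one common choice (ours here, nothing in the thesis prescribes it) is a mixed-radix encoding of the tuple of parent values:

```python
def config_index(parent_states, cardinalities):
    """Map a configuration of pa(X_i) to a single index j in {0, ..., q_i - 1}.

    parent_states : tuple of observed parent values (0-based)
    cardinalities : tuple (r_j for X_j in pa(X_i)); q_i is their product
    """
    j = 0
    for s, r in zip(parent_states, cardinalities):
        j = j * r + s          # mixed-radix (positional) encoding
    return j

# e.g. two binary parents give q_i = 4 configurations:
assert [config_index(c, (2, 2)) for c in [(0, 0), (0, 1), (1, 0), (1, 1)]] == [0, 1, 2, 3]
```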
For convenience, let us denote the vector of parameters $\theta_{ij} = (\theta_{ij2}, \theta_{ij3}, \ldots, \theta_{ijr_i})$ for all $1 \leq i \leq n$ and $1 \leq j \leq q_i$, so that $\theta_i = (\theta_{ij})^{q_i}_{j=1}$. As is well known, maximum likelihood estimation (MLE) is a method of estimating the parameters of a probability distribution by maximizing the likelihood function, so that under the assumed statistical model the observed data is the most probable. Basically, if $C_k$ is the observed outcome of a random experiment $C$ with several possible outcomes $C_1, C_2, \ldots, C_n$, then the observed outcome should be the most probable one under the fitted model; hence the estimate $\hat{\theta}$ is the value of $\theta$ that maximizes the likelihood function $P(C \mid \theta)$. For the general Bayesian network with $n$ nodes we denote the likelihood function as
$$L(\theta : D) = P(D \mid \theta) = \prod_{l=1}^{m} P(D_l \mid \theta) = \prod_{l=1}^{m} P(X_1[l], X_2[l], \ldots, X_n[l] \mid \theta) = \prod_{l=1}^{m} \prod_{i=1}^{n} P(X_i[l] \mid \mathrm{pa}_i[l], \theta_i) = \prod_{i=1}^{n} \prod_{l=1}^{m} P(X_i[l] \mid \mathrm{pa}_i[l], \theta_i) = \prod_{i} L_i(\theta_i : D), \tag{3.2}$$
where by $\mathrm{pa}_i[l] = \mathrm{pa}(X_i)[l]$ we denote the $l$-th observation of the parent vector of the variable $X_i$. This representation shows that the likelihood decomposes as a product of independent factors, one for each CPD in the network.
This important property is called the global decomposition of the likelihood function. Moreover, this decomposition is an immediate consequence of the network structure and does not depend on any particular choice of the parameterization for CPDs (see Koller and Friedman (2009)). If the conditional distribution of $X_i$ given its parents $\mathrm{pa}_\mathcal{G}(X_i)$ is the categorical distribution, then the local likelihood function can be further decomposed as follows:
$$L_i(\theta_i : D) = \prod_{l=1}^{m} P(X_i[l] \mid \mathrm{pa}_i[l], \theta_i) = \prod_{j=1}^{q_i} \prod_{k=1}^{r_i} \theta_{ijk}^{N(x^k_i,\, \mathrm{pa}^j_i)}, \tag{3.3}$$
where $N(x^k_i, \mathrm{pa}^j_i)$ is the number of cases in $D$ for which $X_i = x^k_i$ and $\mathrm{pa}(X_i) = \mathrm{pa}^j_i$. Considering that the dataset is complete, for each possible value $\mathrm{pa}^j_i$ of the parents $\mathrm{pa}(X_i)$ of the node $X_i$, the probability $P(X_i \mid \mathrm{pa}^j_i)$ is an independent categorical distribution, not related to any other configuration $\mathrm{pa}^l_i$ of $\mathrm{pa}(X_i)$ for $j \neq l$. Therefore, as the result of the MLE method we obtain the estimated parameter $\hat{\theta}$ as follows:
$$\hat{\theta}_{ijk} = \frac{N(x^k_i, \mathrm{pa}^j_i)}{N(\mathrm{pa}^j_i)},$$
where $N(\mathrm{pa}^j_i)$ denotes the number of cases in which the configuration $\mathrm{pa}^j_i$ appears in the full set of observations for the vector of variables $\mathrm{pa}(X_i)$. Note that in general the MLE approach attempts to find the parameter vector $\theta$ that is "the best" given the data $C$. On the other hand, the Bayesian approach does not attempt to find such a point estimate. Instead, the underlying principle is that we should keep track of our beliefs about the values of $\theta$ and use these beliefs for reaching conclusions. In other words, we should quantify the subjective probability we have initially assigned to different values of $\theta$, taking into account new evidence. Note that in representing such subjective probabilities we now treat $\theta$ as a random variable. Thus, the Bayesian approach is based on the Bayes rule
$$p(\theta \mid C) = \frac{p(C \mid \theta)\, p(\theta)}{p(C)}. \tag{3.4}$$
Hence, the basic idea of the Bayesian method for parameter learning is the following. We treat $\theta$ as a random variable with a prior distribution $p(\theta)$, and it is very common to set $p$ as the uniform distribution, especially in the case when we have no prior knowledge about $\theta$. Given a distribution with unknown parameters and a complete set of observed data $C$, new beliefs about $\theta$, namely $p(\theta \mid C)$, can be estimated according to the previous knowledge. The aim is to calculate $p(\theta \mid C)$, which is called the posterior probability of the parameter $\theta$. For computational efficiency we want to use a conjugate prior, i.e. one for which the posterior distribution after conditioning on the data is in the same parametric family as the prior. Here we assume that each vector $\theta_{ij}$ has the prior Dirichlet distribution, so that
$$p(\theta_{ij}) = \mathrm{Dir}(\theta_{ij} \mid \alpha_{ij1}, \ldots, \alpha_{ijr_i}) = \frac{\Gamma(\alpha_{ij})}{\prod^{r_i}_{k=1} \Gamma(\alpha_{ijk})} \prod^{r_i}_{k=1} \theta_{ijk}^{\alpha_{ijk}-1}, \tag{3.5}$$
where $\alpha_{ij} = \sum^{r_i}_{k=1} \alpha_{ijk}$ and $\alpha_{ij1}, \ldots, \alpha_{ijr_i} > 0$
are hyperparameters, and $\Gamma(\cdot)$ is the Gamma function. This is the standard conjugate prior to both the categorical and the multinomial distribution. Hence, the probability of the observed samples is
$$p(D) = \int p(\theta_{ij})\, p(D \mid \theta_{ij})\, \mathrm{d}\theta_{ij} = \int \frac{\Gamma(\alpha_{ij})}{\prod^{r_i}_{k=1} \Gamma(\alpha_{ijk})} \prod^{r_i}_{k=1} \theta_{ijk}^{\alpha_{ijk}-1} \times \prod^{r_i}_{k=1} \theta_{ijk}^{N_{ijk}}\, \mathrm{d}\theta_{ij} = \frac{\Gamma(\alpha_{ij})}{\Gamma(\alpha_{ij} + N_{ij})} \prod^{r_i}_{k=1} \frac{\Gamma(\alpha_{ijk} + N_{ijk})}{\Gamma(\alpha_{ijk})}, \tag{3.6}$$
where for shortness $N_{ijk} = N(x^k_i, \mathrm{pa}^j_i)$ and $N_{ij} = N(\mathrm{pa}^j_i) = \sum^{r_i}_{k=1} N_{ijk}$. The integral is $(r_i - 1)$-dimensional over the set $\{\theta_{ijk} \geq 0,\ 2 \leq k \leq r_i,\ \sum^{r_i}_{k=2} \theta_{ijk} \leq 1\}$. As we have already mentioned, in the Bayesian method, if we do not have a prior distribution, we assume it to be uniform, which is consistent with the principle of maximum entropy in information theory: the uniform distribution maximizes the entropy of random variables with bounded support. Thus, if there is no information used for the determination of the prior distribution, we set the hyperparameters $\alpha_{ij1} = \cdots = \alpha_{ijr_i} = 1$. Combining (3.4), (3.5) and (3.6), under the assumptions of parameter independence and complete data we finally obtain the posterior distribution as follows:
$$p(\theta_{ij} \mid D) = \mathrm{Dir}(\theta_{ij} \mid \alpha_{ij1} + N_{ij1}, \ldots, \alpha_{ijr_i} + N_{ijr_i}). \tag{3.7}$$
Therefore, we have an estimate for each parameter $\theta_{ijk}$ from the data $D$ as follows:
$$\hat{\theta}_{ijk} = \frac{\alpha_{ijk} + N_{ijk}}{\alpha_{ij} + N_{ij}}, \quad 1 \leq k \leq r_i.$$
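Both the MLE and the Bayesian (posterior-mean) estimators act on the same table of counts $N_{ijk}$; a minimal sketch with our own helper name:

```python
import numpy as np

def estimate_cpd(N, alpha=None):
    """Estimate the CPD parameters of one node from its count matrix N.

    N     : array of shape (q_i, r_i); N[j, k] = N(x_i^k, pa_i^j)
    alpha : Dirichlet hyperparameters of the same shape (None -> pure MLE)
    """
    N = np.asarray(N, dtype=float)
    if alpha is None:
        return N / N.sum(axis=1, keepdims=True)                  # MLE: N_ijk / N_ij
    A = np.asarray(alpha, dtype=float) + N
    return A / A.sum(axis=1, keepdims=True)                      # posterior mean

counts = np.array([[8, 2],    # parent configuration pa^1
                   [1, 9]])   # parent configuration pa^2
print(estimate_cpd(counts))                        # rows [0.8, 0.2], [0.1, 0.9]
print(estimate_cpd(counts, np.ones_like(counts)))  # uniform prior: (N+1)/(N_ij+r_i)
```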
Continuous variable networks. When we were discussing the MLE method for discrete BNs, we mentioned the global decomposition rule, which applies to any type of CPD. That is, if the data are complete, the learning problem reduces to a set of local learning problems, one for each variable. The main difference is in applying the maximum likelihood estimation process to a CPD of a different type: how we define the sufficient statistics, and how we compute the maximum likelihood estimate from them. In this paragraph, we briefly discuss how MLE principles can be applied in the setting of linear Gaussian Bayesian networks. Consider a variable $X$ with parents $U = \{U_1, \ldots, U_k\}$ with the linear Gaussian CPD
$$p(X \mid u) = \mathcal{N}(\beta_0 + \beta_1 u_1 + \cdots + \beta_k u_k,\ \sigma^2).$$
Our task is to learn the parameters $\hat{\theta}_{X|U} = (\beta_0, \beta_1, \ldots, \beta_k, \sigma^2)$. To find the MLE values of these parameters, we need to differentiate the likelihood function and solve the equations that define a stationary point. As usual, it is easier to work with the log-likelihood function. Using the definition of the Gaussian distribution, we have that
$$\ell(\theta_{X|U} : D) = \log L_X(\theta_{X|U} : D) = \sum_{l} \left[ -\frac{1}{2}\log(2\pi\sigma^2) - \frac{1}{2\sigma^2}\left(\beta_0 + \beta_1 u_1[l] + \cdots + \beta_k u_k[l] - x[l]\right)^2 \right].$$
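Maximizing this log-likelihood over $(\beta_0, \ldots, \beta_k)$ is ordinary least squares with an intercept column, and the MLE of $\sigma^2$ is the mean squared residual. A minimal sketch (our own helper, not code from the thesis):

```python
import numpy as np

def fit_linear_gaussian(U, x):
    """MLE for p(X | u) = N(beta_0 + beta^T u, sigma^2).

    U : array of shape (m, k) with observations of the parents
    x : array of shape (m,) with observations of X
    """
    m = len(x)
    A = np.column_stack([np.ones(m), U])          # prepend the intercept column
    beta, *_ = np.linalg.lstsq(A, x, rcond=None)  # solves the normal equations
    sigma2 = np.mean((x - A @ beta) ** 2)         # MLE variance (no d.o.f. correction)
    return beta, sigma2
```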
We consider the gradients of the log-likelihood with respect to all of the parameters $\beta_0, \ldots, \beta_k$ and $\sigma^2$; setting them to zero yields a system of linear equations whose solution is the MLE. From Theorem 7.3 in Koller and Friedman (2009) it follows that if $B$ is a linear Gaussian Bayesian network, then it defines a joint distribution that is jointly Gaussian, and the MLE estimate has to match the constraints implied by it. Briefly speaking, to estimate $p(X \mid \mathbf{U})$ we estimate the means of $X$ and $\mathbf{U}$ and the covariance matrix of $\{X\} \cup \mathbf{U}$ from the data. The vector of means and the covariance matrix define the joint Gaussian distribution over $\{X\} \cup \mathbf{U}$. Then, for example using the formulas provided by Theorem 7.3 in Koller and Friedman (2009), we find the unique linear Gaussian CPD that matches the joint Gaussian with these parameters. The sufficient statistics we need to collect to estimate linear Gaussians are the univariate terms of the form $\sum_m x[m]$ and $\sum_m u_i[m]$, and the interaction terms of the form $\sum_m x[m] \cdot u_i[m]$ and $\sum_m u_j[m] \cdot u_i[m]$. From these we can estimate the mean vector and the covariance matrix of the joint distribution.

3.2 Inference in Bayesian networks

In this section we assume that the network structure is known, meaning we know all the existing edges and their directions as well as all the CPDs. The problem of inference for BNs is a challenging task in its own right, and there is a lot of research on the subject.
We will not go into much detail on inference, since our focus is on learning the structure of BNs. However, the question of inference is worth mentioning here in order to get a complete picture of such a powerful tool as BNs. First, we discuss what the notion of inference means in the case of BNs. Typically it refers to:

• marginal inference, i.e. finding the probability of a variable being in a certain state, given that other variables are set to certain values; or

• maximum a posteriori (MAP) inference, i.e. finding the values of a given set of variables that best explain (in the sense of the highest MAP probability) why a set of other variables have certain values.

Let us demonstrate both categories of questions using an example. We will use the BN structure of the well-known ASIA network (see Figure 3.1), first introduced in Lauritzen and Spiegelhalter (1988). It illustrates the causal structure of a patient having a certain lung disease based on several factors, one being whether or not the patient has recently been to Asia. In this setting, an exemplary question of marginal inference might be: what is the probability that a patient who is a smoker and has dyspnoea suffers from a certain lung disease, e.g. lung cancer?
For MAP inference, we might want to know the most likely set of conditions (with "smoking" and "dyspnoea" excluded) that could have caused the symptoms mentioned above. Now we provide short descriptions of the most popular exact and approximate inference algorithms for BNs. Among them are variable elimination and belief propagation for marginal inference, methods for MAP inference, and sampling-based inference. For transparency of presentation, the inference methods for BNs will be demonstrated for the discrete and finite case.

Figure 3.1: The ASIA Bayesian network structure.

3.2.1 Variable Elimination

This inference algorithm is defined in terms of so-called factors and was developed to answer questions of marginal inference. Factors generalize the notion of CPDs. A factor $\phi$ is a function with positive real values of the value assignments of a set of random variables $\mathbf{V}$. The set of variables $\mathbf{V}$ is called the scope of the factor. There are two operations on factors that are repeatedly performed in the variable elimination algorithm (VE) and hence are of great importance.
• The factor product. If $\mathbf{V}_1$, $\mathbf{V}_2$ and $\mathbf{V}_3$ are disjoint sets of variables and we have factors $\phi_1$ and $\phi_2$ with scopes $\mathbf{V}_1 \cup \mathbf{V}_2$ and $\mathbf{V}_2 \cup \mathbf{V}_3$ respectively, then we define the factor product $\phi_1 \cdot \phi_2$ as a new factor $\psi$ with scope $\mathbf{V}_1 \cup \mathbf{V}_2 \cup \mathbf{V}_3$ given by

$$\psi(\mathbf{V}_1, \mathbf{V}_2, \mathbf{V}_3) = \phi_1(\mathbf{V}_1, \mathbf{V}_2) \cdot \phi_2(\mathbf{V}_2, \mathbf{V}_3).$$

This product is a new factor over the union of the variables, defined for each instantiation by multiplying the value of $\phi_1$ on that instantiation by the value of $\phi_2$ on the corresponding instantiation. More precisely, $\psi(\mathbf{v}_1, \mathbf{v}_2, \mathbf{v}_3) = \phi_1(\mathbf{v}_1, \mathbf{v}_2) \cdot \phi_2(\mathbf{v}_2, \mathbf{v}_3)$ for each instantiation, where $\mathbf{v}_1 \in \mathrm{Val}(\mathbf{V}_1)$, $\mathbf{v}_2 \in \mathrm{Val}(\mathbf{V}_2)$ and $\mathbf{v}_3 \in \mathrm{Val}(\mathbf{V}_3)$.

• The factor marginalization. This operation "locally" eliminates a set of variables from a factor. If we have a factor $\phi(\mathbf{V}_1, \mathbf{V}_2)$ over two sets of variables $\mathbf{V}_1$, $\mathbf{V}_2$, marginalizing $\mathbf{V}_2$ produces a new factor

$$\tau(\mathbf{V}_1) = \sum_{\mathbf{V}_2} \phi(\mathbf{V}_1, \mathbf{V}_2),$$

where the sum runs over all joint assignments of the set of variables $\mathbf{V}_2$. More precisely, $\tau(\mathbf{v}_1) = \sum_{\mathbf{v}_2 \in \mathrm{Val}(\mathbf{V}_2)} \phi(\mathbf{v}_1, \mathbf{v}_2)$ for each instantiation $\mathbf{v}_1 \in \mathrm{Val}(\mathbf{V}_1)$.

Thus, in the language of factors we can write our distribution over all variables as a product of factors, where each factor represents a CPD as in (2.2):

$$P(X_1, X_2, \ldots, X_n) = \prod_{i=1}^{n} \phi_i(A_i), \tag{3.8}$$
where $A_i = (X_i, \mathrm{pa}_G(X_i))$ denotes the set of variables consisting of the $i$-th variable and its parents in the network.

Now we can describe the full VE algorithm. Assume we want to find the marginal distribution of one fixed variable among $X_1, \ldots, X_n$. First we need to choose the order $O$ in which to eliminate the remaining variables. The choice of an optimal elimination ordering $O$ is an NP-hard problem, and it may dramatically affect the running time of the variable elimination algorithm. Some intuitions and techniques for choosing an adequate ordering are given, for example, in Koller and Friedman (2009). For each variable $X_i$ (taken according to the ordering $O$) we perform the following steps:

• multiply all factors containing $X_i$ (on the first round, all the $\phi_i$ containing $X_i$);

• marginalize out $X_i$ according to the definition of factor marginalization, obtaining a new factor $\tau$ (which does not necessarily correspond to a probability distribution, even though each $\phi$ is a CPD);

• replace the factors used in the first step with $\tau$.

Essentially, we loop over the variables as ordered by $O$ and eliminate them in this order. In performing those steps we use simple properties of products and summations of factors: both operations are commutative, and products are associative. The most important rule is that we can exchange summation and product, meaning that if a set of variables $\mathbf{X}$ is not in the scope of the factor $\phi_1$, then

$$\sum_{\mathbf{X}} \phi_1 \cdot \phi_2 = \phi_1 \cdot \sum_{\mathbf{X}} \phi_2. \tag{3.9}$$
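The following minimal sketch implements the two factor operations and the elimination loop for discrete factors stored as numpy arrays; the three-variable chain at the bottom is an invented toy example, not one from the text.

```python
import numpy as np

# A factor is a pair (scope, table): scope is a string of one-letter variable
# names, and table has one axis per variable in the scope.

def product(f1, f2):
    """Factor product: align the tables on the union of the two scopes."""
    (s1, t1), (s2, t2) = f1, f2
    out = ''.join(dict.fromkeys(s1 + s2))        # scope union, order preserved
    return out, np.einsum(f'{s1},{s2}->{out}', t1, t2)

def marginalize(f, var):
    """Factor marginalization: sum a single variable out of a factor."""
    s, t = f
    i = s.index(var)
    return s[:i] + s[i + 1:], t.sum(axis=i)

def eliminate(factors, order):
    """Variable elimination: multiply, sum out, replace -- per variable."""
    for x in order:
        used = [f for f in factors if x in f[0]]
        tau = used[0]
        for f in used[1:]:
            tau = product(tau, f)                # step 1: multiply
        tau = marginalize(tau, x)                # step 2: marginalize out x
        factors = [f for f in factors if x not in f[0]] + [tau]  # step 3
    result = factors[0]
    for f in factors[1:]:
        result = product(result, f)
    return result

# Toy chain A -> B -> C; the query P(C) eliminates A, then B.
phi_A  = ('a',  np.array([0.6, 0.4]))                  # P(A)
phi_BA = ('ba', np.array([[0.7, 0.2], [0.3, 0.8]]))    # phi(b, a) = P(b | a)
phi_CB = ('cb', np.array([[0.9, 0.4], [0.1, 0.6]]))    # phi(c, b) = P(c | b)
print(eliminate([phi_A, phi_BA, phi_CB], order='ab'))  # ('c', [0.65, 0.35])
```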
So far we have seen that the VE algorithm can answer queries of the form $P(\mathbf{V})$, where $\mathbf{V}$ is some subset of variables. In addition to this type of question, however, it can answer marginal queries of the form

$$P(\mathbf{Y} \mid \mathbf{E} = \mathbf{e}) = \frac{P(\mathbf{Y}, \mathbf{E} = \mathbf{e})}{P(\mathbf{E} = \mathbf{e})},$$

where $P(\mathbf{X}, \mathbf{Y}, \mathbf{E})$ is a probability distribution over the sets of query variables $\mathbf{Y}$, observed evidence variables $\mathbf{E}$, and unobserved variables $\mathbf{X}$. We can compute this probability by performing variable elimination once on $P(\mathbf{Y}, \mathbf{E} = \mathbf{e})$ and then once again on $P(\mathbf{E} = \mathbf{e})$, taking into account only the instantiations consistent with $\mathbf{E} = \mathbf{e}$. An exemplary run of the VE algorithm is presented in Table 3.1. It corresponds to the Extended Student example first mentioned in Section 2.2.

Step  Variable eliminated  Factors used                     Variables involved  New factor
1     C                    φC(C), φD(D, C)                  C, D                τ1(D)
2     D                    φG(G, I, D), τ1(D)               G, I, D             τ2(G, I)
3     I                    φI(I), φS(S, I), τ2(G, I)        G, S, I             τ3(G, S)
4     H                    φH(H, G, J)                      H, G, J             τ4(G, J)
5     G                    τ3(G, S), τ4(G, J), φL(L, G)     G, J, L, S          τ5(J, L, S)
6     S                    τ5(J, L, S), φJ(J, L, S)         J, L, S             τ6(J, L)
7     L                    τ6(J, L)                         J, L                τ7(J)

Table 3.1: A run of variable elimination for the query P(J).

3.2.2 Message Passing Algorithms

Markov random fields. In the framework of probabilistic graphical models there exists another technique for compact representation and visualization of a probability distribution, which is formulated in the language of undirected graphs.
This class of models (known as Markov Random Fields, or MRFs) can succinctly represent independence assumptions that directed models cannot represent, and the opposite is also true. There are advantages and drawbacks to both of these methods, but that is not the focus of this thesis. We will introduce and discuss MRFs only to the extent we need to properly describe and explain notions and methods concerning BNs. Note that the methods provided below for marginal and MAP inference are applicable both to MRFs and to BNs.

Definition 3.1. A Markov Random Field (MRF) is a probability distribution over variables $X_1, \ldots, X_n$ defined by an undirected graph $G$ in which the nodes correspond to the variables $X_i$. The probability has the form

$$P(X_1, X_2, \ldots, X_n) = \frac{1}{Z} \prod_{c \in C} \phi_c(X_c),$$

where $C$ denotes the set of cliques (i.e. fully connected subgraphs) of $G$, and each factor $\phi_c$ is a non-negative function over the variables in the clique. The partition function

$$Z = \sum_{(x_1, \ldots, x_n)} \prod_{c \in C} \phi_c(X_c)$$

is a normalizing constant that ensures that the distribution sums to one; the summation is taken over all possible instantiations of all the variables.
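A brute-force rendering of this definition for a tiny chain MRF $A - B - C$ with two pairwise clique factors; the factor values are arbitrary, chosen only to make the example concrete.

```python
import itertools
import numpy as np

phi_AB = np.array([[3.0, 1.0], [1.0, 2.0]])   # factor over the clique {A, B}
phi_BC = np.array([[2.0, 1.0], [1.0, 4.0]])   # factor over the clique {B, C}

# Partition function: sum the product of the clique factors over all
# instantiations of the binary variables (A, B, C).
Z = sum(phi_AB[a, b] * phi_BC[b, c]
        for a, b, c in itertools.product((0, 1), repeat=3))

def P(a, b, c):
    """Normalized MRF probability of a single instantiation."""
    return phi_AB[a, b] * phi_BC[b, c] / Z

print(Z, sum(P(*v) for v in itertools.product((0, 1), repeat=3)))  # Z and 1.0
```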
Thus, given a graph $G$, our probability distribution may contain factors whose scope is any clique of $G$; a clique can be a single node, an edge, a triangle, and so on. Note that we do not need to specify a factor for every clique. It is not hard to see that Bayesian networks are a special case of MRFs, with normalizing constant equal to 1, in which the clique factors correspond to the CPDs. Indeed, if we take a directed graph $G$, add side edges between all parents of each node and remove the directionality of the edges, then the CPDs (seen as factors over each variable and its parents) factorize over the resulting undirected graph. This process is called moralization (see Figure 3.2). A Bayesian network can therefore always be converted into an undirected network with normalizing constant 1.

Figure 3.2: Moralization of a Bayesian network.

Message passing. As we mentioned above, the VE algorithm can answer marginal queries of the form $P(\mathbf{Y} \mid \mathbf{E} = \mathbf{e})$. However, if we want to ask the model another query, e.g. $P(\mathbf{Y}_2 \mid \mathbf{E}_2 = \mathbf{e}_2)$, we need to restart the algorithm from scratch. Fortunately, in the process of computing marginals, the VE algorithm produces many intermediate factors $\tau$ as a side product of the main computation, and these turn out to be the same as the ones we need in order to answer other marginal queries.
Many complicated inference problems can be solved by message-passing algorithms, in which simple messages are passed locally among simple elements of the system. An illustrative example was given in MacKay (2003) for the problem of counting soldiers. Consider a line of soldiers walking in the mist. The commander, who is somewhere in the line, wishes to count the soldiers. A direct head count is impossible because of the mist. However, the count can be obtained in a simple way that requires no complex operations, only the soldiers' ability to add two integers and to add 1 to a number. The algorithm consists of the following steps (see Figure 3.3):

• the front soldier in the line says the number 'one' to the soldier behind him;

• the rearmost soldier in the line says the number 'one' to the soldier in front of him;

• every soldier who is told a number by the soldier ahead of him or by the soldier behind him adds 1 to it and passes the new number to the next soldier in the line on the other side.

Figure 3.3: A line of soldiers counting themselves using a message-passing rule-set.

Hence, the commander can find the total number of soldiers by simply adding together three numbers: the number heard from the soldier in front of him, the number heard from the soldier behind him, and 1 for himself. This method exploits a property of the total count: it can be written as the sum of the number of soldiers in front of a point and the number behind that point, two quantities which can be computed separately, because the two groups are separated by the commander.
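A toy rendering of this rule-set for a line of $n$ soldiers; the value of $n$ and the chosen position are arbitrary.

```python
n = 7                                # soldiers indexed 0 (front) to n-1 (rear)

heard_from_front = [0] * n           # message arriving from the soldier ahead
heard_from_back = [0] * n            # message arriving from the soldier behind
for i in range(1, n):                # the front soldier starts by saying 'one'
    heard_from_front[i] = heard_from_front[i - 1] + 1
for i in range(n - 2, -1, -1):       # the rearmost soldier does the same
    heard_from_back[i] = heard_from_back[i + 1] + 1

k = 3                                # any position, e.g. the commander's
total = heard_from_front[k] + heard_from_back[k] + 1
assert total == n                    # ahead + behind + himself
```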
When this separation requirement is satisfied, the message-passing algorithm can be adapted to a general graph with no cycles (see Figure 3.4a for an example): for each soldier we can then uniquely separate the group into 'those in front' and 'those behind' and run the algorithm above. This is not always possible for a graph with cycles; for a soldier inside a cycle, such as 'Jim' in Figure 3.4b, the separation is not unique. Using the same principle, we will now describe message passing for tree-structured networks (called belief propagation, BP for short) and then the modification of the method for general networks (called the clique tree algorithm).

Belief propagation. Let us first look at tree-structured graphs. Consider what happens if we run the VE algorithm on a tree in order to compute a marginal distribution $P(X_i)$.

Figure 3.4: A swarm of soldiers. (a) No cycles. (b) Contains a cycle.

We can easily find the optimal ordering for this problem by rooting the tree at the node associated with $X_i$ and iterating through the nodes in post-order (from the leaves to the root), just as for a swarm of soldiers with no cycles.
At each step we eliminate one of the variables, say $X_j$; this involves computing the factor

$$\tau_k(x_k) = \sum_{x_j} \phi(x_k, x_j)\, \tau_j(x_j),$$

where $X_k$ is the parent of $X_j$ in the tree. At a later step, the variable $X_k$ is eliminated in the same manner: $\tau_k(x_k)$ is passed up the tree to the parent $X_l$ of $X_k$, multiplied by the factor $\phi(x_l, x_k)$, and marginalized out, yielding the new factor $\tau_l(x_l)$. The factor $\tau_j(x_j)$ can be thought of as a message that $X_j$ sends to $X_k$, summarizing all of the information from the subtree rooted at the node $X_j$. We can visualize this transfer of information using arrows on the tree; see Figure 3.5. At the end of the VE algorithm, the node $X_i$ has received messages from all of its children, and the final marginal $P(X_i)$ is obtained by combining those messages.

Figure 3.5: Message-passing order when using VE to compute P(X3) on a small tree.

With the same indices as above, suppose that after computing $P(X_i)$ we want to compute $P(X_k)$ as well. We would again run VE on the new tree rooted at the node $X_k$, waiting until $X_k$ receives all messages from its children. Note that the new tree consists of two parts. The first one is the subtree rooted at $X_k$ together with all its descendants from the original tree (i.e.
the tree rooted at $X_i$). The other part is the subtree rooted at $X_l$, which was the parent of $X_k$ in the original tree but is now a child of $X_k$; this part therefore contains the node $X_i$. The key insight is that the messages received by $X_k$ from $X_j$ will now be the same as those received when $X_i$ was the root. Thus, if we store the intermediary messages of the VE algorithm, we can quickly compute other marginals as well. Notice, however, that the messages sent to $X_k$ from the subtree containing $X_i$ will need to be recomputed.

So, how do we compute all the messages we need? Referring again to the soldier-counting problem, a node is ready to transmit a message to its parent once it has received the messages from all of its children. All the messages will have been sent after precisely $2|E|$ steps, where $|E|$ is the number of edges in the graph, since each edge carries exactly two messages, one in each direction.

To define the belief propagation (BP) algorithm formally, let us specify the messages that are sent. For the purposes of marginal inference we use sum-product message passing, defined as follows: while there is a node $X_k$ ready to transmit to $X_l$, it sends the message

$$m_{k \to l}(x_l) = \sum_{x_k} \phi(x_k)\, \phi(x_k, x_l) \prod_{j \in \mathrm{Nb}(k) \setminus \{l\}} m_{j \to k}(x_k),$$

where $\mathrm{Nb}(k) \setminus \{l\}$ denotes all the neighbours of the $k$-th node excluding the $l$-th node. Note that this message is precisely the factor $\tau$ that $X_k$ would transmit to $X_l$ during a round of variable elimination with the goal of computing $P(X_i)$; note also that the product on the right-hand side of this equation naturally equals 1 for the leaves of the tree.
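A minimal sketch of these sum-product messages on a three-node chain $X_1 - X_2 - X_3$ with binary variables; the unary and pairwise factor values are illustrative.

```python
import numpy as np

# Unary factors phi(x_k) and pairwise factors phi(x_k, x_l).
phi1 = np.array([0.6, 0.4])
phi2 = np.array([0.5, 0.5])
phi3 = np.array([0.2, 0.8])
phi12 = np.array([[0.9, 0.1], [0.1, 0.9]])      # phi12[x1, x2]
phi23 = np.array([[0.8, 0.2], [0.2, 0.8]])      # phi23[x2, x3]

# The leaves transmit first; their incoming product is empty, i.e. equal to 1.
m1_to_2 = (phi1[:, None] * phi12).sum(axis=0)   # m_{1->2}(x2): sums out x1
m3_to_2 = (phi23 * phi3[None, :]).sum(axis=1)   # m_{3->2}(x2): sums out x3

belief = phi2 * m1_to_2 * m3_to_2               # phi(x2) times incoming messages
print(belief / belief.sum())                    # the marginal P(X2)
```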
After having computed all the messages, we can answer a marginal query over any variable $X_j$ in constant time, using the equation

$$P(X_j) \propto \psi(X_j) \prod_{l \in \mathrm{Nb}(j)} m_{l \to j}(x_j),$$

where $\psi(X_j)$ is the product of all factors $\phi$ whose scope contains $X_j$. In the case of BNs we have equality instead of proportionality.

Clique Tree Algorithm. First let us define what is meant by a clique tree. A clique tree is an undirected tree whose nodes are clusters $C_i$ of variables, i.e. each $C_i$ is a subset of the set of all variables $\{X_1, \ldots, X_n\}$. Each edge between clusters $C_i$ and $C_j$ is associated with a sepset (separation set) $S_{i,j} = C_i \cap C_j$. A simple example of a clique tree for a chain network is shown in Figure 3.6.

So far we have assumed that the graph is a tree. What if that is not the case? Then the clique tree algorithm (also called the junction tree algorithm in the literature) can be used. It partitions the graph into clusters of variables such that the interactions among clusters have a tree structure, i.e. a cluster is directly influenced only by its neighbours in this tree, which we denote by $T$. We can then perform message passing on this tree.
This leads to tractable global solutions if the local (cluster-level) problems can be solved exactly. In addition, clique trees must satisfy the following two properties:

1. family preservation, i.e. for each factor $\phi$ there is a cluster such that the factor's scope is a subset of the cluster;

2. running intersection property (RIP), i.e. for each pair of clusters $C_i$, $C_j$ and each variable $X \in C_i \cap C_j$, all clusters and sepsets on the unique path between $C_i$ and $C_j$ contain the variable $X$.

Figure 3.6: An example of a chain network consisting of three variables A, B and C; the corresponding MRF and a clique tree with C1 = {A, B}, C2 = {B, C} and S1,2 = {B}.

Note that we may always find a trivial clique tree with one node containing all the variables of the original graph, but such trees are obviously useless. Optimal trees are the ones that make the clusters as small and modular as possible; unfortunately, as in the case of VE, the problem of finding the optimal tree is NP-hard. A special case in which we can find it is when the original graph is itself a tree: then we can put each connected pair of nodes into a separate cluster, and it is easy to check that both conditions are met. One of the practical ways to find a good clique tree is to use a simulation of VE, i.e.
the elimination ordering fixed for VE induces a graph, from which we take the maximal cliques as our clusters and form a tree; RIP is then satisfied automatically. Note that we do not need to actually run VE, only to simulate it for a chosen ordering and obtain the induced graph. More formally:

Definition 3.2. Let $\Phi$ be a set of factors (CPDs in the case of Bayesian networks) over $\mathcal{X} = \{X_1, \ldots, X_n\}$, and let $\prec$ be an elimination ordering for some subset $\mathbf{X} \subseteq \mathcal{X}$. The induced graph, denoted $I_{\Phi, \prec}$, is the undirected graph over $\mathbf{X}$ in which $X_i$ and $X_j$ are connected by an edge if they both appear in some intermediate factor $\psi$ generated by the VE algorithm using $\prec$ as the elimination ordering.

Figure 3.7 shows the induced graph for the Student example under the elimination ordering of Table 3.1, the cliques in that graph, and a corresponding clique tree. One can see that RIP is satisfied; for a proof that the trees obtained from graphs induced by VE satisfy RIP, see Koller and Friedman (2009).

Now let us define the full clique tree algorithm. First, we define the potential $\psi_i(C_i)$ of each cluster $C_i$ as the product of all the factors $\phi$ in $G$ that have been assigned to $C_i$.
By the family preservation property this is well-defined, and we may assume that our distribution is of the form

$$P(X_1, \ldots, X_n) = \frac{1}{Z} \prod_i \psi_i(C_i).$$

Then, at each step of the algorithm, we choose a pair of adjacent clusters $C_i$, $C_j$ in the tree $T$ and compute a message whose scope is the sepset $S_{i,j}$ between the two clusters:

$$m_{i \to j}(S_{i,j}) = \sum_{C_i \setminus S_{i,j}} \psi_i(C_i) \prod_{l \in \mathrm{Nb}(i) \setminus \{j\}} m_{l \to i}(S_{l,i}). \tag{3.10}$$

In the context of clusters, $\mathrm{Nb}(i)$ denotes the set of indices of the clusters neighbouring $C_i$. We choose $C_i$ and $C_j$ only if $C_i$ has received messages from all of its neighbours except $C_j$. Just as in belief propagation, this procedure terminates in exactly $2|E_T|$ steps, since the process is equivalent to making an upward pass and a downward pass. In the upward pass, we first pick a root and send all messages towards it, starting from the leaves. When this process is complete, the root has received all the messages, so it can send the appropriate message to each of its children; this continues until the leaves of the tree are reached, at which point no more messages need to be sent. This second phase is called the downward pass. After it terminates, we define the belief of each cluster based on all the messages that it receives:

$$\beta_i(C_i) = \psi_i(C_i) \prod_{l \in \mathrm{Nb}(i)} m_{l \to i}(S_{l,i}). \tag{3.11}$$
These updates are often referred to as Shafer-Shenoy updates, and the full procedure is also known as sum-product belief propagation. Each belief is then the marginal of its clique:

$$\beta_i(C_i) = \sum_{\mathcal{X} \setminus C_i} P(X_1, \ldots, X_n).$$

Now, if we need to compute the marginal probability of a particular variable $X$, we can select any clique whose scope contains $X$ and eliminate the redundant variables of that clique. A key point is that the result of this process does not depend on the clique we select: if $X$ appears in two cliques, they must agree on its marginal. Two adjacent cliques $C_i$ and $C_j$ are said to be calibrated if

$$\sum_{C_i \setminus S_{i,j}} \beta_i(C_i) = \sum_{C_j \setminus S_{i,j}} \beta_j(C_j).$$

A clique tree $T$ is calibrated if all pairs of adjacent cliques are calibrated. For a calibrated clique tree, we use the term clique beliefs for the $\beta_i(C_i)$ and sepset beliefs for the $\mu_{i,j}(S_{i,j})$ defined as either side of the above equality. The end result of the sum-product belief propagation procedure is a calibrated tree, which is more than simply a data structure storing the results of probabilistic inference, i.e. the beliefs (3.11), for all of the cliques in the tree. It can also be viewed as an alternative representation of the joint measure over all variables.
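A toy run of (3.10) and (3.11) on the two-clique tree of Figure 3.6, with $C_1 = \{A, B\}$, $C_2 = \{B, C\}$ and sepset $S_{1,2} = \{B\}$; the potentials are arbitrary illustrative values, and the final assertion checks the calibration property just defined.

```python
import numpy as np

psi1 = np.array([[3.0, 1.0], [1.0, 2.0]])   # psi_1(A, B), axes (a, b)
psi2 = np.array([[2.0, 1.0], [1.0, 4.0]])   # psi_2(B, C), axes (b, c)

# Messages over the sepset {B}: each clique sums out its non-sepset variable.
m1_to_2 = psi1.sum(axis=0)                  # m_{1->2}(B): sums out A
m2_to_1 = psi2.sum(axis=1)                  # m_{2->1}(B): sums out C

# Beliefs (3.11): the potential times all incoming messages.
beta1 = psi1 * m2_to_1[None, :]             # belief over C1 = {A, B}
beta2 = psi2 * m1_to_2[:, None]             # belief over C2 = {B, C}

# Calibration: both cliques agree on the (unnormalized) marginal over {B}.
assert np.allclose(beta1.sum(axis=0), beta2.sum(axis=1))
```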
For sepset beliefs we have that
\[
\mu_{i,j}(S_{i,j}) = m_{i \to j}(S_{i,j}) \, m_{j \to i}(S_{i,j}).
\]
Using this fact at convergence of the clique tree calibration algorithm, we get the unnormalized joint measure $\tilde{P}$ as
\[
\tilde{P}(X_1, \dots, X_n) = \prod_i \psi_i(C_i) = \frac{\prod_i \beta_i(C_i)}{\prod_{(i,j)} \mu_{i,j}(S_{i,j})}, \qquad (3.12)
\]
where the product in the numerator is over all cliques and the product in the denominator is over all sepsets in the tree. As a result we get a different set of parameters that captures the unnormalized measure defining our distribution (in the case of BNs it is simply the distribution), and no information is lost in the process. Thus, we can view the clique tree as an alternative representation of the joint measure, one that directly reveals the clique marginals.
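On the same kind of toy model, the reparameterization (3.12) can be checked numerically; the tables and names below are a made-up example, not code from the thesis.

    import numpy as np

    psi1 = np.array([[1., 2.], [3., 4.]])   # psi_1(A,B)
    psi2 = np.array([[5., 1.], [2., 2.]])   # psi_2(B,C)
    m12 = psi1.sum(axis=0)                  # message 1 -> 2 over the sepset {B}
    m21 = psi2.sum(axis=1)                  # message 2 -> 1 over the sepset {B}

    beta1 = psi1 * m21[None, :]             # calibrated belief of clique {A,B}
    beta2 = psi2 * m12[:, None]             # calibrated belief of clique {B,C}
    mu = m12 * m21                          # sepset belief mu_{1,2}(B)

    # (3.12): beliefs over sepset beliefs recover the unnormalized measure.
    lhs = np.einsum('ab,bc->abc', psi1, psi2)
    rhs = np.einsum('ab,bc->abc', beta1, beta2) / mu[None, :, None]
    assert np.allclose(lhs, rhs)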
The second approach, mathematically equivalent but using a different intuition, is message passing with division. In sum-product belief propagation, messages were passed between two cliques only after one had received messages from all of its neighbors except the other one, as in (3.10), and the resulting belief was (3.11). Nonetheless, a different approach to compute the same expression is to multiply in all of the messages, and then divide the resulting factor by the message from the other clique to avoid double-counting. To make this notion precise, we must define a factor-division operation. Let $\mathbf{X}$ and $\mathbf{Y}$ be disjoint sets of variables and let $\phi_1$ and $\phi_2$ be two factors with scopes $\mathbf{X} \cup \mathbf{Y}$ and $\mathbf{Y}$, respectively. Then we define the division $\frac{\phi_1}{\phi_2}$ as a factor $\psi$ with scope $\mathbf{X} \cup \mathbf{Y}$ as follows:
\[
\psi(\mathbf{X}, \mathbf{Y}) = \frac{\phi_1(\mathbf{X}, \mathbf{Y})}{\phi_2(\mathbf{Y})},
\]
where we define $\frac{0}{0} = 0$. We now see that we can compute the expression of equation (3.10) by computing the beliefs as in equation (3.11) and then dividing by the remaining message:
\[
m_{i \to j}(S_{i,j}) = \frac{\sum_{C_i \setminus S_{i,j}} \beta_i(C_i)}{m_{j \to i}(S_{i,j})}.
\]
The belief of the $j$-th clique is updated by multiplying its previous belief by $m_{i \to j}$ and dividing it by the previous message passed along this edge (regardless of the direction), stored in the sepset belief $\mu_{i,j}$, to avoid double counting. This algorithm is called belief update message passing and is also known as the Lauritzen-Spiegelhalter algorithm.
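A sketch of one sweep of belief update message passing on the same hypothetical two-clique tree; the division helper leaves entries at zero wherever the divisor is zero, which implements the $\frac{0}{0} = 0$ convention.

    import numpy as np

    def fdiv(phi1, phi2):
        # Factor division; entries where phi2 == 0 are left at 0.
        out = np.zeros(np.broadcast(phi1, phi2).shape)
        np.divide(phi1, phi2, out=out, where=(np.asarray(phi2) != 0))
        return out

    # Hypothetical clique potentials on the tree {A,B} -- {B,C}.
    psi1 = np.array([[1., 2.], [3., 4.]])    # clique {A,B}
    psi2 = np.array([[5., 1.], [2., 2.]])    # clique {B,C}
    beta1, beta2 = psi1.copy(), psi2.copy()  # beliefs start at the potentials
    mu = np.ones(2)                          # sepset belief over B, initially 1

    # Pass 1 -> 2: project beta1 onto the sepset, divide out the stored message.
    sigma = beta1.sum(axis=0)
    beta2 *= fdiv(sigma, mu)[:, None]
    mu = sigma
    # Pass 2 -> 1: the same update in the other direction.
    sigma = beta2.sum(axis=1)
    beta1 *= fdiv(sigma, mu)[None, :]
    mu = sigma

    # One sweep calibrates this tree: the cliques agree on B.
    assert np.allclose(beta1.sum(axis=0), beta2.sum(axis=1))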
3.2.3 MAP inference

The maximum a posteriori (MAP) problem has a broad range of applications, in computer vision, computational biology, speech recognition, and more. By using MAP inference we lose the ability to measure our confidence (or uncertainty) in our conclusions. Nevertheless, there are good reasons for using a single MAP assignment rather than the marginal probabilities of the different variables. The first reason is the preference for obtaining a single coherent joint assignment, whereas a set of individual marginals may not make sense as a whole. The second is that there are inference methods that are applicable to the MAP problem and not to the task of computing probabilities, so that the former may be tractable even when the latter is not. The problem of finding the MAP assignment in the general case is NP-hard (Cooper (1990)). There are two types of maximum a posteriori (MAP) inference: a MAP query and a marginal MAP query. Assume first that the set of all variables $\mathcal{X} = \mathbf{Y} \cup \mathbf{E}$ consists of two disjoint sets, where $\mathbf{E}$ is the evidence, meaning that we know the values of those variables. Then a MAP query aims to find the most likely assignment to all of the non-evidence variables $\mathbf{Y}$:
\[
\mathrm{MAP}(\mathbf{Y} \mid \mathbf{E} = e) = \operatorname*{argmax}_{y} P(\mathbf{Y} = y \mid \mathbf{E} = e).
\]
Now assume that the set of all variables $\mathcal{X} = \mathbf{Y} \cup \mathbf{W} \cup \mathbf{E}$ consists of three disjoint sets, where $\mathbf{E}$ is still the evidence. In this case a marginal MAP query aims to find the most likely assignment to the subset $\mathbf{Y}$, marginalizing over the rest of the variables $\mathbf{W}$:
\[
\mathrm{MAP}(\mathbf{Y} \mid \mathbf{E} = e) = \operatorname*{argmax}_{y} P(\mathbf{Y} = y \mid \mathbf{E} = e) = \operatorname*{argmax}_{y} \sum_{w} P(\mathbf{Y} = y, \mathbf{W} = w \mid \mathbf{E} = e).
\]

Figure 3.7: (a) Induced graph for VE in the Student example, using the elimination order of Table 3.1. (b) Cliques in the induced graph: {C,D}, {D,I,G}, {G,I,S}, {G,J,S,L} and {G,H,J}. (c) Clique tree for the induced graph.

Both tasks can be solved within the same variable elimination (VE) and message passing frameworks as marginal inference, where instead of summation we use maximization. The second type of query is much more complicated both in theory and in practice, since it involves both maximization and summation. In particular, exact inference methods such as VE can be intractable, even in simple networks. Hence, we will first briefly discuss these methods and then introduce some more efficient ones.
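Before turning to the algorithms, a small numerical illustration (the joint table below is invented) shows that the two queries can genuinely disagree on $\mathbf{Y}$:

    import numpy as np

    # Joint table P(Y, W | E = e) for hypothetical binary Y (rows) and W
    # (columns), already conditioned on some evidence e.
    P = np.array([[0.35, 0.05],
                  [0.30, 0.30]])

    # MAP query over (Y, W): the single most likely joint assignment.
    y_map, w_map = np.unravel_index(P.argmax(), P.shape)

    # Marginal MAP query over Y alone: sum out W first, then maximise.
    y_mmap = int(P.sum(axis=1).argmax())

    print(y_map, w_map)   # 0 0  (joint maximum 0.35)
    print(y_mmap)         # 1    (P(Y=1) = 0.60 > P(Y=0) = 0.40)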
Recall that while discussing VE we introduced two operations on factors, which were the foundation of the algorithm. Now we need to introduce one additional operation, called factor maximization. Let $\mathbf{X}$ be a set of variables, and $Y \notin \mathbf{X}$ a variable not belonging to the set $\mathbf{X}$. Let $\phi(\mathbf{X}, Y)$ be a factor over those variables. We define the factor maximization of $Y$ in $\phi$ to be a factor $\psi$ over $\mathbf{X}$ such that
\[
\psi(\mathbf{X}) = \max_{Y} \phi(\mathbf{X}, Y).
\]
More precisely, $\psi(x) = \max_{y \in \mathrm{Val}(Y)} \phi(x, y)$ for each instantiation $x \in \mathrm{Val}(\mathbf{X})$. Similarly to property (3.9), we have that if a set of variables $\mathbf{X}$ is not in the scope of the factor $\phi_1$, then
\[
\max_{\mathbf{X}} (\phi_1 \cdot \phi_2) = \phi_1 \cdot \max_{\mathbf{X}} \phi_2 \qquad (3.13)
\]
and
\[
\max_{\mathbf{X}} (\phi_1 + \phi_2) = \phi_1 + \max_{\mathbf{X}} \phi_2. \qquad (3.14)
\]
This leads us to a max-product variable elimination algorithm for a general MAP query, which is constructed in the same way as the sum-product variable elimination algorithm in Subsection 3.2.1, but with the marginalizing step (summation) replaced by maximization over the corresponding variables. This way we find the maximum value of the joint probability, though the original and more interesting problem is to find the most probable assignment corresponding to that maximum probability. This is done by a traceback procedure, which is quite straightforward (details can be found in Koller and Friedman (2009)).
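A short numerical check of factor maximization and of property (3.13), on made-up tables:

    import numpy as np

    # Factor maximisation: max out Y from phi(X, Y), leaving a factor over X.
    phi = np.array([[0.2, 0.7],    # rows: x, cols: y (hypothetical values)
                    [0.5, 0.1]])
    psi = phi.max(axis=1)          # psi(x) = max_y phi(x, y)

    # Property (3.13): if X is not in the scope of phi1, maximising over X
    # commutes with the product, max_X(phi1 * phi2) = phi1 * max_X phi2.
    phi1 = np.array([2.0, 3.0])                 # scope {Y}
    phi2 = np.array([[0.2, 0.7], [0.5, 0.1]])   # scope {X, Y}, X on axis 0
    lhs = (phi1[None, :] * phi2).max(axis=0)
    rhs = phi1 * phi2.max(axis=0)
    assert np.allclose(lhs, rhs)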
In the process of eliminating variables we find their maximizing value given the values of the variables that have not yet been eliminated. When we pick the value of the final variable, we can then go back and pick the values of the remaining variables accordingly. Recall that the joint distribution $P$ in Bayesian networks is represented by a product of factors, where each factor coincides with a CPD (we introduced this representation in (3.8)). Then we can write the marginal MAP query as
\[
\operatorname*{argmax}_{y} \sum_{\mathbf{W}} P(y, \mathbf{W}) = \operatorname*{argmax}_{y} \sum_{\mathbf{W}} \prod_i \phi_i,
\]
where we skipped the evidence set for transparency of notation, since it does not affect the main point of the discussion. First we compute
\[
\max_{y} \sum_{\mathbf{W}} \prod_i \phi_i.
\]
This form immediately suggests an algorithm combining the ideas of sum-product and max-product variable elimination. Specifically, the summations and maximizations outside the product can be viewed as operations on factors. Thus, to compute the value of this expression, we simply have to eliminate the variables in $\mathbf{W}$ by summing them out, and the variables in $\mathbf{Y}$ by maximizing them out. When eliminating a variable $X$, whether by summation or by maximization, we simply multiply all the factors whose scope involves $X$, and then eliminate $X$ to produce the resulting factor. The ability to perform this step is justified by the interchangeability of factor summation and maximization with the factor product (properties (3.9) and (3.13)). The traceback procedure to find the most probable assignment can also be found in Koller and Friedman (2009).
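A minimal sketch of max-product elimination with traceback, on a hypothetical two-variable model $p(A, B) \propto \phi_1(A)\,\phi_2(A, B)$:

    import numpy as np

    phi1 = np.array([0.6, 0.4])                 # phi1(A)
    phi2 = np.array([[0.9, 0.1], [0.2, 0.8]])   # phi2(A, B), A on axis 0

    tau = phi2.max(axis=1)          # eliminate B by maximisation, per value of A
    argB = phi2.argmax(axis=1)      # remember B's maximising value for each A
    a_star = (phi1 * tau).argmax()  # eliminate A by maximisation
    b_star = argB[a_star]           # traceback: recover B given A = a_star
    print(a_star, b_star)           # (0, 0), the unnormalised MAP assignment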
At first glance it seems that the algorithms for both queries have the same complexity, but that is not the case. It can be shown that even on very simple networks, elimination algorithms can require exponential time to solve a marginal MAP query (see Example 13.7 in Koller and Friedman (2009)). The difficulty comes from the fact that we are not free to choose an arbitrary elimination ordering. When summing out variables, we can utilize the fact that the operations of summing out different variables commute. Thus, when performing summing-out operations for sum-product variable elimination, we could sum out the variables in any order. Similarly, we could use the same flexibility in the case of max-product elimination. Unfortunately, the max and sum operations do not commute. Thus, in order to maintain the correct semantics of marginal MAP queries, as specified in the equation, we must perform all the variable summations before we can perform any of the variable maximizations.
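The non-commutativity is easy to see numerically; on the invented table below, exchanging the sum and the max changes the answer:

    import numpy as np

    phi = np.array([[0.35, 0.05],
                    [0.30, 0.30]])          # phi(Y, W), Y on axis 0
    sum_then_max = phi.sum(axis=1).max()    # correct order: 0.60
    max_then_sum = phi.max(axis=0).sum()    # exchanged order: 0.65
    print(sum_then_max, max_then_sum)       # the two differ, so order matters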
We can also use the message passing framework, or more generally the clique tree algorithm, for MAP inference. In Subsection 3.2.2 we used clique trees to compute the sum-marginals over each of the cliques in the tree. Here, we compute a set of max-marginals over each of those cliques. By the max-marginal of a function $f$ defined on the set $\mathcal{X}$ relative to a set of variables $\mathbf{Y} \subset \mathcal{X}$ we denote the factor that, for each assignment $y$ to $\mathbf{Y}$,
\[
\mathrm{MaxMarginal}_f(y) = \max_{\langle x \rangle_{\mathbf{Y}} = y} f(x)
\]
determines the value of the unnormalized probability of the most likely joint assignment $x \in \mathcal{X}$ consistent with $y$. We compute the whole set for two reasons. First, the set of max-marginals can be a useful indicator for how confident we are in particular components of the MAP assignment. Second, in many cases an exact solution to the MAP problem via a variable elimination procedure is intractable. In this case, to compute approximate max-marginals we can use a message passing procedure in cluster graphs, similar to the clique tree procedure. These pseudo-max-marginals can be used for selecting an assignment; while this assignment is not generally the MAP assignment, we can nevertheless provide some guarantees in certain cases. As before, our task consists of two parts: computing the max-marginals and decoding them to extract a MAP assignment. As for the first part, in the same way as we modified sum-product VE to sum-product message passing, we modify max-product VE to a max-product belief propagation algorithm in clique trees. The resulting algorithm executes precisely the same initialization and overall message scheduling as the sum-product belief propagation algorithm. The only difference is that we use max-product rather than sum-product message passing. As a result of running the algorithm we get a set of max-marginals for every clique of our clique tree. Each belief is the max-marginal of the clique,
\[
\beta_i(C_i) = \mathrm{MaxMarginal}_{p}(C_i),
\]
and all pairs of adjacent cliques are max-calibrated:
\[
\mu_{i,j}(S_{i,j}) = \max_{C_i \setminus S_{i,j}} \beta_i(C_i) = \max_{C_j \setminus S_{i,j}} \beta_j(C_j).
\]
Similarly to sum-product message passing, we get a reparameterization of the distribution in the form (3.12) with the corresponding beliefs of the max-product belief propagation algorithm. Now we need to decode those max-marginals to get a MAP assignment. In the case of variable elimination, we had the max-marginal only for the single last-to-be-eliminated variable and could identify the assignment for that particular variable. To compute the assignments to the rest of the variables, we had to perform a traceback procedure. Now the situation appears different. One obvious solution is to use the max-marginal of each variable to compute its own optimal assignment, and thereby compose a full joint assignment to all variables. However, this simplistic approach works only in the case when there is a unique MAP assignment or, equivalently, each max-marginal has a unique maximal value. For generic probability measures this is not a very rigid constraint; thus, we can find the unique MAP assignment by locally optimizing the assignment to each variable separately. Otherwise, in most cases, to break ties we can introduce a slight random perturbation into all of the factors, making all of the elements in the joint distribution have slightly different probabilities. However, there might be cases when we need to preserve the structure of relationships between some variables; for example, some variables can share parameters, or there might be some deterministic structure that should be preserved. Under these circumstances we find a locally optimal assignment using, for example, the traceback procedure. Afterwards we can verify whether this assignment is a MAP assignment (for the procedure and verification see Koller and Friedman (2009)).
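A sketch of the tie-breaking idea, on a deliberately degenerate made-up table: after a tiny multiplicative jitter the maximum is unique with probability one, so local decoding of the per-variable max-marginals composes the MAP assignment.

    import numpy as np

    rng = np.random.default_rng(0)
    p = np.array([[0.25, 0.25],
                  [0.25, 0.25]])          # all assignments tied: local argmax ambiguous
    # Multiplicative jitter keeps the table positive and breaks ties.
    p_jit = p * np.exp(1e-6 * rng.standard_normal(p.shape))
    x1 = p_jit.max(axis=1).argmax()       # local decode from X1's max-marginal
    x2 = p_jit.max(axis=0).argmax()       # local decode from X2's max-marginal
    assert p_jit[x1, x2] == p_jit.max()   # the local decodes compose the MAP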
MAP as a Linear Optimization Problem. In MAP inference we search for assignments which maximize a certain measure, in our case either the joint probability over all non-evidence variables or the probability over some set of variables. Therefore, it is natural to consider it directly as an optimization problem. There exists extensive literature on optimization algorithms, and we can apply some of those ideas and algorithms to our specific case. The main idea here is to reduce our MAP problem to an Integer Linear Programming (ILP) problem, i.e. an optimization problem over a set of integer-valued variables, where both the objective and the constraints are linear. First, to define the ILP problem we need to turn the product representation of the joint probability as in (3.8) into a sum, replacing the probability with its logarithm. This is possible because all the factors (CPDs) are positive. Hence, we want to compute
\[
\operatorname*{argmax}_{\xi} \prod_{i=1}^{n} \phi_i(A_i) = \operatorname*{argmax}_{\xi} \sum_{i=1}^{n} \log(\phi_i(A_i)),
\]
where $\xi$ is a general assignment for the whole vector of variables in the network, and $A_i = (X_i, \mathrm{pa}_{\mathcal{G}}(X_i))$ represents the set of variables including the $i$-th variable and its parents in the network. Note that the whole discussion in this paragraph is actually identical for MRFs with positive factors, the only difference being the number of factors; but since they are not the focus of this thesis, we formulate everything in the Bayesian networks framework. For variable indices $r \in \{1, \dots, n\}$
we define the number of corresponding possible vector instantiations $n_r = |\mathrm{Val}(A_r)|$. For any joint assignment $\xi$, if this assignment constrained to the variables from $A_r$ takes the value $a_r^j$, $j \in \{1, \dots, n_r\}$, i.e. $\xi_{A_r} = a_r^j$, then the factor $\log(\phi_r)$ makes a contribution to the objective of a quantity denoted as
\[
\eta_r^j = \log(\phi_r(a_r^j)).
\]
We introduce optimization variables $q(x_r^j)$, where $r$ enumerates the different factors and $j$ enumerates the different possible assignments to the variables from $A_r$. These variables take binary values, so that $q(x_r^j) = 1$ if and only if $A_r = a_r^j$, and $0$ otherwise. It is important to distinguish the optimization variables from the random variables in our original graphical model; here we have an optimization variable $q(x_r^j)$ for each joint assignment $a_r^j$ to the model variables $A_r$. Let $q$ denote the vector of the optimization variables $\{q(x_r^j),\ 1 \le r \le n,\ 1 \le j \le n_r\}$ and $\eta$ denote the vector of the coefficients $\eta_r^j$ sorted in the same order. Both of these are vectors of dimension $N = \sum_{r=1}^{n} n_r$. With this interpretation, the MAP objective can be rewritten as
\[
\max_{q} \sum_{r=1}^{n} \sum_{j=1}^{n_r} \eta_r^j \, q(x_r^j) \qquad (3.15)
\]
or, in shorthand, $\max_q \eta^\top q$.
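The following sketch builds $\eta$ and the indicator vector $q$ for a hypothetical chain $A \to B$ (all CPD values invented) and checks that $\eta^\top q$ recovers the log-probability of an assignment:

    import numpy as np

    # Hypothetical chain A -> B with CPD factors phi_A(A) and phi_B(A, B).
    phi_A = np.array([0.7, 0.3])                  # P(A)
    phi_B = np.array([[0.9, 0.1], [0.4, 0.6]])    # P(B | A), A on axis 0

    # eta stacks the log-entries of every factor, one block per factor.
    eta = np.concatenate([np.log(phi_A).ravel(), np.log(phi_B).ravel()])

    def q_vector(a, b):
        # Indicator blocks: q(x_r^j) = 1 iff A_r takes its j-th value.
        qA = np.zeros(2)
        qA[a] = 1.0
        qB = np.zeros((2, 2))
        qB[a, b] = 1.0
        return np.concatenate([qA, qB.ravel()])

    # eta^T q equals log P(A = a, B = b), so maximising over legal q is MAP.
    assert np.isclose(eta @ q_vector(0, 0), np.log(0.7 * 0.9))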
Now that we have an objective to maximize, we need to add consistency constraints that guarantee that an assignment $q \in \{0,1\}^N$ obtained as a solution of the optimization problem is legal, meaning it corresponds to some assignment of $\mathcal{X}$. Namely, first we require that we restrict attention to integer solutions, and then we construct two constraints to make sure that these integer solutions are consistent. The first constraint enforces mutual exclusivity within a factor, and the second implies that factors in our network agree on the variables in the intersection of their scopes. In this way we reformulate the MAP task as an integer linear program, where we optimize the linear objective of equation (3.15) subject to the discussed constraints. We note that the problem of solving integer linear programs is itself NP-hard, so we do not avoid the basic hardness of the MAP problem. One of the methods often used to tackle ILP problems is linear program relaxation. In this approach we turn a discrete, combinatorial optimization problem into a continuous one. The resulting problem is a linear program (LP), which can be solved in polynomial time and for which a range of very efficient algorithms exists. One can then use the solutions to this LP to obtain approximate solutions to the MAP problem. To perform this relaxation, we substitute the condition that the solutions are integer with the relaxed constraint that they are non-negative. This linear program is a relaxation of our original integer program, since every assignment to $q$ that satisfies the constraints of the integer problem also satisfies the constraints of the linear program, but not the other way around.
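As a sketch, assuming SciPy's generic linprog solver is acceptable for a toy instance, the relaxed LP for the same hypothetical chain $A \to B$ can be written out explicitly; on this tree-structured example the optimum happens to come out integral:

    import numpy as np
    from scipy.optimize import linprog

    # Variables: q = [qA(0), qA(1), qB(0,0), qB(0,1), qB(1,0), qB(1,1)],
    # with eta the log-CPD entries of the hypothetical chain A -> B.
    eta = np.log([0.7, 0.3, 0.9, 0.1, 0.4, 0.6])
    A_eq = [[1, 1, 0, 0, 0, 0],    # mutual exclusivity: sum_a qA(a) = 1
            [-1, 0, 1, 1, 0, 0],   # agreement: sum_b qB(0,b) = qA(0)
            [0, -1, 0, 0, 1, 1]]   # agreement: sum_b qB(1,b) = qA(1)
    b_eq = [1, 0, 0]

    # Relaxation: replace integrality by the simple bounds 0 <= q <= 1.
    res = linprog(-eta, A_eq=A_eq, b_eq=b_eq, bounds=(0, 1), method="highs")
    print(res.x)      # integral here: selects A = 0, B = 0
    print(-res.fun)   # log of the MAP value, log(0.7 * 0.9)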
Thus, the optimal value of the objective of the relaxed version will be no less than the value of the (same) objective in the exact version, and it can be greater when the optimal value is achieved at an assignment to $q$ that does not correspond to a legal assignment $\xi$. An important special case is tree-structured graphs, for which the relaxation is guaranteed to always return integer solutions, which are in turn optimal (for a proof and more detailed discussion see Koller and Friedman (2009)). Otherwise we get approximate solutions, which in turn we need to transform into integer (and legal) assignments. One approach is a greedy assignment process, which assigns values to the variables $X_i$ one at a time. Another approach is to round the LP solution to its nearest integer value. This approach works surprisingly well in practice and has theoretical guarantees for some classes of ILPs (Koller and Friedman (2009)). An alternative method for the MAP problem, which also comes from optimization theory, is called dual decomposition. Dual decomposition uses the principle that our problem can be decomposed into sub-problems, together with linear constraints (the same as in the ILP) that enforce some notion of agreement between solutions to the different sub-problems. The sub-problems are chosen such that they can be solved efficiently using exact combinatorial algorithms. The agreement constraints are incorporated using Lagrange multipliers (this is called Lagrangian relaxation), and an iterative algorithm, for example a subgradient algorithm, is used to minimize the resulting dual. The initial work on dual decomposition in probabilistic graphical models was focused on the MAP problem for MRFs (see Komodakis et al. (2007)).
By formulating our problem as a linear program or its dual, we obtain a very flexible framework for solving it; in particular, we can also easily incorporate additional constraints into the LP, which reduce the space of possible assignments of $q$, eliminating some solutions that do not correspond to actual distributions over $\mathcal{X}$. The problems are convex and in principle can be solved directly using standard techniques, but the size of the problems is very large, which makes this approach infeasible in practice. However, the LP has special structure: when viewed as a matrix, the equality constraints in this LP all have a particular block structure that corresponds to the structure of adjacent clusters. Moreover, when the network is not densely connected, the constraint matrix is also sparse; thus, standard LP solvers may not be fully suited to exploiting this special structure. The theory of convex optimization provides a wide spectrum of tools, and some are already being adapted to take advantage of the structure of the MAP problem (see, for example, Wainwright et al. (2005), Sontag and Jaakkola (2007)). The empirical evidence suggests that the more specialized solution methods for MAP problems are often more effective.

Other methods. Another method for solving a MAP problem is local search algorithms. This is a heuristic-type solution, which starts with an arbitrary assignment and performs "moves" on the joint assignment that locally increase the probability. This technique does not offer theoretical justification; however, we can often use prior knowledge to come up with highly effective moves.
Therefore, in practice, local search may perform extremely well. There are also search methods that are more systematic. They search the space so as to ensure that assignments that are not considered are not optimal, and thereby guarantee an optimal solution. Such methods generally search over the space of partial assignments, starting with the empty assignment and successively assigning variables one at a time. One such method is known as branch-and-bound. These methods have much greater applicability in the context of the marginal MAP problem, where most other methods are not currently applicable. In the next subsection we discuss sample-based algorithms, which can be applied both to marginal and MAP inference.

3.2.4 Sampling-based methods for inference

In practice, the probabilistic models that we use can often be quite complex, and simple algorithms like VE may be too slow for them. In addition, many interesting classes of models may not have exact polynomial-time solutions at all, and for this reason, much research effort in machine learning is spent on developing algorithms that yield approximate solutions to the inference problem. In this subsection we consider some sampling methods that can be used to perform both marginal and MAP inference queries; additionally, they can compute various interesting quantities, such as the expectation $\mathbb{E}[f(X)]$ of a function of the random vector distributed according to a given probabilistic model. In general, sampling is a rather hard problem.
The aim is to generate a random sample of observations of $X$. However, our computers can only generate samples from very simple distributions, such as the uniform distribution over $[0, 1]$. All sampling techniques involve calling some kind of simple subroutine multiple times in a properly constructed way. For example, in the case of a multinomial distribution with parameters $\theta_1, \dots, \theta_k$, instead of directly sampling a multinomial variable we can sample a single uniform variable, having previously subdivided the unit interval into $k$ regions, with region $i$ having size $\theta_i$. Then we sample uniformly from $[0, 1]$ and return the region in which our sample falls.

Forward sampling. Now let us return to the case of Bayesian networks (BN). We can apply the same sampling technique to BNs with multinomial variables. We start from the nodes which do not have parents (these variables simply have a multinomial distribution), and we go down the network to the next generation, following the arrows, until we reach the leaves. Therefore, for a particular node we need to wait until all of its parents are sampled. When we know all the values of the parents, the variable naturally has a multinomial distribution. In the Student example, to sample a student's grade, we would first sample an exam difficulty $d'$ and an intelligence level $i'$. Then, once we have samples $d'$ and $i'$, we generate a student grade $g'$ from $P(g \mid d', i')$.
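A minimal sketch of both ideas, with all CPD values invented (a stripped-down version of the Student example):

    import numpy as np

    rng = np.random.default_rng(0)

    def sample_categorical(theta):
        # Subdivide [0, 1] into regions of size theta_i and locate one
        # uniform draw among them.
        return int(np.searchsorted(np.cumsum(theta), rng.uniform()))

    # Made-up CPDs for a stripped-down Student network D -> G <- I.
    P_D = [0.6, 0.4]                       # exam difficulty
    P_I = [0.7, 0.3]                       # intelligence
    P_G = {(0, 0): [0.30, 0.40, 0.30],     # P(G | D, I), three grades
           (0, 1): [0.05, 0.25, 0.70],
           (1, 0): [0.70, 0.25, 0.05],
           (1, 1): [0.50, 0.30, 0.20]}

    d = sample_categorical(P_D)            # roots first ...
    i = sample_categorical(P_I)
    g = sample_categorical(P_G[(d, i)])    # ... then children, given parents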
There is one problem, though: we cannot perform this procedure when we have evidence for any variables besides the roots.

Monte Carlo and rejection sampling. Algorithms that construct solutions based on a large number of samples from a given distribution are referred to as Monte Carlo (MC) methods. Sampling from an arbitrary distribution $p$ lets us compute integrals of the form
\[
\mathbb{E}_{X \sim p}[f(X)] = \sum_{x} f(x) p(x),
\]
where the summation extends over all possible values of $X$ and $p$ can be thought of as the density of $X$ with respect to the counting measure. Below we follow the same interpretation also with regard to joint and conditional distributions. If $f(X)$ does not have special structure that matches the BN structure of $p$, this integral will be impossible to compute analytically; instead, we will approximate it using a large number of samples from $p$. Using the Monte Carlo technique we approximate a target expectation with
\[
\mathbb{E}_{X \sim p}[f(X)] \approx I_T = \frac{1}{T} \sum_{t=1}^{T} f(x^t),
\]
where $x^1, \dots, x^T$ are samples drawn according to $p$. It is easy to show that $I_T$ is an unbiased estimator for $\mathbb{E}_{X \sim p}[f(X)]$, and its variance can be made arbitrarily small with a sufficiently large number of samples. Now let us consider rejection sampling as a special case of Monte Carlo integration. For example, suppose we have a Bayesian network over the set of variables $\mathcal{X} = \mathbf{Z} \cup \mathbf{E}$. We may use rejection sampling to compute marginal probabilities $P(\mathbf{E} = e)$. We can rewrite the probability as
\[
P(\mathbf{E} = e) = \sum_{z} P(\mathbf{Z} = z, \mathbf{E} = e) = \sum_{x} P(\mathcal{X} = x) \, \mathbb{I}(\mathbf{E} = e) = \mathbb{E}_{\mathcal{X} \sim p}[\mathbb{I}(\mathbf{E} = e)]
\]
and then take the Monte Carlo approximation. In other words, we draw many samples from $p$ and report the fraction of samples that are consistent with the value of the marginal.
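A rejection-style Monte Carlo sketch on an invented joint table over $(Z, E)$:

    import numpy as np

    rng = np.random.default_rng(0)

    # Invented joint table joint[z, e]; entries sum to one.
    joint = np.array([[0.08, 0.32],
                      [0.12, 0.48]])

    # Draw joint samples and keep the fraction consistent with E = 1.
    idx = rng.choice(4, size=100_000, p=joint.ravel())
    z, e = np.unravel_index(idx, joint.shape)
    print(np.mean(e == 1))   # ~ 0.80 = P(E = 1); 20% of the draws are wasted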
Importance sampling. Unfortunately, rejection sampling can be very wasteful. If $P(\mathbf{E} = e)$ equals, say, 1%, then we will discard 99% of all samples. A better way of computing such integrals uses importance sampling. The main idea is to sample from an auxiliary distribution $q$ (hopefully with $q(x)$ roughly proportional to $f(x) \cdot p(x)$), and then reweigh the samples in a principled way, so that their sum still approximates the desired integral. More formally, suppose we are interested in computing $\mathbb{E}_{X \sim p}[f(X)]$. Adopting an analogous convention regarding notation for the probability distribution, we may rewrite this integral as
\[
\mathbb{E}_{X \sim p}[f(X)] = \sum_{x} f(x) p(x) = \sum_{x} f(x) \frac{p(x)}{q(x)} q(x) = \mathbb{E}_{X \sim q}[f(X) w(X)] \approx \frac{1}{T} \sum_{t=1}^{T} f(x^t) w(x^t),
\]
where $w(x) = \frac{p(x)}{q(x)}$ and the samples $x^t$ are drawn from $q$. In other words, instead of sampling from $p$ we may take samples from $q$ and reweigh them with $w(x)$; the expected value of this Monte Carlo approximation will be the original integral. By choosing
\[
q(x) = \frac{|f(x)| \, p(x)}{\int |f(x)| \, p(x) \, dx}
\]
we can set the variance of the new estimator to zero. Note that the denominator is the quantity we are trying to estimate in the first place, and sampling from such a $q$ is NP-hard in general.
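A sketch of the general importance-sampling estimator on invented discrete distributions $p$, $q$ and function values $f$:

    import numpy as np

    rng = np.random.default_rng(0)

    p = np.array([0.1, 0.2, 0.7])          # target distribution
    q = np.array([1/3, 1/3, 1/3])          # uniform proposal
    f = np.array([1.0, 4.0, 9.0])          # arbitrary function values

    x = rng.choice(3, size=100_000, p=q)   # sample from q, not from p
    w = (p / q)[x]                         # importance weights w(x) = p(x)/q(x)
    print(np.mean(f[x] * w))               # ~ E_p[f(X)] = 7.2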
Markov chain Monte Carlo. Now let us turn to performing marginal and MAP inference using sampling. We will solve these problems using a very powerful technique called Markov chain Monte Carlo (MCMC). A key concept in MCMC is that of a Markov chain, which is a sequence of random elements having the Markov property (see 2.3). A Markov chain $X = (X_0, X_1, X_2, \ldots)$, with each random vector $X_i$ taking values from the same state space $Val(X)$, is specified by the initial distribution $P(X_0 = x)$, $x \in Val(X)$, and the set of transition probabilities $P(X_{k+1} = x' \mid X_k = x)$ for $x, x' \in Val(X)$, which do not depend on $k$ (in this case the Markov chain is called homogeneous). Therefore, the transition probabilities at any time in the entire process depend only on the given state and not on the history of the process. In what follows we consider finite state spaces only, so we may assume $Val(X) = \{1, \ldots, d\}$, unless stated otherwise.
If the initial state $X_0$ is drawn from a vector of probabilities $p_0$, we may represent the probability $p_t$ of ending up in each state after $t$ steps as $p_t = T^t p_0$, where $T$ denotes the transition probability matrix with $T_{ij} = P(X_{k+1} = i \mid X_k = j)$, $i, j \in \{1, \ldots, d\}$, and $T^t$ denotes matrix exponentiation. If the limit $\lim_{t\to\infty} p_t = \pi$ exists, it is called a stationary distribution of the Markov chain. A sufficient condition for $\pi$ to be a stationary distribution is called detailed balance:
$$\pi(j)\,T_{ij} = \pi(i)\,T_{ji} \quad \text{for all } i, j \in Val(X).$$
The high-level idea of MCMC is to construct a Markov chain whose states are joint assignments to the variables in the model and whose stationary distribution is equal to the model probability $p$. Then, running the chain long enough, we obtain samples from the distribution $p$. In order to construct such a chain, we first recall the conditions under which stationary distributions exist. This turns out to be true under two sufficient conditions: irreducibility, meaning that it is possible to get from any state $x$ to any other state $x'$ with positive probability in a finite number of steps, and aperiodicity, meaning that it is possible to return to any state at any time. In the context of continuous variables, the Markov chain must be ergodic, which is a slightly stronger condition than the above. For the sake of generality, we will require our Markov chains to be ergodic.
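A short numerical check illustrates both $p_t = T^t p_0$ and detailed balance; the 3-state chain below is an invented birth-death chain (hence reversible, so the check passes).

```python
import numpy as np

# Toy birth-death chain; columns are current states, T[i, j] = P(next = i | current = j).
T = np.array([[0.7, 0.2, 0.0],
              [0.3, 0.5, 0.4],
              [0.0, 0.3, 0.6]])
p0 = np.array([1.0, 0.0, 0.0])

pt = np.linalg.matrix_power(T, 200) @ p0    # p_t = T^t p_0
print(pt)                                    # approaches the stationary distribution pi

pi = pt
# Detailed balance: pi(j) T_ij == pi(i) T_ji for all i, j
# (holds here because a birth-death chain is reversible).
lhs = pi[None, :] * T                        # lhs[i, j] = pi(j) T_ij
rhs = (pi[None, :] * T).T                    # rhs[i, j] = pi(i) T_ji
print(np.allclose(lhs, rhs))                 # True
```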
At a high level, MCMC algorithms have the following structure. They take as an argument a transition operator $T$ specifying a Markov chain whose stationary distribution is $p$, and an initial assignment $X_0 = x_0$ of the chain. An MCMC algorithm then performs the following steps:

1. Run the Markov chain from $x_0$ for $B$ burn-in steps.
2. Run the Markov chain for $N$ sampling steps and collect all the states that it visits.

The aim of the burn-in phase is to wait until the state distribution is reasonably close to $p$. Therefore, we omit the first $B$ states visited by the chain and then collect a sample of size $N$ from the chain. A common approach to setting the number $B$ is to use a variety of heuristics to evaluate the extent to which a sample trajectory has "mixed", i.e. when it is reasonably close to $p$ (see Koller and Friedman (2009)). Geyer (2011), on the other hand, advocates that burn-in is unnecessary and uses other ways of finding good starting points, while Gelman and Shirley (2012) propose to discard the first half of the generated sequences. We may then use these samples for Monte Carlo integration (or in importance sampling). We may also use them to produce Monte Carlo estimates of marginal probabilities. Finally, we may take the sample with the highest probability and use it as an estimate of the mode (i.e. perform MAP inference).
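This two-phase structure translates directly into a small driver; here `transition_operator` is a placeholder name for any user-supplied kernel that maps the current state to the next one.

```python
def mcmc(transition_operator, x0, B, N, rng):
    """Generic MCMC driver: B burn-in steps, then collect N visited states."""
    x = x0
    for _ in range(B):                 # burn-in: move toward the stationary p
        x = transition_operator(x, rng)
    samples = []
    for _ in range(N):                 # sampling phase: record every visited state
        x = transition_operator(x, rng)
        samples.append(x)
    return samples
```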
Before we discuss the two most important special cases, note that sampling-based methods have only asymptotic theoretical justification. Their application to finite samples of reasonable size may therefore lead to drastically inaccurate results, especially in sophisticated and complex models. Successful implementation heavily depends on how well we understand the structure of the model, as well as on intensive experimentation. It can also be achieved by combining sampling with other inference methods.

Metropolis-Hastings Algorithm. The Metropolis-Hastings (MH) algorithm (Hastings (1970)) is one of the first ways to construct Markov chains within MCMC. The MH method constructs a transition operator $T$ from two components:

1. A transition kernel $q$ specified by the user. In practice, the distribution $q(x' \mid x)$ can take almost any form; very often it is a Gaussian distribution centered at $x$.
2. An acceptance probability for moves proposed by $q$, specified by the algorithm as
$$A(x' \mid x) = \min\left(1,\ \frac{p(x')\,q(x \mid x')}{p(x)\,q(x' \mid x)}\right).$$

At each step, if the Markov chain is in the state $x$, we choose a new point $x'$ according to the distribution $q$. Then we either accept this proposed change with probability $\alpha = A(x' \mid x)$, or with probability $1 - \alpha$ we remain at our current state.
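A minimal sketch of one MH transition, assuming a symmetric Gaussian proposal (for which the $q$ terms cancel) and an unnormalized target density `p_tilde`; both names are illustrative.

```python
import numpy as np

def mh_step(x, p_tilde, rng, scale=1.0):
    """One Metropolis-Hastings transition with a Gaussian proposal centered at x.

    The proposal is symmetric, q(x'|x) = q(x|x'), so the acceptance
    probability reduces to min(1, p(x') / p(x)).
    """
    x_prop = x + scale * rng.standard_normal()
    alpha = min(1.0, p_tilde(x_prop) / p_tilde(x))
    return x_prop if rng.random() < alpha else x

# Usage with the generic driver above: sample from an unnormalized N(0, 1) target.
rng = np.random.default_rng(2)
p_tilde = lambda x: np.exp(-0.5 * x * x)
samples = mcmc(lambda x, rng: mh_step(x, p_tilde, rng), x0=0.0, B=1000, N=10000, rng=rng)
```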
Notice that the acceptance probability encourages the chain to move towards more likely points in the distribution (imagine, for example, that $q$ is uniform); when $q$ suggests that we move into a low-probability region, we follow that move only a certain fraction of the time. For any choice of $q$, the MH algorithm ensures that $p$ is a stationary distribution of the resulting Markov chain. More precisely, $p$ satisfies the detailed balance condition with respect to the Markov chain generated by the MH algorithm; this is a direct consequence of the definition of $A(x' \mid x)$.

As a result, we wish to build a Markov chain with small correlation between subsequent values, which allows us to explore the support of the target distribution quickly. This correlation involves a trade-off between two effects: the higher the variance of $q$, the lower the correlation between the current state and a newly accepted one, but the more often the chain proposes moves into low-probability regions and stays where it is; the lower the variance of $q$, the more proposals are accepted, but successive states remain close to each other. To choose a good kernel $q$ we need to find a good balance between the two. For multivariate distributions, the covariance matrix of the proposal distribution should reflect the covariance structure of the target.

Gibbs sampling. A widely-used special case of the Metropolis-Hastings method is Gibbs sampling, first described in Geman and Geman (1984). Suppose we have a finite sequence of random variables $X_1, \ldots, X_n$.
We denote the $i$-th sample as $x^{(i)} = (X^{(i)}_1, \ldots, X^{(i)}_n)$. Starting with an arbitrary configuration $x^{(0)}$, we perform the procedure below. Repeat until convergence, for $t = 1, 2, 3, \ldots$:

1. Set $x \leftarrow x^{(t-1)}$.
2. For each variable $X_i$: sample $X'_i \sim P(X_i \mid X_{-i})$ and update $x \leftarrow (X^{(t)}_1, \ldots, X^{(t)}_{i-1}, X'_i, X^{(t-1)}_{i+1}, \ldots, X^{(t-1)}_n)$.
3. Set $x^{(t)} \leftarrow x$.

By $X_{-i}$ we denote all the variables in our set except $X_i$. At each epoch of step 2 only one site undergoes a possible change, so successive samples within an iteration can differ in at most one coordinate.
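The sketch below shows one Gibbs sweep; `full_conditional` is a placeholder for code that samples $P(X_i \mid X_{-i})$, which in a Bayesian network only needs the Markov blanket of $X_i$.

```python
def gibbs_sweep(x, full_conditional, rng):
    """One Gibbs iteration: resample each coordinate from its full conditional.

    x is a list of current values; full_conditional(i, x, rng) draws a new
    value for X_i given all the other coordinates (its Markov blanket).
    """
    x = list(x)                                  # work on a copy of x^{(t-1)}
    for i in range(len(x)):
        x[i] = full_conditional(i, x, rng)       # later coordinates see updated values
    return x                                     # this is x^{(t)}
```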
Note that in step 2 we use the updated values of the variables for which we have already sampled new values. The sampling step is quite easy to perform, because we only condition on the variables from the Markov blanket of $X_i$, which consists of its parents, its children and the other parents of its children. Geman and Geman (1984) showed that the distribution of $x^{(t)}$ converges to $\pi$ as $t \to \infty$ regardless of $x^{(0)}$; the only assumption is that we continue to visit each site, which is obviously a necessary condition for convergence. As in the case of any MCMC algorithm, if we choose an arbitrary starting configuration there is a burn-in phase; for a list of intuitions on how to decide how many samples to discard, see Casella and George (1992). To avoid high correlation between successive samples of the Gibbs sampler, we can also keep every $r$-th sample instead of all of them, which is rather a question of heuristics and experimentation.

3.3 Learning probabilities in BNs for incomplete data

Here we again consider categorical distributions. Suppose we observe a single incomplete case in our data, which we denote as $d \in D$. Under the assumption of parameter independence, we can compute the posterior distribution of $\theta_{ij}$ for our network as follows:
$$p(\theta_{ij} \mid d) = (1 - p(pa^j_i \mid d))\{p(\theta_{ij})\} + \sum_{k=1}^{r_i} p(x^k_i, pa^j_i \mid d)\{p(\theta_{ij} \mid x^k_i, pa^j_i)\}.$$
Each term in curly brackets in this equation is a Dirichlet distribution. Thus, unless both $X_i$ and all the variables in $pa(X_i)$ are observed in case $d$, the posterior distribution of $\theta_{ij}$ will be a linear combination of Dirichlet distributions, that is, a Dirichlet mixture with mixing coefficients $(1 - p(pa^j_i \mid d))$ and $p(x^k_i, pa^j_i \mid d)$, $1 \le k \le r_i$. See Spiegelhalter and Lauritzen (1990) for the details of the derivation.
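A small sketch of this bookkeeping, representing the posterior of $\theta_{ij}$ as a list of weighted Dirichlet components; the inference probabilities passed in are assumed to come from any BN inference routine, and the numbers below are illustrative.

```python
def posterior_after_incomplete_case(prior_alpha, p_pa, p_x_and_pa):
    """Dirichlet-mixture posterior of theta_ij after one incomplete case.

    prior_alpha : Dirichlet hyperparameters of p(theta_ij), length r_i.
    p_pa        : p(pa_i^j | d), probability of the parent configuration.
    p_x_and_pa  : list of p(x_i^k, pa_i^j | d) for k = 1..r_i.

    Returns a list of (weight, alpha_vector) mixture components.
    """
    components = [(1.0 - p_pa, list(prior_alpha))]   # parents not realized: prior kept
    for k, w in enumerate(p_x_and_pa):
        alpha = list(prior_alpha)
        alpha[k] += 1                                # condition on X_i = x_i^k observed
        components.append((w, alpha))
    return components

# One binary variable with a uniform Dir(1, 1) prior and illustrative probabilities.
mix = posterior_after_incomplete_case([1, 1], p_pa=0.6, p_x_and_pa=[0.45, 0.15])
print(mix)   # [(0.4, [1, 1]), (0.45, [2, 1]), (0.15, [1, 2])]
```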
When we observe a second incomplete case, some or all of the Dirichlet components in the previous equation will again split into Dirichlet mixtures. More precisely, the posterior distribution for $\theta_{ij}$ will become a mixture of Dirichlet mixtures. As we continue to observe incomplete cases, where each case has missing values for the same set of variables, the posterior distribution for $\theta_{ij}$ will contain a number of components that is exponential in the number of cases. In general, for any interesting set of local likelihoods and priors, the exact computation of the posterior distribution for $\theta$ will be intractable. Thus, we require an approximation for incomplete data.

One possible approximation uses the Monte Carlo methods discussed previously, for example the Gibbs sampler, which must be irreducible and in which each variable must be chosen infinitely often. More specifically for our case, to approximate $p(\theta \mid D)$ given an incomplete data set, we start with some initial states of the unobserved variables in each case (chosen randomly or otherwise); as a result, we have a complete random sample $D_c$. Then we choose some variable $X_i[l]$ (variable $X_i$ in case $l$) that is not observed in the original random sample $D$, and reassign its state according to the probability distribution
$$p(x'_{il} \mid D_c \setminus \{x_{il}\}) = \frac{p(x'_{il}, D_c \setminus \{x_{il}\})}{\sum_{x''_{il}} p(x''_{il}, D_c \setminus \{x_{il}\})},$$
where $D_c \setminus \{x_{il}\}$ denotes the data set $D_c$ with observation $x_{il}$ removed, and the sum in the denominator runs over all states of the variable $X_i$. Both the numerator and the denominator can be computed efficiently as in (3.6). In the third step we repeat this reassignment for all unobserved variables in $D$, producing a new complete random sample $D'_c$. The fourth step is to compute the posterior density $p(\theta_{ij} \mid D'_c)$ as in (3.7); under the assumption of parameter independence, the joint posterior $p(\theta \mid D'_c)$ will be a product of the densities $p(\theta_{ij} \mid D'_c)$.
Finally, we iterate through the last three steps and use the average of $p(\theta \mid D'_c)$ as our approximation.

Monte Carlo methods yield accurate results, but they are often intractable, for example when the sample size is large. Another approximation, more efficient than Monte Carlo methods and often accurate for relatively large samples, is the Gaussian approximation. The idea is that for large amounts of data we can approximate the distribution $p(\theta \mid D) \propto p(D \mid \theta)\,p(\theta)$ as a multivariate Gaussian distribution, namely
$$p(\theta \mid D) \approx p(D \mid \tilde{\theta})\,p(\tilde{\theta})\exp\left(-\frac{1}{2}(\theta - \tilde{\theta})\,H\,(\theta - \tilde{\theta})^\top\right),$$
where $\tilde{\theta}$ is the configuration of $\theta$ that maximizes $g(\theta) = \ln(p(D \mid \theta)\,p(\theta))$ and $H$ is the negative Hessian of $g(\theta)$ evaluated at $\tilde{\theta}$. The vector $\tilde{\theta}$ is also called the maximum a posteriori (MAP) configuration of $\theta$. Various methods to compute the second derivatives have been proposed in the literature (Meng and Rubin (1991), Raftery (1995), Thiesson (1995)).
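The following sketch finds $\tilde{\theta}$ numerically and builds $H$ by central finite differences; the log-posterior used here is an arbitrary stand-in, and in practice one would use the BN-specific derivative computations cited above.

```python
import numpy as np
from scipy.optimize import minimize

def laplace_approximation(log_post, theta0, eps=1e-4):
    """Gaussian approximation of a posterior: MAP point and negative Hessian H."""
    res = minimize(lambda th: -log_post(th), theta0)   # theta_tilde maximizes g
    theta_map = res.x
    n = len(theta_map)
    H = np.zeros((n, n))
    for a in range(n):                                 # H = -g''(theta_tilde), by
        for b in range(n):                             # central finite differences
            e_a, e_b = np.eye(n)[a] * eps, np.eye(n)[b] * eps
            H[a, b] = -(log_post(theta_map + e_a + e_b)
                        - log_post(theta_map + e_a - e_b)
                        - log_post(theta_map - e_a + e_b)
                        + log_post(theta_map - e_a - e_b)) / (4 * eps ** 2)
    return theta_map, H   # posterior approximately N(theta_map, inv(H))

# Stand-in log-posterior: an uncorrelated Gaussian, so H should recover diag(1, 4).
g = lambda th: -0.5 * (th[0] ** 2 + 4 * th[1] ** 2)
print(laplace_approximation(g, np.array([1.0, 1.0])))
```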
One more way to learn probabilities from incomplete data is the Expectation-Maximization (EM) algorithm. It is an iterative algorithm consisting of two alternating steps, Expectation and Maximization. When the data is incomplete we cannot calculate the likelihood function as in (3.2) and (3.3). Instead of maximizing the likelihood or log-likelihood function, we maximize the expected log-likelihood of the complete data set with respect to the joint distribution for $X$, conditioned on the assigned configuration of the parameter vector $\theta'$ and the known data $D$. The calculation of the expected log-likelihood (Expectation step) amounts to computing expected sufficient statistics. For incomplete data the expected log-likelihood takes the following form:
$$E[\ell(\theta) \mid D, \theta'] = \sum_{i=1}^{n}\sum_{l=1}^{q_i}\sum_{k=1}^{r_i} \hat{N}_{ilk}\log(\theta_{ilk}),$$
where
$$\hat{N}_{ilk} = E[\mathbb{I}(X_i = x^k_i, pa(X_i) = pa^l_i) \mid D, \theta'] = \sum_{j=1}^{m} P(X_i = x^k_i, pa(X_i) = pa^l_i \mid d_j, \theta').$$
Here $d_j$ is the possibly incomplete $j$-th case in $D$. When $X_i$ and all the variables in $pa(X_i)$ are observed in a case, the corresponding term requires a trivial computation: it is either zero or one. Otherwise, we can use any Bayesian network inference algorithm discussed above to evaluate the term. Having performed the Expectation step, we want to find the new parameter vector, which is obtained by maximization of the expected log-likelihood (Maximization step). In our case the new parameters on the $r$-th iteration are
$$\theta^r_{ilk} = \frac{\hat{N}_{ilk}}{\sum_{k=1}^{r_i} \hat{N}_{ilk}}.$$
We start the algorithm with an arbitrary (for example, random) parameter configuration $\theta^0$ and iteratively perform the two steps described above until convergence. Dempster et al. (1977) showed that, under certain regularity conditions, iterations of the expectation and maximization steps converge to a local maximum.
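In code, one EM iteration reduces to accumulating the expected counts and renormalizing them; `posterior_prob` below is a placeholder for whichever BN inference routine supplies $P(X_i = x_i^k, pa(X_i) = pa_i^l \mid d_j, \theta')$.

```python
import numpy as np

def em_iteration(cases, posterior_prob, n_vars, q, r):
    """One EM step for categorical BN parameters.

    q[i], r[i] give the number of parent configurations / states of X_i;
    posterior_prob(i, l, k, case) returns P(X_i = k, pa(X_i) = l | case, theta').
    """
    theta_new = []
    for i in range(n_vars):
        N_hat = np.zeros((q[i], r[i]))
        for case in cases:                   # E-step: expected sufficient statistics
            for l in range(q[i]):
                for k in range(r[i]):
                    N_hat[l, k] += posterior_prob(i, l, k, case)
        theta_new.append(N_hat / N_hat.sum(axis=1, keepdims=True))   # M-step
    return theta_new
```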
3.4 Learning parameters for CTBNs

The new method we propose in the next chapters for learning CTBNs is capable of performing both parameter learning and structure learning simultaneously, although naturally these tasks can be performed separately. In this section we review selected methods focused only on parameter learning.

3.4.1 Data

In this thesis we discuss both complete and incomplete data. In essence, a CTBN models the joint trajectories of its variables; hence, having complete, or fully observed, data means that for each point in time of each trajectory we know the full instantiation of all variables. By $D = \{\sigma[1], \ldots, \sigma[m]\}$ we denote a data set of trajectories. In the case of complete data, each $\sigma[i]$ is a complete set of state transitions and the times at which they occurred. Another way to specify each trajectory is to assign a sequence of states $x_i \in Val(X)$, each with an associated duration.

In contrast to the definition of complete data, an incomplete data set can be represented by a set of one or more partial trajectories. A partially observed trajectory $\sigma \in D$ can be specified as a sequence of subsystems $S_i$ of $X$, each with an associated duration. A subsystem $S$ describes the behaviour of the process over a subset of the full state space, i.e. $Val(S) \subset Val(X)$; it is simply a nonempty subset of states of $X$, in which we know the system stayed for the duration of the observation.
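A possible in-memory representation of these two kinds of data (the names are illustrative, not taken from the text):

```python
from dataclasses import dataclass
from typing import List, Set

@dataclass
class Segment:
    state: int        # a fully observed state x in Val(X)
    duration: float   # time spent in that state

@dataclass
class PartialSegment:
    subsystem: Set[int]   # nonempty subset of Val(X) the process stayed in
    duration: float

# A complete trajectory is a list of Segments; a partial one, of PartialSegments.
sigma_complete: List[Segment] = [Segment(0, 1.3), Segment(2, 0.4), Segment(1, 2.1)]
sigma_partial: List[PartialSegment] = [PartialSegment({0, 1}, 1.7), PartialSegment({2}, 0.5)]
```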
Some transitions are partially observed, i.e. we know only that they take us from one subsystem to another. Transitions from one state to another within a subsystem are fully unobserved; hence, we know neither how many transitions there are inside a particular subsystem nor when they occur.

3.4.2 Learning parameters for complete data

Recall that a CTBN $N$ consists of two parts. The first is an initial distribution $P^0_X$, specified as a Bayesian network over $X$. The second is a continuous transition model, specified as a directed (and possibly cyclic) graph and a set of conditional intensity matrices (CIMs), one for each variable $X_i$ in the network. For the purposes of this section we abbreviate $pa_G(X_i)$ as $pa(X_i)$ and we denote the CIMs as $Q_{X_i|pa(X_i)}$. Recall that each $Q_{X_i|pa(X_i)}$ consists of intensity matrices $Q_{X_i|pa_i}$, where $pa_i$ is a single configuration of $pa(X_i)$. Strictly speaking, $pa_i$ is one of the possible parent configurations $pa^1_i, \ldots, pa^{q_i}_i$, similar to (3.1). In terms of the pure intensity parameterization we denote the elements of these matrices as $q_{xx'|pa_i}$ and $q_{x|pa_i}$. Note that by Theorem 2.9 we can divide the set of parameters in terms of the mixed intensity parameterization into two sets.
Then for each variable $X_i$ and each instantiation $pa_i$ of its set of parents $pa(X_i)$, the parameters of $Q_{X_i|pa(X_i)}$ are $q_{X_i} = \{q_{x|pa_i} : x \in Val(X_i)\}$ and $\theta_{X_i} = \{\theta_{xx'|pa_i} : x, x' \in Val(X_i), x \ne x'\}$. More precisely, for each $X_i$ and every $x \in Val(X_i)$ we have
$$\theta_{xx'|pa_i} = \frac{q_{xx'|pa_i}}{\sum_{x'} q_{xx'|pa_i}}, \quad x' \in Val(X_i),\ x \ne x'.$$
The learning problem for the initial distribution is a Bayesian network learning task, which was discussed previously in this chapter. Therefore, it remains to learn the vector of parameters $(q, \theta)$.

Likelihood estimation. Let us start with a fully observed case and a single homogeneous Markov process $X(t)$. As all the transitions are observed, the likelihood of $D$ can be decomposed as a product of the likelihoods of the individual transitions $d$. Let $d = \langle x_d, t_d, x'_d \rangle \in D$ be the transition where $X$ transitions to state $x'_d$ after spending the amount of time $t_d$ in state $x_d$. Using the mixed intensity parameterization, we can write the likelihood of the single transition $d$ as
$$L_X(q, \theta : d) = L_X(q : d)\,L_X(\theta : d) = q_{x_d}\exp(-q_{x_d}t_d) \cdot \theta_{x_d x'_d}.$$
Multiplying the likelihoods of all transitions $d$ in our data $D$, we can summarize them in terms of the sufficient statistics $T[x]$, which describes the amount of time spent in each state $x \in Val(X)$, and $M[x, x']$, which encodes the number of transitions from $x$ to $x'$, $x \ne x'$, as follows:
$$L_X(q, \theta : D) = \left(\prod_{d\in D} L_X(q : d)\right)\left(\prod_{d\in D} L_X(\theta : d)\right) = \left(\prod_x q_x^{M[x]}\exp(-q_x T[x])\right)\left(\prod_x \prod_{x'\ne x}\theta_{xx'}^{M[x,x']}\right), \tag{3.16}$$
where $M[x] = \sum_{x'} M[x, x']$.
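These sufficient statistics are straightforward to accumulate from a complete trajectory; the sketch below reuses the Segment representation from the earlier data-structure sketch.

```python
import numpy as np

def sufficient_statistics(trajectory, n_states):
    """Accumulate T[x] and M[x, x'] from a complete trajectory of Segments."""
    T = np.zeros(n_states)
    M = np.zeros((n_states, n_states))
    for seg, nxt in zip(trajectory, trajectory[1:]):
        T[seg.state] += seg.duration
        M[seg.state, nxt.state] += 1             # observed transition seg -> nxt
    T[trajectory[-1].state] += trajectory[-1].duration
    return T, M

T, M = sufficient_statistics(sigma_complete, n_states=3)
```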
Now, in the case of CTBNs, each variable $X$ of the network $N$ is conditioned on its parent set $Pa = pa_G(X)$, and each transition of $X$ must be considered in the context of the instantiation $pa$ of $Pa$. With complete data, we know the value of $Pa$ during the entire trajectory, so at each point in time we know precisely which homogeneous intensity matrix $Q_{X|pa}$ governed the dynamics of $X$. Thus, the likelihood decomposes into a product of likelihoods, one per variable in the network:
$$L_N(q, \theta : D) = \prod_{X_i\in X} L_{X_i}(q_{X_i|pa(X_i)}, \theta_{X_i|pa(X_i)} : D) = \prod_{X_i\in X} L_{X_i}(q_{X_i|pa(X_i)} : D)\,L_{X_i}(\theta_{X_i|pa(X_i)} : D).$$
The term $L_X(\theta_{X|Pa} : D)$ is the probability of the sequence of state transitions, disregarding the times between transitions. These state changes depend only on the value of the parents at the moment of the transition. For each variable $X \in X$, let $M[x, x' \mid pa]$ denote the number of transitions from $X = x$ to $X = x'$ while $Pa = pa$. Then, with this set of sufficient statistics $M[x, x' \mid pa]$, we have
$$L_X(\theta_{X|Pa} : D) = \prod_{pa}\prod_x\prod_{x'\ne x}\theta_{xx'|pa}^{M[x,x'|pa]}.$$
The computation of $L_X(q_{X|Pa} : D)$ is more subtle, since the duration in a state can be terminated not only by a transition of $X$, but also by a transition of one of its parents. The total amount of time where $X = x$ and $Pa = pa$ can be decomposed into two different kinds of durations, $T[x \mid pa] = T_r[x \mid pa] + T_c[x \mid pa]$, where $T_r[x \mid pa]$ is the total length of the time intervals that terminate with $X$ remaining equal to $x$, and $T_c[x \mid pa]$ is the total length of the time intervals that terminate with a change in the value of $X$. However, it is easy to show that we do not need to maintain the distinction between the two, and we can use the set of $T[x \mid pa]$ as sufficient statistics. Finally, we can write the log-likelihood as a sum of local variable likelihoods of the form
$$\ell_X(q, \theta : D) = \ell_X(q : D) + \ell_X(\theta : D) = \left(\sum_{pa}\sum_x M[x \mid pa]\log q_{x|pa} - q_{x|pa}T[x \mid pa]\right) + \left(\sum_{pa}\sum_x\sum_{x'\ne x} M[x, x' \mid pa]\log\theta_{xx'|pa}\right). \tag{3.17}$$
Now we can write the maximum-likelihood (MLE) parameters as functions of the sufficient statistics as follows (for the proof see Nodelman (2007)):
$$\hat{q}_{x|pa} = \frac{M[x \mid pa]}{T[x \mid pa]}, \qquad \hat{\theta}_{xx'|pa} = \frac{M[x, x' \mid pa]}{M[x \mid pa]}.$$

The Bayesian approach. The other way to estimate parameters in the case of fully observed data is the Bayesian approach. To perform Bayesian parameter estimation, similarly to the case of Bayesian networks, for computational efficiency we use a conjugate prior (one where the posterior after conditioning on the data is in the same parametric family as the prior) over the parameters of our CTBN. For a single Markov process we have two types of parameters: a vector of parameters $\theta$ for the categorical distribution and $q$ for the exponential distribution. An appropriate conjugate prior for the exponential parameter $q$ is the Gamma distribution, $P(q) = \mathrm{Gamma}(\alpha, \tau)$, and, as we mentioned in Section 3.1, the standard conjugate prior for the categorical distribution is a Dirichlet distribution, $P(\theta) = \mathrm{Dir}(\alpha_{xx_1}, \ldots, \alpha_{xx_k})$. The posterior distributions $P(\theta \mid D)$ and $P(q \mid D)$ given the data are again Dirichlet and Gamma distributions, respectively. In order to apply this idea to an entire CTBN we need to make two standard assumptions for parameter priors in Bayesian networks: global parameter independence,
$$P(q, \theta) = \prod_{X\in X} P(q_{X|pa_G(X)}, \theta_{X|pa_G(X)}),$$
and local parameter independence for each variable $X$ in the network,
$$P(q_{X|Pa}, \theta_{X|Pa}) = \left(\prod_x\prod_{pa} P(q_{x|pa})\right)\left(\prod_x\prod_{pa} P(\theta_{x|pa})\right).$$
If our parameter prior satisfies these assumptions, so does our posterior, as it belongs to the same parametric family.
Thus, we can maintain our parameter distribution in closed form and update it using the obvious sufficient statistics: $M[x, x' \mid pa]$ for $\theta_{x|pa}$, and $M[x \mid pa]$, $T[x \mid pa]$ for $q_{x|pa}$. Given a parameter distribution, we can use it to predict the next event, averaging the event probability over the possible values of the parameters. As usual, this prediction is equivalent to using "expected" parameter values, which have the same form as the MLE parameters but account for the "imaginary counts" of the hyperparameters:
$$\hat{q}_{x|pa} = \frac{\alpha_{x|pa} + M[x \mid pa]}{\tau_{x|pa} + T[x \mid pa]}, \qquad \hat{\theta}_{xx'|pa} = \frac{\alpha_{xx'|pa} + M[x, x' \mid pa]}{\alpha_{x|pa} + M[x \mid pa]}.$$
Note that, in principle, this choice of parameters is only valid for predicting a single transition, after which we should update our parameter distribution accordingly. However, as is often done in other settings, we can approximate the exact Bayesian computation by "freezing" the parameters to these expected values and using them to predict an entire trajectory.
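Given such sufficient statistics (for a single process, the T and M arrays from the earlier sketch), the expected parameters are a few lines of code; the scalar hyperparameters below are illustrative stand-ins for the per-state hyperparameters of the text.

```python
import numpy as np

def expected_parameters(T, M, alpha_q=1.0, tau=1.0, alpha_theta=1.0):
    """Bayesian 'expected' parameters with imaginary counts (illustrative priors)."""
    n = len(T)
    M_x = M.sum(axis=1)                                   # M[x] = sum_{x'} M[x, x']
    q_hat = (alpha_q + M_x) / (tau + T)                   # posterior-mean intensities
    A = alpha_theta * (1.0 - np.eye(n)) + M               # Dirichlet counts, x != x'
    theta_hat = A / A.sum(axis=1, keepdims=True)          # expected transition probs
    return q_hat, theta_hat

q_hat, theta_hat = expected_parameters(T, M)              # T, M from the earlier sketch
# Setting alpha_q = tau = alpha_theta = 0 recovers the MLE formulas above.
```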
3.4.3 Learning parameters for incomplete data

Recall that, in the case of Bayesian networks, one of the methods for dealing with missing data was the Expectation-Maximization (EM) algorithm. Here we provide a concise description of the EM-based algorithm for CTBNs presented in detail in Nodelman et al. (2012). We start again by reviewing the EM scheme for a single Markov process $X$, which is the basis of the algorithm for CTBNs. Let $D = \{\sigma[1], \ldots, \sigma[m]\}$ denote the set of all partially observed trajectories of $X$. For each partial trajectory $\sigma[i] \in D$ we can consider the space $H[i]$ of possible completions of this trajectory. For every transition of $\sigma[i]$, each completion $h[i] \in H[i]$ specifies which underlying transition of $X$ occurred; it also specifies all the entirely unobserved transitions of $X$. Combining $\sigma[i]$ and $h[i]$ gives us a complete trajectory $\sigma^+[i]$ over $X$. Note that, in a partially observed trajectory, the number of possible unobserved transitions is unknown; moreover, there are uncountably many times at which each transition can take place. Nevertheless, we can define the set $D^+ = \{\sigma^+[1], \ldots, \sigma^+[m]\}$ of completions of all the partial trajectories in $D$. For examples of completions see Nodelman et al. (2012). As we mentioned in the previous subsection, the sufficient statistics of the set of complete trajectories $D^+$ for a Markov process are $T[x]$, the total amount of time that $X$ stays in $x$, and $M[x, x']$, the number of times that $X$ transitions from $x$ to $x'$. Taking the logarithm of (3.16), we can write the log-likelihood $\ell_X(q, \theta : D^+)$ for $X$ as an expression of these sufficient statistics. Let $r$ be a probability density over each completion in $H[i]$, which, in turn, yields a density over possible completions $D^+$ of the data.
Let $r$ be a probability density over each completion in $H[i]$, which in turn yields a density over possible completions of the data $D^+$. We can write the expectations of the sufficient statistics with respect to this density over possible completions of the data as $\bar{T}[x]$, $\bar{M}[x, x']$ and $\bar{M}[x]$. These expected sufficient statistics allow us to write the expected log-likelihood for $X$ as

$$\mathbb{E}_r[\ell_X(q, \theta : D^+)] = \mathbb{E}_r[\ell_X(q : D^+)] + \mathbb{E}_r[\ell_X(\theta : D^+)] = \sum_x \Big( \bar{M}[x] \ln q_x - q_x \bar{T}[x] + \sum_{x' \neq x} \bar{M}[x, x'] \ln \theta_{xx'} \Big).$$

Now we can use the EM algorithm to find maximum-likelihood parameters $q, \theta$ of $X$. The EM algorithm begins with an arbitrary initial parameter assignment $q^0, \theta^0$. It then repeats the two steps, Expectation and Maximization, updating the parameter set until convergence. At the $k$-th iteration we start with parameters $q^k, \theta^k$. The Expectation step proceeds as follows: using the current set of parameters, we define for each $\sigma[i] \in D$ the probability density $r_k(h[i]) = p(h[i] \mid \sigma[i], q^k, \theta^k)$. We then compute the expected sufficient statistics $\bar{T}[x]$, $\bar{M}[x, x']$ and $\bar{M}[x]$ according to this posterior density over completions of the data, given the data and the model. Using the expected sufficient statistics we have just computed as if they came from a complete data set, we set $q^{k+1}$ and $\theta^{k+1}$ to be the new maximum likelihood parameters for our model as follows:

$$q^{k+1}_x = \frac{\bar{M}[x]}{\bar{T}[x]}, \qquad \theta^{k+1}_{xx'} = \frac{\bar{M}[x, x']}{\bar{M}[x]}. \tag{3.18}$$

The difficult part of this algorithm is the Expectation step. The space over which we are integrating is highly complex, and it is not clear how to compute the expected sufficient statistics in a tractable way.
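The overall loop is thus standard EM; only the Expectation step is specific to continuous time. A schematic Python version, in which `expected_stats` is a hypothetical placeholder for the hard E-step described next, not an implementation of it:

```python
def em_markov_process(D, q0, theta0, n_iter=50):
    """Schematic EM for a single homogeneous Markov process with partially
    observed trajectories D.  `expected_stats` is a stand-in for the E-step
    of Nodelman et al. (2012); q, theta, and its outputs are numpy arrays."""
    q, theta = q0, theta0
    for _ in range(n_iter):
        # E-step: expected statistics under r_k(h) = p(h | sigma, q^k, theta^k)
        T_bar, M_bar = expected_stats(D, q, theta)   # placeholder
        M_x = M_bar.sum(axis=1)
        # M-step, eq. (3.18)
        q = M_x / T_bar
        theta = M_bar / M_x[:, None]
    return q, theta
```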
In Nodelman et al. (2012) and Nodelman (2007) the authors describe in detail the algorithm for computing the expected sufficient statistics for an $n$-state homogeneous Markov process $X_t$ with intensity matrix $Q_X$, with respect to the posterior probability density over completions of the data given the observations and the current model. The statistics are computed for each partially observed trajectory $\sigma \in D$ separately, and then the results are combined. A partially observed trajectory $\sigma$ is given as a sequence of $N$ subsystems, so that the state is restricted to subsystem $S_i$ during the interval $[t_i, t_{i+1})$ for $0 \le i \le N - 1$. To conduct all the necessary computations, for each time $t$ the forward and backward probability vectors $\alpha_t$ and $\beta_t$ are defined, which include evidence of any transition at time $t$, and also the vectors $\alpha^-_t$ and $\beta^+_t$, neither of which includes evidence of a transition at time $t$. The total expected time $\mathbb{E}[T[j]]$ is obtained by summing the integrals over all intervals of constant evidence $[v, w)$ with the subsystem $S$ to which the state is restricted on that interval. Each integrand is an expression containing $\alpha_v$, $\beta_w$ and $Q_S$. The computations for each integral are performed via the fourth-order Runge-Kutta method with an adaptive step size. Regarding the expected number of transitions $\mathbb{E}[M[x, x']]$ from state $x$ to $x'$, discrete-time approximations of $M[x, x']$ are considered, which in the limit as the size of the discretization goes to zero yield an exact equation. As a result we get a sum of expressions where each summand is associated with a time interval. The overall expression for the expected number of transitions consists of two parts: the sum of products corresponding to intervals with partially observed transitions, containing $\alpha^-_t$ and $\beta^+_t$ for different time points $t$, and the sum of integrals of practically identical form to those obtained for the total expected time.
In order to compute $\alpha_t$ and $\beta_t$, a forward-backward style algorithm (Rabiner and Juang (1986)) over the entire trajectory is used to incorporate evidence and obtain distributions over the state of the system at every time $t_i$. If needed, it is possible to exclude the incorporation of the evidence of the transition from either the forward or the backward vector, and in this way obtain $\alpha^-_t$ and $\beta^+_t$. We can then write the distribution over the state of the system at time $t$ given all the evidence.
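The propagation between evidence points reduces to matrix exponentials of restricted intensity matrices. The sketch below shows a forward pass of this style, with the subsystem evidence represented as 0/1 masks over states; this is our simplified rendering of the scheme, under the assumption that evidence only restricts the state to a subsystem on each interval.

```python
import numpy as np
from scipy.linalg import expm

def forward_pass(Q, p0, times, masks):
    """Forward vectors for an n-state Markov process with intensity matrix Q.
    masks[i] is a 0/1 vector marking the subsystem S_i allowed on
    [times[i], times[i+1]); the returned vectors are unnormalized, their
    total mass being the likelihood of the evidence so far."""
    alphas = [p0 * masks[0]]
    for i in range(len(times) - 1):
        dt = times[i + 1] - times[i]
        QS = Q * np.outer(masks[i], masks[i])   # rates within S_i; exits leak mass
        alpha = alphas[-1] @ expm(QS * dt)      # propagate over the interval
        alphas.append(alpha * masks[i + 1])     # impose the next interval's evidence
    return alphas
```

The backward vectors $\beta_t$ are obtained analogously, propagating the transposed restricted matrices from the end of the trajectory.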
Continuous time Bayesian networks are a factored representation of homogeneous Markov processes; hence, extending the EM algorithm to them involves making it sensitive to a factored state space. As mentioned previously, the log-likelihood decomposes as a sum of local log-likelihoods for each variable. With the sufficient statistics $T[x \mid pa]$, $M[x, x' \mid pa]$ and $M[x \mid pa]$ of the set of complete trajectories $D^+$ for each variable $X$ in the CTBN $\mathcal{N}$, the likelihood for each variable $X$ further decomposes as in (3.17). By linearity of expectation, the expected log-likelihood function also decomposes in the same way. So we can write the expected log-likelihood $\mathbb{E}_r[\ell(q, \theta : D^+)]$ as a sum of terms, one for each variable $X$, of a form similar to (3.17), but using the expected sufficient statistics $\bar{T}[x \mid pa]$, $\bar{M}[x, x' \mid pa]$ and $\bar{M}[x \mid pa]$. The EM algorithm for CTBNs is essentially the same as for homogeneous Markov processes. We need only specify how evidence in the network induces evidence on the induced Markov process, and how expected sufficient statistics in the Markov process give us the necessary sufficient statistics for the CTBN. The Maximization step is practically the same as in (3.18); we just use the expected sufficient statistics appropriate for the CTBN case:

$$q^{k+1}_{x|pa} = \frac{\bar{M}[x \mid pa]}{\bar{T}[x \mid pa]}, \qquad \theta^{k+1}_{xx'|pa} = \frac{\bar{M}[x, x' \mid pa]}{\bar{M}[x \mid pa]}.$$

The Expectation step is again more difficult. It could be done by flattening the CTBN into a single homogeneous Markov process, whose state space is exponential in the number of variables, and then following the method described above. However, as the number of variables in the CTBN grows this process becomes intractable, so we are forced to use approximate inference. We want this approximate algorithm to be able to compute approximate versions of the forward and backward messages $\alpha_t$ and $\beta_s$ and to extract the relevant sufficient statistics from these messages efficiently. In the next subsection we review a cluster graph inference algorithm which can be used to perform this type of approximate inference. Using the obtained cluster beliefs (see below) we can compute $\alpha_{t_{i+1}}$ and $\beta_{t_i}$ and use them in the forward-backward message passing procedure. The cluster distributions are represented as local intensity matrices, from which we can compute the expected sufficient statistics over the families $X_i, pa_G(X_i)$ as described above.

3.5 Inference for CTBNs

To gain perspective on the whole concept of continuous time Bayesian networks and their power, similarly to Bayesian networks, we discuss the questions of inference, although it is not the key subject of this thesis. We start with a discussion of the types of queries we might wish to answer and the difficulties of exact inference. Inference for CTBNs can take a number of forms.
The common types of queries are:

- the marginal distribution of a variable at a particular time, or the time at which a variable first takes a particular value;
- the expected number of transitions for a variable during a fixed time interval;
- the expected amount of time a variable stayed in a particular state during an interval.

Previously we showed that we can view a CTBN as a compact representation of a joint intensity matrix for a homogeneous Markov process. Thus, at least in principle, we can use a CTBN to answer any query that we can answer using an explicit representation of a Markov process: we can form the joint intensity matrix and then answer queries just as we would for any homogeneous Markov process. The obvious flaw of this approach is that it requires us to generate the full joint intensity matrix for the system as a whole. The size of this matrix is exponential in the number of variables, making the approach generally intractable. The graphical structure of the CTBN immediately suggests that we perform the inference in a decomposed way, as in Bayesian networks. Unfortunately, the problems are significantly more complex in this setting.
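To make the flattening concrete, the sketch below builds the joint intensity matrix from the conditional intensities (only one variable may transition at a time) and answers a marginal-distribution query via the matrix exponential. The interface of `rate` is our own illustrative convention; the exponential blow-up is visible in the size of `joint_states`.

```python
import itertools
import numpy as np
from scipy.linalg import expm

def amalgamate(card, parents, rate):
    """Joint intensity matrix of the Markov process induced by a CTBN.
    card[i]   : number of states of variable i
    parents[i]: list of parents of variable i
    rate(i, x, x_new, pa_state): conditional intensity of variable i
    jumping from x to x_new given its parents' states (illustrative API)."""
    joint_states = list(itertools.product(*[range(c) for c in card]))
    index = {s: k for k, s in enumerate(joint_states)}
    Q = np.zeros((len(joint_states), len(joint_states)))
    for s in joint_states:
        for i, c in enumerate(card):
            pa = tuple(s[p] for p in parents[i])
            for x_new in range(c):
                if x_new != s[i]:
                    t = list(s); t[i] = x_new   # exactly one variable changes
                    Q[index[s], index[tuple(t)]] = rate(i, s[i], x_new, pa)
    Q[np.diag_indices_from(Q)] = -Q.sum(axis=1)  # rows of an intensity matrix sum to 0
    return Q

# Marginal-distribution query for a homogeneous Markov process: p_t = p_0 exp(Qt),
# e.g.  Q = amalgamate(card, parents, rate);  p_t = p0 @ expm(Q * t)
```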
In Nodelman et al. (2002) the authors describe an approximate inference algorithm based on ideas from clique tree inference, but without any formal justification for the algorithm. More importantly, the algorithm covers only point evidence, meaning observations of the value of a variable at a point in time, whereas in many applications we observe a variable over an interval or even for its entire trajectory. Therefore, we briefly describe an approximate inference algorithm called Expectation Propagation (EP), presented in Nodelman et al. (2005), that allows both point and interval evidence. The algorithm uses message passing in a cluster graph (with clique tree algorithms as a special case), where the clusters contain distributions not over the cluster variables at individual time points, but over trajectories of the variables through a duration. As we discussed in this chapter, in cluster graph algorithms we construct a graph whose nodes correspond to clusters of variables, and then pass messages between these clusters to produce an alternative parameterization, in which the marginal distribution of the variables in each cluster can be read directly from the cluster. In discrete graphical models, when the cluster graph is a clique tree, two passes of the message passing algorithm produce the exact marginals. In generalized belief propagation (Yedidia et al. (2001)), message passing is applied to a graph which is not a clique tree, in which case the algorithm may not converge and produces only approximate solutions. There are several forms of the message passing algorithm, as we discussed in Subsection 3.2.2. The algorithm of Nodelman et al. (2005) is based on the multiply-marginalize-divide scheme of Lauritzen and Spiegelhalter (1988), which we now briefly review. A cluster graph is defined in terms of a set of clusters $C_i$, whose scope is some subset of the variables $\mathcal{X}$. Clusters are connected to each other by edges, along which messages are passed. Each edge is annotated with a set of variables called a sepset $S_{i,j}$, which is the set of variables in $C_i \cap C_j$. The messages passed over the edge between $C_i$ and $C_j$ are factors over the scope $S_{i,j}$.
Each cluster $C_i$ maintains a potential $\beta_i$, which is a factor reflecting its current beliefs over the variables in its scope. Each edge similarly maintains a message $\mu_{i,j}$, which encodes the last message sent over the edge. The potentials are initialized with a product of some subset of the factors parameterizing the model (CIMs in our setting). Messages are initialized to be uninformative. Clusters then send messages to each other, and use incoming messages to update their beliefs over the variables in their scope. The message $m_{i \to j}$ from $C_i$ to $C_j$ is the marginal distribution over $S_{i,j}$ according to $\beta_i$. The neighbouring cluster $C_j$ assimilates this message by multiplying it into $\beta_j$, but avoids double-counting by first dividing by the stored message $\mu_{i,j}$. Thus, the message update takes the form

$$\beta_j \leftarrow \beta_j \cdot \frac{m_{i \to j}}{\mu_{i,j}}.$$
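For intuition, here is the update on discrete factors represented as numpy arrays; in the CTBN algorithm the factors are CIMs over trajectories, with the corresponding CIM operations in place of sum and product. We assume, purely for the sketch, that the sepset variables occupy the trailing axes of $\beta_j$ so that broadcasting aligns them.

```python
import numpy as np

def send_message(beta_i, beta_j, mu_ij, axes_only_in_i):
    """One multiply-marginalize-divide update (discrete case, for intuition).
    axes_only_in_i: axes of beta_i for variables outside the sepset S_{i,j};
    the sepset axes are assumed to be the trailing axes of beta_j."""
    m_ij = beta_i.sum(axis=axes_only_in_i)   # marginalize beta_i onto S_{i,j}
    beta_j = beta_j * (m_ij / mu_ij)         # multiply new message in, divide old out
    return beta_j, m_ij                      # m_ij is stored as the new mu_{i,j}
```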
In this algorithm the cluster beliefs do not represent factors over values of random variables themselves; rather, the cluster potentials and the messages both encode measures over entire trajectories of the variables in their scope. The number of parameters grows exponentially with the size of the network, and thus we cannot pass messages exactly without giving up the computational efficiency of the algorithm. To address this issue, Nodelman et al. (2005) used the expectation propagation (EP) approach of Minka (2001), which performs approximate message passing in cluster graphs. In order to obtain an approximate message, each message $m_{i \to j}$ is projected into a compactly representable space so as to minimize the KL-divergence between the message and its approximation. CIMs are used to encode the cluster potentials. In order to apply the EP algorithm to clusters of this form, some basic operations over CIMs need to be defined. These include CIM product and division, approximate CIM marginalization, as well as the incorporation of evidence into a CIM. The message propagation algorithm is first considered for one segment of the trajectory with constant continuous evidence. Exactly as for Bayesian networks, this process starts with constructing the cluster tree for the graph $G$. Note that cycles do not introduce new issues. We can simply moralize the graph, connecting all parents of a node with undirected edges and then making all the remaining edges undirected. If there is a cycle, it simply turns into a loop in the resulting undirected graph.
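Moralization itself is a few lines; a sketch over a parent-list representation (names illustrative):

```python
def moralize(parents):
    """Moral graph of a (possibly cyclic) directed graph given as
    {node: iterable of parents}: marry all co-parents, drop directions.
    Returns an undirected edge set; a directed cycle simply becomes a loop."""
    edges = set()
    for v, pa in parents.items():
        pa = list(pa)
        for p in pa:
            edges.add(frozenset((p, v)))          # undirected version of p -> v
        for a in pa:
            for b in pa:
                if a != b:
                    edges.add(frozenset((a, b)))  # marry co-parents
    return edges
```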
Next we select a set of clusters $C_i$. These clusters can be selected so as to produce a clique tree for the graph, using any standard method for constructing such trees. We can also construct a loopy cluster graph and use generalized belief propagation; we did not discuss this topic in the thesis (for more details see Koller and Friedman (2009)). The message passing scheme described in this section is the same in both cases. The algorithm iteratively selects an edge connecting the clusters $C_i$ and $C_j$ in the cluster graph and passes the message from the former to the latter. In clique tree propagation the order in which we chose edges was essentially fixed: we started from the leaves towards the root, performing an upward pass, and then went in the opposite direction. In generalized belief propagation, we might use a variety of message passing schemes. Convergence occurs when messages cease to affect the potentials, which means that neighboring clusters $C_i$ and $C_j$ agree on the approximate marginals over the variables from $S_{i,j}$. Now we can generalize the algorithm for a single segment to trajectories containing multiple segments of continuous evidence. Nodelman et al. (2005) applied this algorithm separately to every segment, passing information from one segment to the next in the form of distributions. More precisely, consider a trajectory defining a sequence of time points $t_1, \ldots, t_n$, with constant continuous evidence on every interval $[t_i, t_{i+1})$ and possible point evidence or an observed transition at each $t_i$. Then a sequence of cluster graphs, one over each segment, is constructed. Starting from the initial segment, EP inference is run on each cluster graph using the algorithm for a single segment described above, and the distribution at the end time point of the interval is computed. The resulting distribution is then conditioned on any point evidence or observed transition, and is next used as the initial distribution for the next interval. However, there is one subtle difficulty relating to the propagation of messages from one interval to another. If a variable $X$ appears in two clusters $C_i$ and $C_j$ in a cluster graph, the distribution over its values in these two clusters is not generally the same, even if the EP computation converges.
The reason is that even calibrated clusters only agree on the projected marginals over their sepset, not on the true marginals. To address this issue and to obtain a coherent distribution which can be transmitted to the next cluster graph, the individual cluster marginals and sepsets for the state variables at the end time point of the previous interval are recalibrated to form a coherent distribution (the conditioning on point evidence can be done at the same time if needed). Then we can extract the new distribution as a set of calibrated cluster and sepset factors, and introduce each factor into the appropriate cluster or sepset in the cluster graph for the next time interval. The above algorithm performs the propagation of beliefs forward in time. It is also possible to perform a similar propagation backwards and pass messages in reverse, where the cluster graph for one time interval passes a message to the cluster graph for the previous one. Moreover, to achieve more accurate beliefs we can repeat the forward-backward propagation until the entire network is calibrated, essentially treating the entire network as a single cluster graph. Note that since one cluster graph is used for each segment of fixed continuous evidence, each cluster will approximate the trajectory of all the variables it contains as a homogeneous Markov process for the duration of the entire segment. Therefore, the choice of segments, and the resulting subsets of variables over which we compute the distribution, determine the quality of the approximation.

Chapter 4
Structure learning for Bayesian networks

Recall Definition 2.3 of Bayesian Networks (BN), the notion of which combines the structure given by a Directed Acyclic Graph (DAG) and the probability distribution encoded by Conditional Probability Distributions (CPDs). So far, in Chapter 3, we have discussed the problem of finding CPDs and making inference given the structure.
In this chapter we discuss the problem of learning the structure of Bayesian networks. In Section 4.1 we briefly review known approaches to the problem. In Section 4.2 we recall the partition MCMC algorithm for learning the structure of the network, the part of which concerning the division of the graph into layers will be the first step of our new method. In Sections 4.3 and 4.4 we present a novel approach to structure learning which uses the above algorithm together with the LASSO approach, for continuous and discrete data respectively. Section 4.5 is dedicated to numerical results.

4.1 The problem of learning the structure of Bayesian Networks

Structure learning is known to be a hard problem, especially due to the superexponential growth of the DAG space as the number of nodes increases. Generally speaking, the literature on structure learning can be divided into three classes: constraint-based methods, score-and-search algorithms and the dynamic programming approach (as discussed for example in Koller and Friedman (2009)), even though this division is not that strict. The contents of this section come mostly from Kuipers and Moffa (2017) and Daly et al. (2011). Constraint-based methods use conditional independence tests to obtain information about the underlying causal structure.
They start from the full undirected graph and then make decisions about removing edges from the network based on tests of conditional independence. The widely used algorithm of this nature, the PC algorithm (Spirtes et al. (2000)), and constraint-based methods in general, are sensitive to the order in which they are run. However, Colombo and Maathuis (2014) proposed modifications of the PC algorithm which remove this dependence either partially or altogether. These methods scale well with the dimension, but are sensitive to local errors of the independence tests which are used. One of the most widely studied ways of learning a Bayesian network structure has been the use of so-called 'score-and-search' techniques. These algorithms comprise:

- a search space consisting of the various allowable states, each of which represents a Bayesian network structure;
- a mechanism to encode each of the states;
- a mechanism to move from state to state in the search space;
- a scoring function assigning a score to each state in the search space, describing the goodness of fit with the sample data.

Some hybrid methods combining ideas from both techniques have also been proposed, for example the max-min hill-climbing of Tsamardinos et al. (2006). Within the family of search-and-score methods we can distinguish a separate class of MCMC methods for the exploration of the graph space. Their main, and huge, advantage is that they can provide a collection of samples from the posterior distribution of the graph given the data.
This means that rather than making inference based on a single graphical model, we can account for model uncertainty by averaging over all the models in the obtained class. In particular, we can estimate the expectation of any given network feature, such as the posterior probability of an individual edge, by averaging the posterior distributions under each of the models, weighted by their posterior model probabilities (Madigan et al. (1995), Kuipers and Moffa (2017)). This is especially important in high-dimensional domains with sparse data, where a single best model cannot be clearly identified, so inference relying on the best scoring model is not justified. The first MCMC algorithm over graph structures is due to Madigan et al. (1995), later refined by Giudici and Castelo (2003). To improve on the mixing and convergence, Friedman and Koller (2001) instead suggested building a Markov chain on the space of node orders, at the price of introducing a bias into the sampling. For smaller systems, an efficient approach with smaller space and time complexity is dynamic programming (Koivisto and Sood (2004)), which can further be used to extend the proposals of the standard structure MCMC approach in a hybrid method (Eaton and Murphy (2007)). Within the MCMC approach, to avoid the bias while keeping a reasonable convergence rate, Grzegorczyk and Husmeier (2008) more recently proposed a new edge reversal move method combining ideas of both standard structure and order-based MCMC. Recently, Kuipers and Moffa (2017) presented another MCMC algorithm designed around the combinatorial structure of DAGs, with the advantage of improving convergence with respect to structure MCMC while still providing an unbiased sample, since it acts directly on the space of DAGs. Moreover, it can also be combined with the algorithm of Grzegorczyk and Husmeier (2008) to improve the convergence rate even further.
4.2 Partition MCMC method

In this section we describe the partition MCMC algorithm of Kuipers and Moffa (2017), which will be the basis of our novel method for learning the structure of BNs. This algorithm considers a combinatorial representation of DAGs to build an efficient MCMC scheme directly on the space of DAGs. Its convergence is better than that of structure MCMC, and it does not introduce bias as order-based MCMC does. As we mentioned, the authors also proposed a way to combine their method with the new edge reversal move approach of Grzegorczyk and Husmeier (2008) and improve upon their MCMC sampler. First we need to introduce the notion of layers and partitions for a DAG. Given a DAG $G = (V, E)$ we define the layers $\ell_i$ of the nodes (called interchangeably variables) in the network as follows: $\ell_0 = \{v \in V : pa_G(v) = \emptyset\}$ is the layer of the nodes which do not have any parents; having defined the layers $\ell_i$ for $i = 0, 1, \ldots, k - 1$, we define the next layer as

$$\ell_k = \{v \in V : \exists w \in \ell_{k-1} \text{ such that } w \in pa_G(v) \text{ and } pa_G(v) \subseteq L_{k-1}\},$$

where $L_{k-1} = \bigcup_{i \le k-1} \ell_i$. Note that variables from the same layer do not have arrows between them, and that each variable (except for the layer $\ell_0$) has at least one arrow directed towards it from a variable in the adjacent previous layer. For instance, the graph in Figure 4.1 has three layers: $\ell_0 = \{1, 3, 5\}$, $\ell_1 = \{4\}$ and $\ell_2 = \{2\}$.
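The layer decomposition is easy to compute directly from the definition; a sketch over a parent-set representation, together with a parent assignment that is merely one guess consistent with the layers quoted for Figure 4.1:

```python
def layers(parents):
    """Layers l_0, l_1, ... of a DAG given as {node: set of parents},
    following the definition above."""
    remaining, L, result = set(parents), set(), []
    while remaining:
        if not result:
            layer = {v for v in remaining if not parents[v]}   # l_0: no parents
        else:
            layer = {v for v in remaining
                     if parents[v] <= L and parents[v] & result[-1]}
        result.append(layer)
        L |= layer            # L_{k-1}: union of the layers built so far
        remaining -= layer
    return result

# One DAG consistent with Figure 4.1 (hypothetical edges 1->4, 3->4, 5->4,
# 3->2, 4->2):
# layers({1: set(), 3: set(), 5: set(), 4: {1, 3, 5}, 2: {3, 4}})
# returns [{1, 3, 5}, {4}, {2}]
```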
Suppose that for some arbitrary graph we have $q + 1$ layers. Each layer $\ell_i$ contains a certain number $k_i$ of nodes, which in sum gives the total number of nodes $d$, i.e. $\sum_{i=0}^{q} k_i = d$. In addition, with each layer representation there is an associated permutation of the nodes, in which we list the nodes in layer order. More precisely, first we write the nodes from the first layer, then from the second one, and so on. For the graph in Figure 4.1 we have the partition $\lambda = [3, 1, 1]$ and the permutation $\pi_\lambda = [1, 3, 5, 4, 2]$. Together, the pair $(\lambda, \pi_\lambda)$ is called a labelled partition. Kuipers and Moffa (2017) proposed an efficient MCMC algorithm for exploring the space of partitions to find the most probable layer representation given the observed data. Although the full algorithm is suited for structure learning, we want to improve on it and replace its second part with the LASSO estimator. The authors define an MCMC algorithm on the space of node partitions, avoiding in this way the over-representation of certain DAGs. Compared to the other MCMC methods mentioned above, partition MCMC is faster than the structure MCMC of Madigan et al. (1995); it is slower than the order MCMC of Friedman and Koller (2001), but does not introduce any bias. The basic move consists of splitting one element of the partition (i.e. a layer) into two parts or joining two adjacent elements (the authors also propose an additional move consisting of swapping two nodes in adjacent layers). All the partitions reachable from a given partition in one basic move are called the neighbourhood of the partition.

[Figure 4.1: An example of the partition representation of a DAG.]
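On the partition $\lambda$ alone, the two basic moves are simple list surgery; a sketch, ignoring the accompanying update of the node permutation $\pi_\lambda$ and assuming $d \ge 2$:

```python
import random

def basic_move(lam):
    """Split one element of lam = [k_0, ..., k_q] into two positive parts,
    or join two adjacent elements; scoring and acceptance are omitted."""
    lam = list(lam)
    splittable = [i for i, k in enumerate(lam) if k > 1]
    if splittable and (len(lam) == 1 or random.random() < 0.5):
        i = random.choice(splittable)
        cut = random.randint(1, lam[i] - 1)              # split k_i = cut + rest
        return lam[:i] + [cut, lam[i] - cut] + lam[i + 1:]
    i = random.randrange(len(lam) - 1)                   # join k_i and k_{i+1}
    return lam[:i] + [lam[i] + lam[i + 1]] + lam[i + 2:]
```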
The MCMC scheme thus consists of sampling a partition from the neighbourhood of the previous partition, with a small, user-defined probability of staying still. The obtained partition is scored, and the score coincides with the posterior probability of the labelled partition. After sampling the partition, we sample a single DAG weighted according to its posterior. We can then average the DAGs acquired in the MCMC chain and choose the model. However, we propose to change the step in which we sample a DAG from the posterior distribution and average the DAGs from the MCMC chain. That step is well suited for inference and estimation of network parameters, but we believe that we can improve on the Bayesian averaging approach in the case of structure learning. We propose to use partition MCMC for finding the best scoring partition, and next to use that partition for recovering the arrows with the LASSO estimator, where each parameter corresponds to a certain arrow in the network.

4.3 The novel approach to structure learning

We want to combine the advantages of partition MCMC and of LASSO for linear models. First we find the best layer representation using the partition MCMC algorithm. Next we obtain the final DAG by solving $d$ LASSO problems, where $d$ is the number of variables (nodes).
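A sketch of this second stage for the continuous case (the model is spelled out in the next subsection): every non-root variable is regressed with LASSO on all variables from the earlier layers, and nonzero coefficients are read off as arrows. The penalty weight `alpha` and the use of scikit-learn are our illustrative choices, not necessarily the thesis' exact setup.

```python
import numpy as np
from sklearn.linear_model import Lasso

def recover_dag(X, layer_list, alpha=0.1):
    """X: data matrix (rows = samples, columns = variables);
    layer_list: best-scoring layer representation as a list of lists of
    column indices.  Returns A with A[p, c] = True for an arrow p -> c."""
    d = X.shape[1]
    A = np.zeros((d, d), dtype=bool)
    earlier = []                                  # indices from previous layers
    for layer in layer_list:
        for c in layer:
            if earlier:
                fit = Lasso(alpha=alpha).fit(X[:, earlier], X[:, c])
                for p, coef in zip(earlier, fit.coef_):
                    A[p, c] = coef != 0.0         # surviving coefficient = arrow
        earlier = earlier + list(layer)
    return A
```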
Having found the most probable layer representation for a DAG, we consider two models: one for continuous data and one for discrete data.

4.3.1 Gaussian Bayesian Networks

For the continuous case we consider Gaussian Bayesian Networks (GBN), introduced in Section 3.1. We denote by $X^m_i$ the $m$-th random variable in the $i$-th layer, where $m \in \{1, \ldots, k_i\}$. We assume that each $\epsilon^m_i$ has the normal distribution $N(0, \sigma^m_i)$. We also assume that each $\epsilon^m_i$ is independent of all $X^m_i$. Now, given the partition $[k_0, k_1, \ldots, k_q]$, we can write the problem of finding the DAG structure as the set of the following $d$ linear model problems:

$$\begin{aligned}
X^1_0 &= \beta^1_{0,0} + \epsilon^1_0\\
&\;\;\vdots\\
X^{k_0}_0 &= \beta^{k_0}_{0,0} + \epsilon^{k_0}_0\\
X^1_1 &= \beta^1_{1,0} + \beta^{1,1}_{1,0} X^1_0 + \cdots + \beta^{1,k_0}_{1,0} X^{k_0}_0 + \epsilon^1_1\\
&\;\;\vdots\\
X^{k_1}_1 &= \beta^{k_1}_{1,0} + \beta^{k_1,1}_{1,0} X^1_0 + \cdots + \beta^{k_1,k_0}_{1,0} X^{k_0}_0 + \epsilon^{k_1}_1\\
X^1_2 &= \beta^1_{2,0} + \beta^{1,1}_{2,0} X^1_0 + \cdots + \beta^{1,k_0}_{2,0} X^{k_0}_0 + \beta^{1,1}_{2,1} X^1_1 + \cdots + \beta^{1,k_1}_{2,1} X^{k_1}_1 + \epsilon^1_2\\
&\;\;\vdots\\
X^{k_q}_q &= \beta^{k_q}_{q,0} + \sum_j \cdots
\end{aligned}$$
Simulations of high-redshift [OIII] emitters: Chemical evolution and multi-line diagnostics

Yurina Nakazato (Department of Physics, The University of Tokyo), Naoki Yoshida (The University of Tokyo; Kavli IPMU), and Daniel Ceverino (Universidad Autonoma de Madrid; CIAFF)
Corresponding author: Yurina Nakazato (yurina.nakazato@phys.s.u-tokyo.ac.jp)
arXiv:2301.02416v1 [astro-ph.GA] 6 Jan 2023

ABSTRACT

Recent observations by the James Webb Space Telescope discovered a number of high-redshift galaxies with strong emission lines from doubly ionized oxygen. Combined with ALMA observations of far-infrared lines, multi-line diagnostics can be applied to the high-redshift galaxies in order to probe the physical conditions of the inter-stellar medium. We study the formation and evolution of galaxies using the FirstLight simulation suite, which provides outputs of 62 high-resolution, zoom-in galaxy simulations. We devise a physical model of Hii regions and calculate spatially resolved [Oiii] line emission. We show that massive galaxies with stellar masses of M∗ > 10^9 M⊙ chemically evolve rapidly to z = 9. Young stellar populations in the star-forming galaxies boost the [Oiii] line emission, rendering the ratio of line luminosity to star formation rate larger than that for low-redshift galaxies, which is consistent with recent observations. Measuring the flux ratios of rest-frame optical and far-infrared lines allows us to estimate the physical conditions, such as density and metallicity, of the star-forming gas in high-redshift [Oiii] emitters.

1. INTRODUCTION

Understanding the formation and evolution of the first galaxies is one of the key scientific goals of new-generation telescopes, including the James Webb Space Telescope (JWST) and the Atacama Large Millimetre/Submillimetre Array (ALMA). High-redshift galaxies can be detected and identified using strong emission lines, among which the [Oiii] 88µm line is thought to be promising (Inoue et al. 2014). A number of galaxies have been found at z > 7 by ALMA observations targeting the [Oiii] 88µm line (e.g. Inoue et al. 2016; Hashimoto et al. 2018), including the most distant galaxy candidate at z = 13.27 with a 4σ [Oiii] 88µm detection (Harikane et al. 2022). Since the [Oiii] line emission originates from Hii regions around young massive stars, it can be used to trace the star formation activities and also the physical properties of the inter-stellar medium (ISM) in the early galaxies.

JWST is opening a new window into the early universe through its superb observational capability in the near-infrared. For example, the JWST Early Release Observation (ERO) in the lensing field SMACS 0723 already reported three galaxies confirmed spectroscopically by NIRSpec (Schaerer et al. 2022; Curti et al. 2022; Heintz et al. 2022). The NIRSpec instrument is capable of detecting and identifying various rest-frame optical lines such as [Oii] 3727Å, [Oiii] 4959Å and [Oiii] 5007Å. The relatively weak [Oiii] 4363Å line has been detected for all three galaxies, enabling us to estimate the ISM metallicity in a direct manner.

Detailed numerical simulations are indispensable to study the physical conditions of the ISM. There have been several studies focusing on [Oiii] emission lines from high-z galaxies (Hirschmann et al. 2017; Olsen et al. 2017; Moriwaki et al. 2018; Katz et al. 2019; Arata et al. 2020; Ceverino et al. 2021; Pallottini et al. 2022). Moriwaki et al. (2018) use a cosmological simulation with a large box size of 50 Mpc (Shimizu et al. 2016) to calculate the [Oiii] 88µm line intensities for a few hundred galaxies with stellar masses of ∼ 10^8 M⊙. High-resolution, zoom-in simulations have also been performed to study the internal structure of early galaxies (Katz et al. 2019; Arata et al. 2020). For the upcoming observations conducted by JWST, it is urgently needed to study the population of high-redshift galaxies with high resolution in a fully cosmological context. In this Letter, we use the outputs of the FirstLight simulation (Ceverino et al. 2017).
The simulation suite is designed to produce a statistically significant number of galaxies with very high resolution at the epoch of reionization. Thanks to the mass- and volume-complete sample of more than 60 massive galaxies and to the high resolution of ∼ 20 pc, we can investigate the internal structure as well as the statistics of the high-redshift galaxies.

Throughout this Letter, we assume Z⊙ = 0.02 as the solar metallicity (Anders & Grevesse 1989).

2. METHOD

2.1. Cosmological Simulation

We use mass-limited galaxy samples selected from the FirstLight simulation suite (Ceverino et al. 2017). The simulations are performed with the ART code (Kravtsov et al. 1997; Kravtsov 2003; Ceverino & Klypin 2009; Ceverino et al. 2014), which follows gravitational N-body dynamics and Eulerian hydrodynamics using an adaptive mesh refinement method. Besides these two processes, the code incorporates astrophysical processes relevant for galaxy formation. The so-called subgrid physics includes atomic and molecular cooling of hydrogen and helium, photoionization heating by a cosmological UV background with partial self-shielding, and star formation and the associated stellar feedback. Details are described in Ceverino et al. (2017). The simulations track metals released from SNe-Ia and from SNe-II, using supernova yields from Woosley & Weaver (1995).

Our simulated galaxies are hosted by dark matter haloes with maximum circular velocity (Vmax) higher than 178 km/s at z = 5 in a cosmological volume of 40 h−1 Mpc on a side. The host haloes are selected in a low-resolution N-body-only simulation, for which refined initial conditions are generated using a standard zoom-in technique (Klypin et al. 2011). The refinement achieves a dark matter particle mass of mDM = 8 × 10^4 M⊙ and a minimum star particle mass of 10^3 M⊙; the maximum spatial resolution is a few tens of proper parsecs, depending on the refinement level.

We calculate the stellar mass distribution for the selected 62 massive galaxies at z = 9, 8, 7, 6. The maximum stellar mass is 9.5, 9.7, 10.1, and 10.7 × 10^9 M⊙, respectively. The sample allows us to study the evolution of more massive galaxies than in previous simulations, e.g., Moriwaki et al. (2018), the SERRA simulation (Pallottini et al. 2022), and the SÍGAME simulation (Olsen et al. 2017), and thus is well suited for comparison with massive galaxies observed by HST, ALMA, and JWST (e.g. Tacchella et al. 2022; Graziani et al. 2020; Topping et al. 2022; Trussler et al. 2022; Barrufet et al. 2022; Leethochawalit et al. 2022).

2.2. Line emissivity calculation

We generate emission-line maps for our galaxy samples by choosing a region enclosed by 0.3 times the virial radius of the host halo, following Mandelker et al. (2014, 2017). We configure a uniform 3D grid with a cell side length of 100 pc. We locate the star particles and gas elements within each grid cell, and store the mass of stars younger than 10 Myr, the average density of the gas with nH > 0.1 cm−3, and the average metallicity of the cold/warm gas with T < 5 × 10^4 K. These physical quantities assigned to the individual cells are then used to compute the line emissivities in a similar manner to Hirschmann et al. (2017); Moriwaki et al. (2018); Ceverino et al. (2021). We generate a library of emission lines using CLOUDY (Ferland et al. 2013). The library covers a wide range of gas metallicity Z and ionization parameter U, as given in Table 1.
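As a concrete illustration of this gridding step, the following numpy sketch deposits star particles and gas elements onto a uniform grid. The array names, units, and the unweighted cell averaging are our assumptions, not details taken from the FirstLight pipeline.

```python
import numpy as np

def deposit_on_grid(star_pos, star_mass, star_age,
                    gas_pos, gas_nH, gas_T, gas_Z,
                    half_size, cell=0.1):
    """Bin star particles and gas elements onto a uniform grid (sketch).

    Positions in kpc relative to the galaxy centre, cell = 0.1 kpc (100 pc);
    ages in Myr, densities in cm^-3, temperatures in K.
    """
    n = int(np.ceil(2.0 * half_size / cell))

    def cell_index(pos):
        idx = np.clip(((pos + half_size) / cell).astype(int), 0, n - 1)
        return tuple(idx.T)               # (ix, iy, iz) arrays for fancy indexing

    # Mass of stars younger than 10 Myr in each cell.
    m_young = np.zeros((n, n, n))
    young = star_age < 10.0
    np.add.at(m_young, cell_index(star_pos[young]), star_mass[young])

    def cell_mean(pos, values, mask):
        total = np.zeros((n, n, n))
        count = np.zeros((n, n, n))
        np.add.at(total, cell_index(pos[mask]), values[mask])
        np.add.at(count, cell_index(pos[mask]), 1.0)
        with np.errstate(invalid="ignore"):
            return total / count          # NaN where a cell holds no element

    n_gas = cell_mean(gas_pos, gas_nH, gas_nH > 0.1)   # dense gas only
    z_gas = cell_mean(gas_pos, gas_Z, gas_T < 5e4)     # cold/warm gas only
    return m_young, n_gas, z_gas
```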
The library lists the individual line luminosity, $L_{\rm line}$, normalized by the Hβ line luminosity calculated with the case-B approximation (Dopita & Sutherland 2003), $L^{\rm caseB}_{{\rm H}\beta}$, as

$$L_{\rm line} = (1 - f_{\rm esc})\, C_{\rm line}(Z_{\rm gas}, U, n_{\rm HII})\, L^{\rm caseB}_{{\rm H}\beta}, \qquad (1)$$

$$L^{\rm caseB}_{{\rm H}\beta} = 4\pi j_{{\rm H}\beta} V = h\nu_{{\rm H}\beta} \left(\frac{\alpha^{\rm eff}_{{\rm H}\beta}}{\alpha_B}\right) Q, \qquad (2)$$

where $f_{\rm esc}$ is the Lyman continuum escape fraction and $C_{\rm line}$ is the line luminosity ratio. The Hβ emission rate per unit volume per unit time per unit solid angle is denoted as $j_{{\rm H}\beta}$, $\alpha^{\rm eff}_{{\rm H}\beta}$ is an effective recombination coefficient, $Q$ is the production rate of ionizing photons from each star particle, and $\alpha_B$ is the case-B hydrogen recombination coefficient given by

$$\alpha_B = 2.6 \times 10^{-13} \left(\frac{T_e}{10^4\,{\rm K}}\right)^{-0.85} {\rm cm^3\,s^{-1}} \qquad (3)$$

with a constant electron temperature $T_e = 10^4$ K.

We set $f_{\rm esc} = 0.1$, which is consistent with previous radiative transfer simulations for massive galaxies with $M_{\rm halo} > 10^{10-11} M_\odot$ (Yajima et al. 2011; Kimm & Cen 2014; Wise et al. 2014; Paardekooper et al. 2015; Xu et al. 2016). It is also consistent with recent observational estimates at z ∼ 6 − 8 (Castellano et al. 2017; Robertson et al. 2013). We note that some galaxies at z < 4 have been reported to have an even higher escape fraction of over 20 percent (e.g. Marques-Chaves et al. 2022; Vanzella et al. 2016; Fletcher et al. 2019; Bian & Fan 2020; Flury et al. 2022).

Since individual Hii regions are not resolved in our simulations, we resort to a physical model of the ISM structure to calculate the line emissivities of Hii regions. We characterize the ISM by the local gas density n and metallicity Z, and also by a volume-averaged ionization parameter

$$\langle U \rangle = \frac{3\alpha_B^{2/3}}{4c}\left(\frac{3 Q \epsilon^2 n_{\rm HII}}{4\pi}\right)^{1/3}. \qquad (4)$$

Our fiducial model assumes a constant gas density $n_{\rm HII}$ in a spherical Hii region surrounding a star particle (see, e.g. Panuzzo et al. 2003; Gutkin et al. 2016). We set the Hii region density $n_{\rm HII} = 100$ cm−3 (e.g. Osterbrock & Ferland 2006; Hirschmann et al. 2017, 2022). We define the volume-filling factor of the gas as

$$\epsilon = \frac{n_{\rm gas,grid}}{n_{\rm HII}}, \qquad (5)$$

where $n_{\rm gas,grid}$ is the gas number density in each grid cell. In rare cases where the volume-averaged gas density exceeds $n_{\rm HII}$ ($\epsilon > 1$), we set the filling factor to unity. Note that a larger $n_{\rm gas,grid}$ for a fixed $n_{\rm HII}$ yields a larger filling factor $\epsilon$. Hence the resulting line emissivity depends only weakly on the assumed $n_{\rm HII}$ in our model. We have tested a few variations with $n_{\rm HII} = 50, 300$ cm−3, and explicitly checked that our main findings in the following sections are not sensitively affected by this choice.

Table 1. The parameters used to calculate the line luminosities with CLOUDY.
log10(Zgas/Z⊙): −1.30, −0.70, −0.40, 0.0, 0.30
log10 U: −4.0, −3.9, ..., −1.1, −1.0
log10(nHII/cm−3): 2.0 (fixed)

We compute the production rate of ionizing photons Q of the simulated galaxies using publicly available tables from the Binary Population and Spectral Synthesis (BPASS) model (Byrne et al. 2022). Our simulations adopt a stellar initial mass function represented by broken power laws as

$$N(M < M_{\rm max}) \propto \int_{0.1}^{M_1} \left(\frac{M}{M_\odot}\right)^{\alpha_1} dM + M_1^{\alpha_1} \int_{M_1}^{M_{\rm max}} \left(\frac{M}{M_\odot}\right)^{\alpha_2} dM \qquad (6)$$

with $\alpha_1 = -1.3$, $\alpha_2 = -2.35$, $M_1 = 0.5$, $M_{\rm max} = 300\,M_\odot$, as in Ceverino et al. (2019). We use a grid of 13 values of metallicity, from Z = 10^−5 to 0.04, and 50 logarithmic bins in stellar population ages between 1 Myr and 100 Gyr.
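To show how equations (1)-(5) fit together numerically, here is a small Python sketch for a single grid cell. The constants for ν_Hβ and the effective Hβ coefficient are standard case-B values at 10^4 K that we insert for illustration, and `C_line` is a placeholder for an interpolator over the CLOUDY library of Table 1.

```python
import numpy as np

# cgs constants; the case-B numbers at Te = 10^4 K are assumed standard values.
H_PLANCK = 6.626e-27          # erg s
NU_HBETA = 6.17e14            # Hz, rest-frame Hbeta (4861 A)
ALPHA_EFF_HBETA = 3.03e-14    # cm^3 s^-1, effective Hbeta coefficient
C_LIGHT = 2.998e10            # cm s^-1

def alpha_B(Te=1e4):
    """Case-B recombination coefficient, equation (3)."""
    return 2.6e-13 * (Te / 1e4) ** -0.85

def mean_U(Q, eps, n_HII=100.0):
    """Volume-averaged ionization parameter, equation (4)."""
    return (3.0 * alpha_B() ** (2.0 / 3.0) / (4.0 * C_LIGHT)) * \
           (3.0 * Q * eps ** 2 * n_HII / (4.0 * np.pi)) ** (1.0 / 3.0)

def line_luminosity(Q, n_grid, Z, C_line, n_HII=100.0, f_esc=0.1):
    """Equations (1), (2) and (5) for one grid cell (sketch).

    C_line -- callable (Z, U, n_HII) -> line-to-Hbeta ratio, meant to be an
              interpolator over the CLOUDY library of Table 1 (not shown).
    """
    eps = min(n_grid / n_HII, 1.0)                                     # eq. (5)
    L_Hbeta = H_PLANCK * NU_HBETA * (ALPHA_EFF_HBETA / alpha_B()) * Q  # eq. (2)
    return (1.0 - f_esc) * C_line(Z, mean_U(Q, eps, n_HII), n_HII) * L_Hbeta
```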
We re-assign "fine" ages to star particles in order to mitigate the discreteness effect caused by our simulation setup. Our simulations produce new star particles with a fixed time step of ∆tSF = 5 Myr, and the simulation output timings are not synchronized with ∆tSF. In a snapshot, young stars therefore typically have discretized ages such as tage = 2 Myr, 7 Myr, etc. This apparently minor gap in stellar ages has a large impact when we compute the line emissivities, because the ionizing photon production rate quickly decreases with age. For instance, in the BPASS SED of a single stellar population that we use, the number of ionizing photons decreases by over a factor of 100 from age 1 Myr to 10 Myr (Xiao et al. 2018). We thus re-assign the stellar ages as follows. We consider star particles younger than 15 Myr, with stamped ages at T1, T2, T3 (T1 < T2 < T3) Myr, and do random sampling within each age interval. For instance, to a star with age T1, we randomly draw a new age within [1, T1] Myr and re-assign it (a toy sketch of this resampling is given at the end of this subsection). Finally, we select star particles younger than 10 Myr for our emission-line calculation, and calculate the ionizing photon production rate Q for each stellar particle using the BPASS table.

We consider stellar atmosphere models with different elemental compositions, i.e., different values of [α/Fe]. In BPASS v2.3 (Byrne et al. 2022), there are five models with mass fractions in α-elements relative to iron of ∆(log(α/Fe)) = −0.2, +0.0, +0.2, +0.4 and +0.6. For the calculation of [α/Fe], the α-element abundance is approximated by the oxygen abundance (log N_O), assuming that half of the mass in metals produced by SNe-II is in the form of oxygen atoms:

$$\log N_{\rm O} = \log(f_{\rm O}\, z_{\rm SNII} / A_{\rm O}), \qquad (7)$$

where $f_{\rm O}$ and $z_{\rm SNII}$ are the fraction of oxygen released by Type-II SNe and the mass fraction of metals released from Type-II SNe, respectively. Here, the atomic weight of oxygen is $A_{\rm O} = 16$ and we assume $f_{\rm O} = 0.5$ (Woosley & Weaver 1995). We calculate the iron abundance considering contributions from both Type-Ia and Type-II SNe as

$$N_{\rm Fe} = \frac{f_{\rm Fe,Ia}\, z_{\rm SNIa} + f_{\rm Fe,II}\, z_{\rm SNII}}{A_{\rm Fe}}, \qquad (8)$$

where $z_{\rm SNIa}$ is the mass fraction of metals released from Type-Ia SNe and $A_{\rm Fe} = 56$. We set the fractions $f_{\rm Fe,Ia} = 0.5$ (Thielemann et al. 1986) and $f_{\rm Fe,II} = (0.026, 0.033)$ for metal mass ratios between zero and solar metallicity (Nomoto et al. 2006; Ceverino et al. 2019), respectively. Finally, [α/Fe] is obtained from

$$[\alpha/{\rm Fe}] = \log N_{\rm O} - \log N_{\rm Fe} - \log(N_{\rm O}/N_{\rm Fe})_\odot, \qquad (9)$$

where $(N_{\rm O}/N_{\rm Fe})_\odot = 1.17$ is the solar value of the O/Fe abundance ratio.
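As promised above, here is a toy version of the age re-assignment. Drawing the youngest stamp's new age from [1, T1] Myr follows the text; treating later intervals as [T_{k−1}, T_k] is our reading of the procedure rather than something stated explicitly.

```python
import numpy as np

def reassign_fine_ages(ages, t_max=15.0, seed=0):
    """Redistribute discretized stellar ages (toy version of the step above).

    Stars stamped with age T_k get a new age drawn uniformly from the
    interval ending at T_k, mitigating the 5 Myr star-formation discreteness.
    """
    rng = np.random.default_rng(seed)
    ages = np.asarray(ages, dtype=float)
    stamps = np.unique(ages[ages < t_max])     # e.g. [2., 7., 12.] Myr
    out = ages.copy()
    lower = 1.0                                # first interval is [1, T1] Myr
    for T in stamps:
        sel = ages == T
        out[sel] = rng.uniform(lower, T, size=int(sel.sum()))
        lower = T                              # next interval starts here
    return out
```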
Figure 1. The [Oiii] 88 µm luminosity versus SFR for our 62 simulated galaxies at z = 9 (top left), z = 8 (top right), z = 7 (bottom left), and z = 6 (bottom right). The solid circles are colored with the gas metallicity (see the colorbar on the right). For comparison, we show the [Oiii]-SFR relation derived from observations of local galaxies by De Looze et al. (2014). Gray points are the observational results of high-z (z > 6) galaxies from Hashimoto et al. (2018); Laporte et al. (2017); Tamura et al. (2019), Inoue et al. (2016) (I16), Hashimoto et al. (2019) (H19), Carniani et al. (2017) (C17), Wong et al. (2022) (WG22), Witstok et al. (2022) (WT22) and Harikane et al. (2020).

3. RESULTS

We focus on rest-frame sub-millimeter and optical [Oiii] lines from high-redshift galaxies, which are detected by ALMA and JWST.

3.1. L[Oiii] vs SFR

Figure 1 shows the [Oiii] 88µm luminosity against star formation rate (SFR) for our galaxy samples. The colorbar indicates the nebular metallicity Zneb, which is the line-luminosity-weighted gas metallicity. We compare with the observed local galaxies (De Looze et al. 2014) and with the observed [Oiii] 88µm luminosities of high-redshift galaxies (see the caption). At z = 9 to z = 7, most of our simulated galaxies are located above the local galaxy relation (solid line), similar to the results of Moriwaki et al. (2018); Arata et al. (2020); Pallottini et al. (2022).

At z = 7 − 9, our galaxy samples are distributed around the observed galaxies. It is interesting that luminous galaxies are already chemically enriched, with log(Z/Z⊙) ∼ −0.5, at these early epochs. Our simulations predict a slightly steeper relation at z = 7 − 9 than the local one:

$$L_{\rm [OIII],88} \propto \left(\frac{\rm SFR}{M_\odot\,{\rm yr^{-1}}}\right)^{0.9-1.2}. \qquad (10)$$

We find three galaxies with L[Oiii] > 10^9 L⊙ at z = 7, which are as bright as several observed galaxies. We study the structure of one of them (sample FL964) in detail. It has Mgas = 6.41 × 10^9 M⊙, M⋆ = 9.96 × 10^9 M⊙, and a specific SFR of 11 Gyr−1 at z = 7. Figure 2 shows the projected maps of the gas number density, the ionization parameter, and the [Oiii] 88µm emission. Clearly, regions with high ionization parameters of log U ∼ −2 produce high emissivities, consistent with the observation by Harikane et al. (2020) and also with recent simulations by Kohandel et al. (2022). The total [Oiii] 5007Å luminosity of FL964 is 7.60 × 10^9 L⊙, about 5 times larger than L[Oiii],88.

Figure 2. Projected gas density (left), averaged ionization parameter (middle), and [Oiii] 88µm distribution (right) for the galaxy sample FL964 at z = 7. Each panel shows a region with a side length and depth of 0.3 Rvir (= 7.4 kpc).

3.2. The mass-metallicity relation

It is important to examine the metallicity evolution of our simulated galaxies. We study the so-called mass-metallicity relation (MZR) by calculating the gas-phase metallicity for individual galaxies. Figure 3 shows the stellar mass versus gas-phase oxygen abundance relation. We calculate the gas-phase oxygen abundance by adopting the conversion equation of Mandelker et al. (2014),

$$\frac{\rm O}{\rm H} = \frac{f_{\rm O}\, z_{\rm SNII}}{X A_{\rm O}}, \qquad (11)$$

where we set the hydrogen mass fraction X = 0.755 and the other values, $f_{\rm O}$ and $A_{\rm O}$, are the same as in eq. (7); this adopts the solar oxygen abundance 12 + log(O/H) = 8.9. We then calculate the average $z_{\rm SNII}$, weighted by the [Oiii] luminosity of each grid cell. This weighting is compatible with observational methods such as the direct method or the strong-line method, which use oxygen emission lines (e.g. Bian et al. 2018; Izotov et al. 2019). We calculate the mass of stars within the region of 0.3 Rvir.
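For concreteness, here are a few lines of Python implementing equation (11) with the luminosity weighting described above; the array names are illustrative.

```python
import numpy as np

F_O, A_O, X_H = 0.5, 16.0, 0.755          # values quoted in the text

def oxygen_abundance(z_SNII, L_OIII):
    """12 + log(O/H) per equation (11), with [OIII]-luminosity weighting.

    z_SNII -- per-cell mass fraction of metals from Type-II SNe
    L_OIII -- per-cell [OIII] luminosity used as the weight
    """
    z_mean = np.average(z_SNII, weights=L_OIII)  # luminosity-weighted average
    o_over_h = F_O * z_mean / (X_H * A_O)        # number ratio of O to H
    return 12.0 + np.log10(o_over_h)
```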
In Figure 3, we also plot the MZR for local galaxies from Curti et al. (2020) (dashed line) and recent JWST observational results for high-redshift galaxies (Sun et al. 2022; Curti et al. 2022; Langeroodi et al. 2022; Williams et al. 2022). Curti et al. (2022) estimated metallicities of SMACS-field galaxies by the direct method, Sun et al. (2022) adopt the strong-line calibration of Bian et al. (2018) using O32, and Langeroodi et al. (2022) and Williams et al. (2022) adopt the strong-line method of Izotov et al. (2019).

Our simulated galaxies have metallicities (oxygen abundances) and stellar masses similar to the observed ones. Note that Figure 3 shows the evolution of a fixed sample of simulated galaxies, rather than of all the galaxies at the respective epochs. Namely, we select the galaxies at z = 5 by mass and plot their progenitors at z = 6 − 9. Hence we likely miss low-mass, low-metallicity galaxies at z = 9 (see Langan et al. (2020) for the mass-metallicity relation of low-mass galaxies in FirstLight). Some galaxies with M⋆ > 10^9 M⊙ have gas-phase metallicities of 12 + log(O/H) ∼ 8.5 even at z = 9, suggesting that metal enrichment can proceed rapidly in the early galaxies.

Figure 3. The stellar mass versus gas-phase oxygen abundance relation, compared with the local MZR of Curti et al. (2020), z > 7 galaxies observed in the SMACS J0723 field (Curti et al. 2022), z ∼ 6 galaxies observed in JWST/NIRCam WFSS mode (Sun et al. 2022), and z = 8.1 − 9.5 galaxies observed in the cluster RX J2129.4+0009 field (two galaxies at z ∼ 8.15 from Langeroodi et al. (2022) and one at z = 9.51 from Williams et al. (2022)), respectively.

3.3. Far-IR/optical line ratios

It is interesting and timely to explore line-ratio diagnostics using three [Oiii] lines: 88 µm, 52 µm and 5007Å. The former two fine-structure lines are observed by ALMA, whereas the latter is to be observed by JWST. Hereafter we denote the line luminosity ratios by wavelength, such as R5007/88 = L5007Å/L88µm. Figure 4 shows R5007/88 against R52/88 for our simulated galaxies. We also show the model line ratios obtained from our set of CLOUDY calculations (Table 1).

The ratio R5007/88 is commonly thought to be a sensitive temperature indicator (e.g. Fujimoto et al. 2022). Interestingly, Figure 4 shows that R5007/88 may also trace the mean gas metallicity of a galaxy. We argue that it is a model-dependent, indirect indicator, because of the complex dependence of the line emissivities on the relevant physical quantities. Typically, the oxygen line emissivity increases with increasing oxygen abundance (metallicity), but there is a critical abundance beyond which the emissivity decreases, because metal-line cooling lowers the temperature of the Hii regions. The critical "peak" abundance differs from line to line, and thus the line ratios vary non-trivially as the metallicity increases.

In Figure 4, we plot local metal-rich galaxies observed in both FIR (Brauher et al. 2008) and optical (Moustakas et al. 2006) emission lines. Most of the plotted local galaxies have high metallicities, with Z > 1 Z⊙, and are located in the lower portion (low R52/88) of the figure. Only NGC 1569, the leftmost symbol with R5007/88 = 4.9, has a sub-solar metallicity of log(Z/Z⊙) = −0.6 (Israel 1988), which places it near the same metallicity line as our high-redshift galaxy samples. The local planetary nebula data from Dinerstein et al. (1985) are also plotted as red stars. It can easily be estimated that the planetary nebulae have electron densities of ne([Oiii]) ≳ 10^3 cm−3, consistent with those derived from [Oii] line ratios.
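Diagnostics of this kind can be mechanized by matching measured ratios against the model grid. The sketch below does a nearest-model lookup in log-ratio space; the grid structure and key names are hypothetical stand-ins for a table of CLOUDY outputs such as those behind Figure 4.

```python
import numpy as np

def infer_conditions(r5007_88, r52_88, grid):
    """Nearest-model lookup in line-ratio space (illustrative sketch).

    grid -- iterable of dicts with keys 'logZ', 'logU', 'lognH',
            'R5007_88', 'R52_88', tabulated from CLOUDY runs as in Table 1.
    Returns the model whose ratios best match the measured pair.
    """
    obs = np.array([np.log10(r5007_88), np.log10(r52_88)])

    def dist(model):
        mod = np.array([np.log10(model['R5007_88']), np.log10(model['R52_88'])])
        return float(np.sum((mod - obs) ** 2))   # squared distance in log space

    return min(grid, key=dist)
```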
The line emissivities, and hence the ratios, have an implicit dependence on the ionization parameter through other quantities such as the electron temperature, but the dependence is weak at log U ∼ (−3, −2). Our simulated galaxies generally have high ionization parameters, with log U ≃ −2 (Figure 2), and thus we may use R5007/88 as a metallicity indicator as well.

In our emission-line model (Section 2.2), the Hii regions have a fixed density of nHII = 100 cm−3. Hence our galaxy samples populate the upper-left portion of the diagram, with R52/88 ≲ 1. Since R52/88 varies weakly with Z and U (Yang & Lidz 2020), galaxies with high Z and high U are distributed toward the bottom/right in Figure 4.

Figure 4. Line luminosity ratio R5007/88 against R52/88. Our simulated galaxies at z = 7 are represented by solid circles colored with gas metallicity. Gray star symbols show the local galaxies from Brauher et al. (2008); Moustakas et al. (2006), and red ones show the local planetary nebulae from Dinerstein et al. (1985). The results of CLOUDY calculations are represented by lines colored with metallicity (log(Z/Z⊙) = −1.30, −0.70, −0.40, 0.0). Solid, dashed, and dotted lines are the cases of log U = −1.5, −2, −3, respectively. The number densities of the Hii region, log nHII [cm−3] = 1, 2, 3, are marked by ticks from left to right on each CLOUDY line.

4. DISCUSSION

In this Letter, we have studied the chemical evolution of early star-forming galaxies from z = 9 to z = 6 using zoom-in hydrodynamics simulations. We find that oxygen-line-emitting galaxies with stellar masses of M⋆ = 10^{9−9.5} M⊙ have large ionization parameters of log U = −2 and metallicities of log(Z/Z⊙) ∼ (−1, −0.5). In these galaxies, metal enrichment occurs early and quickly, over a few hundred million years.

We have examined line diagnostics using [Oiii] 5007Å, 88 µm, and 52 µm for future observational synergies between JWST and ALMA. There have already been a few interesting observations of high-redshift galaxies. Killi et al. (2022) use ALMA to detect the [Oiii] 52 µm line from a galaxy at z = 7 for the first time. The derived value of R52/88 ∼ 0.7 is close to our galaxy samples (Figure 4) and indicates a relatively low electron density of ne ∼ 50 − 260 cm−3. Observations of SMACS0723-4590 at z = 8.5 by Fujimoto et al. (2022) show a large line ratio, R5007/88 = 15.8, slightly larger than in our galaxy samples, suggesting a low metallicity of Z ∼ 0.04 Z⊙. Combining this with R52/88 from future observations will constrain the metallicity and the ionization parameter at the same time, according to Figure 4. Planned observations using JWST NIRSpec are targeted at several [Oiii] 88 µm emitters (e.g., GO-1740, PI: Harikane, and GO-1840, PI: Álvarez-Márquez & Hashimoto). Multi-line diagnostics such as those presented in this Letter hold promise to reveal the physical conditions of the ISM in high-redshift galaxies.

Our simulations show rapid chemical evolution at high redshift. The resulting MZR is consistent with up-to-date JWST observations (Figure 3).
Langan et al. (2020) use 300 less massive galaxy samples, with M⋆ ≤ 10^{8.5} M⊙ at z = 8, and derive the MZR from z = 8 to z = 5 (see also a similar study by Noel et al. (2022)). Our galaxy samples, with larger stellar masses of M⋆ = 10^{8.5−10.0} M⊙, show a steeper MZR, which indicates rapid chemical evolution at the early epoch. It would be highly interesting to study relatively massive galaxies with JWST observations, such as B14–65666 (Roberts-Borsani et al. 2020), A2744–YD4 (Morishita et al. 2022), and MACS1149–JD1 (Hashimoto et al. 2018).

There are a few caveats in our emission-line model. Most notably, we do not account for dust extinction. Recent ALMA surveys report the existence of a substantial amount of dust in star-forming galaxies at z ∼ 6 − 8 (Fudamoto et al. 2020; Burgarella et al. 2022; Bakx et al. 2021; Schouws et al. 2022; Tamura et al. 2019; Inami et al. 2022). Given the importance of emission-line ratios including [Oiii] 5007Å, accurate modeling of dust extinction may be needed in future studies.

We have studied the statistics of early emission-line galaxies and compared them with recent observations. It will be possible, and important, to study the internal structure of galaxies using both JWST observations and numerical simulations. We have shown in Figure 2 that there are large variations and fluctuations of the line emissivities and of the metal and density distributions within a galaxy. Cameron et al. (2022) argue that unresolved variations of the electron temperature within a galaxy result in a biased estimate when the so-called Te-method is applied. JWST's NIRSpec IFU can resolve galaxies with a pixel scale of 0.1 arcsec/pixel (see https://jwst-docs.stsci.edu/jwst-near-infrared-spectrograph). For our configuration shown in Figure 2, the 7.4 kpc region at z = 7 can be resolved with 13 × 13 pixels. Gravitational lensing magnification will greatly help to further resolve the structure of individual galaxies. In our future work, we will generate mock two-dimensional maps of our simulated galaxies with the same resolution as the NIRSpec IFU, and will address how well physical quantities such as the gas density and temperature distribution can be reconstructed.

5. ACKNOWLEDGEMENTS

We thank Kana Moriwaki and Yuichi Harikane for fruitful discussions. This work made use of v2.3 of the Binary Population and Spectral Synthesis (BPASS) models as described in Byrne et al. (2022) and Stanway & Eldridge (2018). The authors thankfully acknowledge the computer resources at MareNostrum and the technical support provided by the Barcelona Supercomputing Center (RES-AECT-2020-3-0019). Numerical analyses were carried out on the analysis servers at the Center for Computational Astrophysics, National Astronomical Observatory of Japan. YN has been supported by the International Graduate Program for Excellence in Earth-Space Science (IGPEES) of the University of Tokyo. DC is a Ramon-Cajal Researcher and is supported by the Ministerio de Ciencia, Innovación y Universidades (MICIU/FEDER) under research grant PGC2018-094975-C21.

REFERENCES

Anders, E., & Grevesse, N. 1989, GeoCoA, 53, 197, doi: 10.1016/0016-7037(89)90286-X
Arata, S., Yajima, H., Nagamine, K., Abe, M., & Khochfar, S. 2020, MNRAS, 498, 5541, doi: 10.1093/mnras/staa2809
Bakx, T. J. L. C., Sommovigo, L., Carniani, S., et al. 2021, MNRAS, 508, L58, doi: 10.1093/mnrasl/slab104
Barrufet, L., Oesch, P. A., Weibel, A., et al. 2022, arXiv e-prints, arXiv:2207.14733. https://arxiv.org/abs/2207.14733
Bian, F., & Fan, X. 2020, MNRAS, 493, L65, doi: 10.1093/mnrasl/slaa007
Bian, F., Kewley, L. J., & Dopita, M. A. 2018, ApJ, 859, 175, doi: 10.3847/1538-4357/aabd74
Brauher, J. R., Dale, D. A., & Helou, G. 2008, ApJS, 178, 280, doi: 10.1086/590249
Burgarella, D., Bogdanoska, J., Nanni, A., et al. 2022, A&A, 664, A73, doi: 10.1051/0004-6361/202142554
Byrne, C. M., Stanway, E. R., Eldridge, J. J., McSwiney, L., & Townsend, O. T. 2022, MNRAS, 512, 5329, doi: 10.1093/mnras/stac807
Cameron, A. J., Katz, H., & Rey, M. P. 2022, arXiv e-prints, arXiv:2210.14234. https://arxiv.org/abs/2210.14234
Carniani, S., Maiolino, R., Pallottini, A., et al. 2017, A&A, 605, A42, doi: 10.1051/0004-6361/201630366
Castellano, M., Pentericci, L., Fontana, A., et al. 2017, ApJ, 839, 73, doi: 10.3847/1538-4357/aa696e
Ceverino, D., Glover, S. C. O., & Klessen, R. S. 2017, MNRAS, 470, 2791, doi: 10.1093/mnras/stx1386
Ceverino, D., Hirschmann, M., Klessen, R. S., et al. 2021, MNRAS, 504, 4472, doi: 10.1093/mnras/stab1206
Ceverino, D., Klessen, R. S., & Glover, S. C. O. 2019, MNRAS, 484, 1366, doi: 10.1093/mnras/stz079
Ceverino, D., & Klypin, A. 2009, ApJ, 695, 292, doi: 10.1088/0004-637X/695/1/292
Ceverino, D., Klypin, A., Klimek, E. S., et al. 2014, MNRAS, 442, 1545, doi: 10.1093/mnras/stu956
Curti, M., Mannucci, F., Cresci, G., & Maiolino, R. 2020, MNRAS, 491, 944, doi: 10.1093/mnras/stz2910
Curti, M., D'Eugenio, F., Carniani, S., et al. 2022, arXiv e-prints, arXiv:2207.12375. https://arxiv.org/abs/2207.12375
De Looze, I., Cormier, D., Lebouteiller, V., et al. 2014, A&A, 568, A62, doi: 10.1051/0004-6361/201322489
Dinerstein, H. L., Lester, D. F., & Werner, M. W. 1985, ApJ, 291, 561, doi: 10.1086/163096
Dopita, M. A., & Sutherland, R. S. 2003, Astrophysics of the Diffuse Universe
Ferland, G. J., Porter, R. L., van Hoof, P. A. M., et al. 2013, RMxAA, 49, 137. https://arxiv.org/abs/1302.4485
Fletcher, T. J., Tang, M., Robertson, B. E., et al. 2019, ApJ, 878, 87, doi: 10.3847/1538-4357/ab2045
Flury, S. R., Jaskot, A. E., Ferguson, H. C., et al. 2022, ApJS, 260, 1, doi: 10.3847/1538-4365/ac5331
Fudamoto, Y., Oesch, P. A., Faisst, A., et al. 2020, A&A, 643, A4, doi: 10.1051/0004-6361/202038163
Fujimoto, S., Ouchi, M., Nakajima, K., et al. 2022, arXiv e-prints, arXiv:2212.06863. https://arxiv.org/abs/2212.06863
Graziani, L., Schneider, R., Ginolfi, M., et al. 2020, MNRAS, 494, 1071, doi: 10.1093/mnras/staa796
Gutkin, J., Charlot, S., & Bruzual, G. 2016, MNRAS, 462, 1757, doi: 10.1093/mnras/stw1716
Harikane, Y., Ouchi, M., Inoue, A. K., et al. 2020, ApJ, 896, 93, doi: 10.3847/1538-4357/ab94bd
Harikane, Y., Inoue, A. K., Mawatari, K., et al. 2022, ApJ, 929, 1, doi: 10.3847/1538-4357/ac53a9
Hashimoto, T., Laporte, N., Mawatari, K., et al. 2018, Nature, 557, 392, doi: 10.1038/s41586-018-0117-z
Hashimoto, T., Inoue, A. K., Mawatari, K., et al. 2019, PASJ, 71, 71, doi: 10.1093/pasj/psz049
Heintz, K. E., Giménez-Arteaga, C., Fujimoto, S., et al. 2022, arXiv e-prints, arXiv:2212.06877. https://arxiv.org/abs/2212.06877
Hirschmann, M., Charlot, S., Feltre, A., et al. 2017, MNRAS, 472, 2468, doi: 10.1093/mnras/stx2180
—. 2022, arXiv e-prints, arXiv:2212.02522. https://arxiv.org/abs/2212.02522
Inami, H., Algera, H. S. B., Schouws, S., et al. 2022, MNRAS, 515, 3126, doi: 10.1093/mnras/stac1779
Inoue, A. K., Shimizu, I., Tamura, Y., et al. 2014, ApJL, 780, L18, doi: 10.1088/2041-8205/780/2/L18
Inoue, A. K., Tamura, Y., Matsuo, H., et al. 2016, Science, 352, 1559, doi: 10.1126/science.aaf0714
Israel, F. P. 1988, A&A, 194, 24
Izotov, Y. I., Guseva, N. G., Fricke, K. J., & Henkel, C. 2019, A&A, 623, A40, doi: 10.1051/0004-6361/201834768
Katz, H., Galligan, T. P., Kimm, T., et al. 2019, MNRAS, 487, 5902, doi: 10.1093/mnras/stz1672
Killi, M., Watson, D., Fujimoto, S., et al. 2022, arXiv e-prints, arXiv:2211.01424. https://arxiv.org/abs/2211.01424
Kimm, T., & Cen, R. 2014, ApJ, 788, 121, doi: 10.1088/0004-637X/788/2/121
Klypin, A. A., Trujillo-Gomez, S., & Primack, J. 2011, ApJ, 740, 102, doi: 10.1088/0004-637X/740/2/102
Kohandel, M., Ferrara, A., Pallottini, A., et al. 2022, arXiv e-prints, arXiv:2212.02519. https://arxiv.org/abs/2212.02519
Kravtsov, A. V. 2003, ApJL, 590, L1, doi: 10.1086/376674
Kravtsov, A. V., Klypin, A. A., & Khokhlov, A. M. 1997, ApJS, 111, 73, doi: 10.1086/313015
Langan, I., Ceverino, D., & Finlator, K. 2020, MNRAS, 494, 1988, doi: 10.1093/mnras/staa880
Langeroodi, D., Hjorth, J., Chen, W., et al. 2022, arXiv e-prints, arXiv:2212.02491. https://arxiv.org/abs/2212.02491
Laporte, N., Ellis, R. S., Boone, F., et al. 2017, ApJL, 837, L21, doi: 10.3847/2041-8213/aa62aa
Leethochawalit, N., Trenti, M., Santini, P., et al. 2022, arXiv e-prints, arXiv:2207.11135. https://arxiv.org/abs/2207.11135
Mandelker, N., Dekel, A., Ceverino, D., et al. 2017, MNRAS, 464, 635, doi: 10.1093/mnras/stw2358
—. 2014, MNRAS, 443, 3675, doi: 10.1093/mnras/stu1340
Marques-Chaves, R., Schaerer, D., Álvarez-Márquez, J., et al. 2022, MNRAS, 517, 2972, doi: 10.1093/mnras/stac2893
Morishita, T., Roberts-Borsani, G., Treu, T., et al. 2022, arXiv e-prints, arXiv:2211.09097. https://arxiv.org/abs/2211.09097
Moriwaki, K., Yoshida, N., Shimizu, I., et al. 2018, MNRAS, 481, L84, doi: 10.1093/mnrasl/sly167
Moustakas, J., Kennicutt, Robert C., J., & Tremonti, C. A. 2006, ApJ, 642, 775, doi: 10.1086/500964
Noel, I., Zhu, H., & Gnedin, N. 2022, arXiv e-prints, arXiv:2210.16750. https://arxiv.org/abs/2210.16750
Nomoto, K., Tominaga, N., Umeda, H., Kobayashi, C., & Maeda, K. 2006, NuPhA, 777, 424, doi: 10.1016/j.nuclphysa.2006.05.008
Olsen, K., Greve, T. R., Narayanan, D., et al. 2017, ApJ, 846, 105, doi: 10.3847/1538-4357/aa86b4
Osterbrock, D. E., & Ferland, G. J. 2006, Astrophysics of Gaseous Nebulae and Active Galactic Nuclei
Paardekooper, J.-P., Khochfar, S., & Dalla Vecchia, C. 2015, MNRAS, 451, 2544, doi: 10.1093/mnras/stv1114
Pallottini, A., Ferrara, A., Gallerani, S., et al. 2022, MNRAS, 513, 5621, doi: 10.1093/mnras/stac1281
Panuzzo, P., Bressan, A., Granato, G. L., Silva, L., & Danese, L. 2003, A&A, 409, 99, doi: 10.1051/0004-6361:20031094
Roberts-Borsani, G. W., Ellis, R. S., & Laporte, N. 2020, MNRAS, 497, 3440, doi: 10.1093/mnras/staa2085
Robertson, B. E., Furlanetto, S. R., Schneider, E., et al. 2013, ApJ, 768, 71, doi: 10.1088/0004-637X/768/1/71
Schaerer, D., Marques-Chaves, R., Oesch, P., et al. 2022, arXiv e-prints, arXiv:2207.10034. https://arxiv.org/abs/2207.10034
Schouws, S., Stefanon, M., Bouwens, R., et al. 2022, ApJ, 928, 31, doi: 10.3847/1538-4357/ac4605
Shimizu, I., Inoue, A. K., Okamoto, T., & Yoshida, N. 2016, MNRAS, 461, 3563, doi: 10.1093/mnras/stw1423
Stanway, E. R., & Eldridge, J. J. 2018, MNRAS, 479, 75, doi: 10.1093/mnras/sty1353
Sun, F., Egami, E., Pirzkal, N., et al. 2022, arXiv e-prints, arXiv:2209.03374. https://arxiv.org/abs/2209.03374
Tacchella, S., Finkelstein, S. L., Bagley, M., et al. 2022, ApJ, 927, 170, doi: 10.3847/1538-4357/ac4cad
Tamura, Y., Mawatari, K., Hashimoto, T., et al. 2019, ApJ, 874, 27, doi: 10.3847/1538-4357/ab0374
Thielemann, F. K., Nomoto, K., & Yokoi, K. 1986, A&A, 158, 17
Topping, M. W., Stark, D. P., Endsley, R., et al. 2022, MNRAS, 516, 975, doi: 10.1093/mnras/stac2291
Trussler, J. A. A., Adams, N. J., Conselice, C. J., et al. 2022, arXiv e-prints, arXiv:2207.14265. https://arxiv.org/abs/2207.14265
Vanzella, E., de Barros, S., Vasei, K., et al. 2016, ApJ, 825, 41, doi: 10.3847/0004-637X/825/1/41
Williams, H., Kelly, P. L., Chen, W., et al. 2022, arXiv e-prints, arXiv:2210.15699. https://arxiv.org/abs/2210.15699
Wise, J. H., Demchenko, V. G., Halicek, M. T., et al. 2014, MNRAS, 442, 2560, doi: 10.1093/mnras/stu979
Witstok, J., Smit, R., Maiolino, R., et al. 2022, MNRAS, doi: 10.1093/mnras/stac1905
Wong, Y. H. V., Wang, P., Hashimoto, T., et al. 2022, ApJ, 929, 161, doi: 10.3847/1538-4357/ac5cc7
Woosley, S. E., & Weaver, T. A. 1995, ApJS, 101, 181, doi: 10.1086/192237
Xiao, L., Stanway, E. R., & Eldridge, J. J. 2018, MNRAS, 477, 904, doi: 10.1093/mnras/sty646
Xu, H., Wise, J. H., Norman, M. L., Ahn, K., & O'Shea, B. W. 2016, ApJ, 833, 84, doi: 10.3847/1538-4357/833/1/84
Yajima, H., Choi, J.-H., & Nagamine, K. 2011, MNRAS, 412, 411, doi: 10.1111/j.1365-2966.2010.17920.x
Yang, S., & Lidz, A. 2020, MNRAS, 499, 3417, doi: 10.1093/mnras/staa3000
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Bunkyo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Tokyo 113-0033,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Japan 2Kavli Institute for the Physics and Mathematics of the Universe (WPI),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' UT Institute for Advanced Study,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The University of Tokyo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Kashiwa,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Chiba 277-8583,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Japan 3Research Center for the Early Universe,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' School of Science,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The University of Tokyo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 7-3-1 Hongo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Bunkyo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Tokyo 113-0033,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Japan 4Universidad Autonoma de Madrid,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Ciudad Universitaria de Cantoblanco,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' E-28049 Madrid,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Spain 5CIAFF,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Facultad de Ciencias,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Universidad Autonoma de Madrid,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' E-28049 Madrid,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Spain ABSTRACT Recent observations by James Webb Space Telescope discovered a number of high-redshift galaxies with strong emission lines from doubly ionized oxygen.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Combined with ALMA observations of far- infrared lines, multi-line diagnostics can be applied to the high-redshift galaxies in order to probe the physical conditions of the inter-stellar medium.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' We study the formation and evolution of galaxies using the FirstLight simulation suite, which provides outputs of 62 high-resolution, zoom-in galaxy simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' We devise a physical model of Hii regions and calculate spatially resolved [Oiii] line emission.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' We show that massive galaxies with stellar masses of M∗ > 109M⊙ chemically evolve rapidly to z = 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Young stellar populations in the star-forming galaxies boost the [Oiii] line emission, rendering the ratio of line luminosity to star formation rate larger than that for low-redshift galaxies, which is consistent with recent observations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Measuring the flux ratios of rest-frame optical and far-infrared lines allows us to estimate the physical conditions such as density and metallicity of the star-forming gas in high-redshift [Oiii] emitters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' INTRODUCTION Understanding the formation and evolution of the first galaxies is one of the key scientific goals of new genera- tion telescopes including James Webb Space Telescope (JWST) and Atacama Large Millimetre/Submillimetre Array (ALMA).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' High-redshift galaxies can be detected and identified using strong emission lines, among which [Oiii] 88µm line is thought to be promising (Inoue et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2014).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' A number of galaxies have been found at z > 7 by ALMA observations targeting the [Oiii] 88µm line (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Inoue et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Hashimoto et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2018), including the most distant galaxy candidate at z = 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='27 with a 4σ [Oiii] 88µm detection (Harikane et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Since the [Oiii] line emission originates from Hii regions around young massive stars, it can be used to trace the star for- mation activities and also the physical properties of the inter-stellar medium (ISM) in the early galaxies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' JWST is opening a new window into the early universe through its superb observational capability in near- infrared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' For example, JWST Early Research Obser- Corresponding author: Yurina Nakazato yurina.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='nakazato@phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='u-tokyo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='jp vation (ERO) in the lensing field SMACS 0723 already reported three galaxies confirmed spectroscopically by NIRSpec (Schaerer et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Curti et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Heintz et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' NIRSpec instrument is capable of detect- ing and identifying various rest-frame optical lines such as [Oii] 3727˚A, [Oiii] 4959˚A and [Oiii] 5007˚A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The rela- tively weak [Oiii] 4363˚A line has been detected for all the three galaxies, enabling us to estimate the ISM metal- licity in a direct manner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Detailed numerical simulations are indispensable to study the physical conditions of the ISM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' There have been several studies focusing on [Oiii] emission lines from high-z galaxies (Hirschmann et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Olsen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Moriwaki et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2018;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Katz et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Arata et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Ceverino et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2021;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Pallottini et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Moriwaki et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' (2018) use a cosmological sim- ulation with a large boxsize of 50 Mpc (Shimizu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2016) to calculate the [Oiii] 88µm line intensities for a few hundred galaxies with stellar masses of ∼ 108 M⊙.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' High-resolution, zoom-in simulations have also been per- formed to study the internal structure of early galaxies (Katz et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Arata et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' For the upcoming observations conducted by JWST, it is urgently needed to study the population of high-redshift galaxies with arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='02416v1 [astro-ph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='GA] 6 Jan 2023 2 Nakazato et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' high resolution in a fully cosmological context.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' In this Letter, we use the outputs of FirstLight simulation (Cev- erino et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The simulation suite is motivated to produce a statistically significant number of galaxies with very high resolution at the epoch of reionization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Thanks to the mass and volume complete sample of more than 60 massive galaxies and to the high-resolution of ∼ 20 pc, we can investigate the internal structure as well as statistics of the high-redshift galaxies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Throughout this Letter, we assume Z⊙ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='02 as the solar metallicity (Anders & Grevesse 1989).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' METHOD 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Cosmological Simulation We use mass-limited galaxy samples selected from the FirstLight simulation suite (Ceverino et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2017).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The simulations are performed with ART code (Kravtsov et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 1997;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Kravtsov 2003;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Ceverino & Klypin 2009;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Ceverino et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2014), which follows gravitational N- body dynamics and Eulerian hydrodynamics using an adaptive mesh refinement method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Besides the two processes, the code incorporates astrophysical processes relevant for galaxy formation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The so-called subgrid physics includes atomic and molecular cooling of hydro- gen and helium, photoionization heating by a cosmolog- ical UV background with partial self-shielding, and star formation and the associated stellar feedback.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Details are described in Ceverino et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The simulations track metals released from SNe-Ia and from SNe-II, us- ing supernovae yields from Woosley & Weaver (1995).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' Our simulated galaxies are hosted by dark matter haloes with maximum circular velocity (Vmax) higher than 178 km/s at z = 5 in a cosmological volume of 40 h−1Mpc on a side.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The host haloes are selected in a low- resolution N-body only simulation, for which refined ini- tial conditions are generated using a standard zoom-in technique (Klypin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' 2011).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/C9E0T4oBgHgl3EQfgQG4/content/2301.02416v1.pdf'} +page_content=' The refinement achieves the dark matter particle mass of mDM = 8 × 104 M⊙, the minimum star particle mass of 103 M⊙, and the maximum spatial resolution is a few tens proper parsec depending on the refinement level.' 
We calculate the stellar mass distribution for the selected 62 massive galaxies at z = 9, 8, 7, and 6. The maximum stellar mass is 9.5, 9.7, 10.1, and 10.7 × 10^9 M⊙ at these redshifts, respectively. The sample allows us to study the evolution of more massive galaxies than in previous simulations, e.g., Moriwaki et al. (2018), the SERRA simulation (Pallottini et al. 2022), and the SÍGAME simulation (Olsen et al. 2017), and is thus well-suited for comparison with massive galaxies observed by HST, ALMA, and JWST (e.g. Tacchella et al. 2022; Graziani et al. 2020; Topping et al. 2022; Trussler et al. 2022; Barrufet et al. 2022; Leethochawalit et al. 2022).
2.2. Line emissivity calculation

We generate emission-line maps for our galaxy samples by choosing a region enclosed by 0.3 times the virial radius of the host halo, in the same way as Mandelker et al. (2014, 2017). We configure a uniform 3D grid with a cell side length of 100 pc. We locate the star particles and gas elements within each grid cell, and store the mass of stars younger than 10 Myr, the average density of the gas with nH > 0.1 cm−3, and the average metallicity of the cold/warm gas with T < 5 × 10^4 K. These physical quantities assigned to the individual grid cells are then used to compute the line emissivities in a similar manner to Hirschmann et al. (2017); Moriwaki et al. (2018); Ceverino et al. (2021). We generate a library of emission lines using CLOUDY (Ferland et al. 2013).
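The gridding step described above can be sketched in a few lines of Python. This is a minimal illustration only: the array names, units (positions in kpc, ages in Myr, densities in cm−3), and data layout are our own assumptions, not the actual FirstLight pipeline code.

```python
import numpy as np

def grid_galaxy(star_pos, star_mass, star_age,
                gas_pos, gas_rho, gas_Z, gas_T,
                box_size, cell=0.1):
    """Deposit particle data onto a uniform 3D grid (cell size in kpc).

    Returns per-cell young stellar mass, mean dense-gas density, and
    mean cold/warm-gas metallicity, following the cuts quoted in the text.
    """
    n = int(np.ceil(box_size / cell))
    shape = (n, n, n)

    def cell_index(pos):
        idx = np.clip((pos / cell).astype(int), 0, n - 1)
        return np.ravel_multi_index(idx.T, shape)

    # Mass of stars younger than 10 Myr in each cell
    young = star_age < 10.0
    m_young = np.bincount(cell_index(star_pos[young]),
                          weights=star_mass[young], minlength=n**3)

    # Average density of dense gas (n_H > 0.1 cm^-3) in each cell
    dense = gas_rho > 0.1
    cnt = np.bincount(cell_index(gas_pos[dense]), minlength=n**3)
    rho_sum = np.bincount(cell_index(gas_pos[dense]),
                          weights=gas_rho[dense], minlength=n**3)
    rho_mean = np.divide(rho_sum, cnt, out=np.zeros(n**3), where=cnt > 0)

    # Average metallicity of cold/warm gas (T < 5e4 K) in each cell
    cold = gas_T < 5.0e4
    cntZ = np.bincount(cell_index(gas_pos[cold]), minlength=n**3)
    Z_sum = np.bincount(cell_index(gas_pos[cold]),
                        weights=gas_Z[cold], minlength=n**3)
    Z_mean = np.divide(Z_sum, cntZ, out=np.zeros(n**3), where=cntZ > 0)

    return (m_young.reshape(shape), rho_mean.reshape(shape),
            Z_mean.reshape(shape))
```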
The library covers a wide range of gas metallicity Z and ionization parameter U, as given in Table 1. The library lists the individual line luminosity, L_line, normalized by the Hβ line luminosity calculated with the case-B approximation (Dopita & Sutherland 2003), L^caseB_Hβ, as

L_{\rm line} = (1 - f_{\rm esc})\, C_{\rm line}(Z_{\rm gas}, U, n_{\rm HII})\, L^{\rm caseB}_{{\rm H}\beta},    (1)

L^{\rm caseB}_{{\rm H}\beta} = 4\pi j_{{\rm H}\beta} V = h\nu_{{\rm H}\beta} \left( \frac{\alpha^{\rm eff}_{{\rm H}\beta}}{\alpha_{\rm B}} \right) Q,    (2)

where f_esc is the Lyman continuum escape fraction and C_line is the line luminosity ratio. The Hβ emission rate per unit volume per unit time per unit solid angle is denoted as j_Hβ, α^eff_Hβ is an effective recombination coefficient, Q is the production rate of ionizing photons from each star particle, and α_B is the case-B hydrogen recombination coefficient given by

\alpha_{\rm B} = 2.6 \times 10^{-13} \left( \frac{T_{\rm e}}{10^4\,{\rm K}} \right)^{-0.85} {\rm cm^3\,s^{-1}}    (3)

with a constant electron temperature T_e = 10^4 K. We set f_esc = 0.1, which is consistent with previous radiative transfer simulations for massive galaxies with Mhalo > 10^{10−11} M⊙ (Yajima et al. 2011; Kimm & Cen 2014; Wise et al. 2014; Paardekooper et al. 2015; Xu et al. 2016). It is also consistent with recent observational estimates at z ∼ 6−8 (Castellano et al. 2017; Robertson et al. 2013).
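Eqs. (1)-(3) translate directly into code. The sketch below assumes standard case-B atomic values at T_e = 10^4 K; the Hβ photon energy and the α^eff_Hβ/α_B ≈ 0.12 ratio are textbook numbers inserted for illustration, not values quoted in this Letter.

```python
import numpy as np

# Assumed atomic data (cgs): standard case-B values at T_e = 1e4 K.
E_HBETA = 4.09e-12      # erg, energy of an Hbeta photon (4861 A)
ALPHA_RATIO = 0.12      # alpha_eff(Hbeta) / alpha_B at T_e = 1e4 K
L_SUN = 3.828e33        # erg/s

def alpha_B(Te):
    """Case-B recombination coefficient of eq. (3), in cm^3 s^-1."""
    return 2.6e-13 * (Te / 1.0e4) ** -0.85

def L_caseB_Hbeta(Q):
    """Case-B Hbeta luminosity (erg/s) for ionizing photon rate Q (s^-1), eq. (2)."""
    return E_HBETA * ALPHA_RATIO * Q

def L_line(Q, C_line, f_esc=0.1):
    """Line luminosity (erg/s) from the tabulated ratio C_line, eq. (1)."""
    return (1.0 - f_esc) * C_line * L_caseB_Hbeta(Q)

# Example: a young star particle with Q = 1e52 photons/s and C_line = 2
print(L_line(1.0e52, 2.0) / L_SUN, "L_sun")
```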
We note that some galaxies at z < 4 have been reported to have even higher escape fractions of over 20 percent (e.g. Marques-Chaves et al. 2022; Vanzella et al. 2016; Fletcher et al. 2019; Bian & Fan 2020; Flury et al. 2022). Since individual Hii regions are not resolved in our simulations, we resort to a physical model of the ISM structure to calculate the line emissivities of Hii regions. We characterize the ISM by the local gas density n and metallicity Z, and also by a volume-averaged ionization parameter

\langle U \rangle = \frac{3 \alpha_{\rm B}^{2/3}}{4c} \left( \frac{3 Q \epsilon^2 n_{\rm HII}}{4\pi} \right)^{1/3}.    (4)

Our fiducial model assumes a constant gas density n_HII in a spherical Hii region surrounding a star particle (see, e.g. Panuzzo et al. 2003; Gutkin et al. 2016).
We set the Hii region density to n_HII = 100 cm−3 (e.g. Osterbrock & Ferland 2006; Hirschmann et al. 2017, 2022). We define the volume-filling factor of the gas as

\epsilon = \frac{n_{\rm gas,grid}}{n_{\rm HII}},    (5)

where n_gas,grid is the gas number density in each grid cell. In rare cases where the volume-averaged gas density exceeds n_HII (ε > 1), we set the filling factor to unity. Note that a larger n_gas,grid for a fixed n_HII yields a larger filling factor ε. Hence the resulting line emissivity depends only weakly on the assumed n_HII in our model. We have tested a few variations with n_HII = 50 and 300 cm−3, and explicitly checked that our main findings in the following sections are not sensitively affected by this choice.
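For concreteness, a minimal sketch of eqs. (4)-(5) in cgs units (the function names and the example inputs are ours):

```python
import numpy as np

C_LIGHT = 2.99792458e10   # cm/s
ALPHA_B = 2.6e-13         # cm^3/s at T_e = 1e4 K, from eq. (3)

def filling_factor(n_gas_grid, n_HII=100.0):
    """Volume-filling factor of eq. (5), clipped to unity where the
    cell-averaged density exceeds the assumed HII-region density."""
    return np.minimum(n_gas_grid / n_HII, 1.0)

def mean_U(Q, n_gas_grid, n_HII=100.0):
    """Volume-averaged ionization parameter of eq. (4).

    Q: ionizing photon production rate in the cell (s^-1)
    n_gas_grid, n_HII: number densities (cm^-3)
    """
    eps = filling_factor(n_gas_grid, n_HII)
    return (3.0 * ALPHA_B ** (2.0 / 3.0) / (4.0 * C_LIGHT)
            * (3.0 * Q * eps ** 2 * n_HII / (4.0 * np.pi)) ** (1.0 / 3.0))

# Example: Q = 1e52 s^-1 in a cell with n_gas = 10 cm^-3 gives log U ~ -1.9
print(np.log10(mean_U(1.0e52, 10.0)))
```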
Table 1. The parameters used to calculate the line luminosities with CLOUDY.

    log10 (Zgas/Z⊙)        −1.30, −0.70, −0.40, 0.00, 0.30
    log10 U                −4.0, −3.9, ..., −1.1, −1.0
    log10 (nHii/cm−3)      2.0 (fixed)

We compute the production rate of ionizing photons Q of the simulated galaxies using publicly available tables from the Binary Population and Spectral Synthesis (BPASS) model (Byrne et al. 2022). Our simulations adopt a stellar initial mass function represented by broken power laws,

N(M < M_{\rm max}) \propto \int_{0.1}^{M_1} \left( \frac{M}{M_\odot} \right)^{\alpha_1} dM + M_1^{\alpha_1} \int_{M_1}^{M_{\rm max}} \left( \frac{M}{M_\odot} \right)^{\alpha_2} dM    (6)

with α1 = −1.3, α2 = −2.35, M1 = 0.5, and Mmax = 300 M⊙, as in Ceverino et al. (2019). We use a grid of 13 values of metallicity, from Z = 10−5 to 0.04, and 50 logarithmic bins in stellar population age between 1 Myr and 100 Gyr.
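Given such a grid of ages and metallicities, Q for each star particle can be obtained by table interpolation. The sketch below is schematic only: the table layout and the placeholder values are invented for illustration and do not reproduce the actual BPASS data files.

```python
import numpy as np
from scipy.interpolate import RegularGridInterpolator

# Schematic Q(age, Z) lookup table: 50 log-spaced ages, 13 metallicities.
log_age = np.linspace(6.0, 11.0, 50)             # log10(age/yr), 1 Myr .. 100 Gyr
log_Z = np.linspace(-5.0, np.log10(0.04), 13)    # log10 Z
log_Q_table = np.random.uniform(44, 47, (50, 13))  # placeholder values

q_interp = RegularGridInterpolator((log_age, log_Z), log_Q_table,
                                   bounds_error=False, fill_value=None)

def Q_per_Msun(age_myr, Z):
    """Ionizing photon rate per solar mass of stars (s^-1 Msun^-1)."""
    pts = np.column_stack([np.log10(np.atleast_1d(age_myr) * 1e6),
                           np.log10(np.atleast_1d(Z))])
    return 10.0 ** q_interp(pts)

# Total Q of a particle: per-Msun rate times its stellar mass (here 1e3 Msun)
print(Q_per_Msun(3.0, 0.002) * 1.0e3)
```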
We re-assign "fine" ages to star particles in order to mitigate the discreteness effect caused by our simulation setup. Our simulations produce new star particles with a fixed time step of ∆tSF = 5 Myr, and the simulation output timings are not synchronized with ∆tSF. In a snapshot, young stars therefore typically have discretized ages such as tage = 2 Myr, 7 Myr, etc. This apparently minor discreteness in stellar ages has a large impact when we compute the line emissivities, because the ionizing photon production rate decreases quickly with age. For instance, in the BPASS SED of a single stellar population that we use, the number of ionizing photons decreases by over a factor of 100 from an age of 1 Myr to 10 Myr (Xiao et al. 2018). We thus re-assign the stellar ages as follows. We consider star particles younger than 15 Myr, with stamped ages T1, T2, T3 (T1 < T2 < T3) Myr. We perform random sampling within each age interval: for instance, to a star with stamped age T1, we randomly draw a new age within [1, T1] Myr and re-assign it. Finally, we select star particles younger than 10 Myr for our emission-line calculation. We calculate the ionizing photon production rate Q for each star particle using the BPASS table.
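One way to implement this re-assignment is to smear each stamped age uniformly over the ∆tSF-wide interval below it, clipped at 1 Myr; this reading of the interval bounds (which reproduces [1, T1] for the youngest stamp) is our assumption.

```python
import numpy as np

rng = np.random.default_rng(42)

def reassign_fine_ages(ages_myr, dt_sf=5.0, t_min=1.0, t_cut=15.0):
    """Replace discretized stellar ages with ages drawn uniformly from the
    interval each stamped age represents.

    A particle stamped at age T < t_cut is re-assigned a random age in
    [max(t_min, T - dt_sf), T] Myr; older particles are left unchanged.
    """
    ages = np.asarray(ages_myr, dtype=float).copy()
    young = ages < t_cut
    lo = np.maximum(ages[young] - dt_sf, t_min)
    ages[young] = rng.uniform(lo, ages[young])
    return ages

# Example: stamped ages 2, 7, 12 Myr get smeared; the 40 Myr star is untouched
print(reassign_fine_ages([2.0, 7.0, 12.0, 40.0]))
```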
We consider stellar atmosphere models with different elemental compositions, i.e., different values of [α/Fe]. In BPASS v2.3 (Byrne et al. 2022), there are five models with mass fractions of α-elements relative to iron of ∆(log(α/Fe)) = −0.2, +0.0, +0.2, +0.4, and +0.6. For the calculation of [α/Fe], the α-element abundance is approximated by the oxygen abundance (log N_O), assuming that half of the mass in metals produced by SNe II is in the form of oxygen atoms:

\log N_{\rm O} = \log\!\left( \frac{f_{\rm O}\, z_{\rm SNII}}{A_{\rm O}} \right),    (7)

where f_O and z_SNII are the fraction of oxygen released by Type-II SNe and the mass fraction of metals released from Type-II SNe, respectively. Here, the atomic weight of oxygen is A_O = 16 and we assume f_O = 0.5 (Woosley & Weaver 1995). We calculate the iron abundance considering the contributions from both Type-Ia and Type-II SNe as

N_{\rm Fe} = \frac{f_{\rm Fe,Ia}\, z_{\rm SNIa} + f_{\rm Fe,II}\, z_{\rm SNII}}{A_{\rm Fe}},    (8)

where z_SNIa is the mass fraction of metals released from Type-Ia SNe and A_Fe = 56. We set the fractions f_Fe,Ia = 0.5 (Thielemann et al. 1986) and f_Fe,II = (0.026, 0.033) for metal mass ratios between zero and solar metallicity (Nomoto et al. 2006; Ceverino et al. 2019), respectively. Finally, [α/Fe] is obtained from

[\alpha/{\rm Fe}] = \log N_{\rm O} - \log N_{\rm Fe} - \log (N_{\rm O}/N_{\rm Fe})_\odot,    (9)

where (N_O/N_Fe)⊙ = 1.17 is the solar value of the O/Fe abundance ratio.
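Eqs. (7)-(9) amount to a few lines of arithmetic. In the sketch below, the linear interpolation of f_Fe,II between its zero- and solar-metallicity values is our assumption about how the two quoted numbers are used.

```python
import numpy as np

A_O, A_FE = 16.0, 56.0
F_O, F_FE_IA = 0.5, 0.5
Z_SUN = 0.02
LOG_O_FE_SUN = np.log10(1.17)   # solar (N_O / N_Fe)

def alpha_over_fe(z_snii, z_snia):
    """[alpha/Fe] from the SN-II and SN-Ia metal mass fractions, eqs. (7)-(9)."""
    # f_Fe,II interpolated between 0.026 (Z = 0) and 0.033 (Z = Z_sun)
    f_fe_ii = 0.026 + (0.033 - 0.026) * np.clip(z_snii / Z_SUN, 0.0, 1.0)
    n_o = F_O * z_snii / A_O                               # eq. (7)
    n_fe = (F_FE_IA * z_snia + f_fe_ii * z_snii) / A_FE    # eq. (8)
    return np.log10(n_o) - np.log10(n_fe) - LOG_O_FE_SUN   # eq. (9)

print(alpha_over_fe(z_snii=0.01, z_snia=0.001))
```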
Figure 1. The [Oiii] 88 µm luminosity versus SFR for our 62 simulated galaxies at z = 9 (top left), z = 8 (top right), z = 7 (bottom left), and z = 6 (bottom right). The solid circles are colored with the gas metallicity (see the colorbar on the right). For comparison, we show the [Oiii]-SFR relation derived from observations of local galaxies by De Looze et al. (2014). Gray points are the observational results for high-z (z > 6) galaxies from Hashimoto et al. (2018); Laporte et al. (2017); Tamura et al. (2019); Inoue et al. (2016) (I16); Hashimoto et al. (2019) (H19); Carniani et al. (2017) (C17); Wong et al. (2022) (WG22); Witstok et al. (2022) (WT22); and Harikane et al. (2020).
3. RESULTS

We focus on the rest-frame sub-millimeter and optical [Oiii] lines from high-redshift galaxies, which are detected by ALMA and JWST.

3.1. L[Oiii] vs SFR

Figure 1 shows the [Oiii] 88 µm luminosity against star formation rate (SFR) for our galaxy samples. The colorbar indicates the nebular metallicity Zneb, which is the line-luminosity-weighted gas metallicity. We compare with the observed local galaxies (De Looze et al. 2014) and with the observed [Oiii] 88 µm luminosities of high-redshift galaxies (see the caption). At z = 9 to z = 7, most of our simulated galaxies are located above the local galaxy relation (solid line), similar to the results of Moriwaki et al. (2018); Arata et al. (2020); Pallottini et al. (2022). At z = 7−9, our galaxy samples are distributed around the observed galaxies. It is interesting that luminous galaxies are already chemically enriched to log(Z/Z⊙) ∼ −0.5 at these early epochs. Our simulations predict a slightly steeper relation at z = 7−9 than the local relation:

L_{\rm [OIII],88} \propto \left( \frac{\rm SFR}{M_\odot\,{\rm yr^{-1}}} \right)^{0.9-1.2}.    (10)
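A power-law index such as that in eq. (10) can be estimated with a simple least-squares fit in log-log space. The arrays below are placeholders standing in for the per-galaxy SFRs and luminosities, not the actual simulation outputs.

```python
import numpy as np

# Hypothetical per-galaxy values (Msun/yr and Lsun); replace with real outputs.
rng = np.random.default_rng(0)
sfr = 10 ** rng.uniform(0, 2, 62)
l_oiii = 10 ** (7.0 + 1.1 * np.log10(sfr) + rng.normal(0, 0.2, 62))

# Slope a and intercept b of log L = a log SFR + b
a, b = np.polyfit(np.log10(sfr), np.log10(l_oiii), 1)
print(f"L_[OIII] ∝ SFR^{a:.2f}")
```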
We find three galaxies with L[Oiii] > 10^9 L⊙ at z = 7, which are as bright as several observed galaxies. We study the structure of one of them (sample FL964) in detail. It has Mgas = 6.41 × 10^9 M⊙, M⋆ = 9.96 × 10^9 M⊙, and a specific SFR of 11 Gyr−1 at z = 7. Figure 2 shows the projected maps of the gas number density, ionization parameter, and [Oiii] 88 µm emission. Clearly, regions with high ionization parameters of log U ∼ −2 cause high emissivities, consistent with the observation by Harikane et al. (2020) and also with recent simulations by Kohandel et al. (2022).
Figure 2. Projected gas density (left), averaged ionization parameter (middle), and [Oiii] 88 µm distribution (right) for the galaxy sample FL964 at z = 7. Each panel shows a region with a side length and depth of 0.3 Rvir (= 7.4 kpc).

The total luminosity of [Oiii] 5007 Å of FL964 is 7.60 × 10^9 L⊙, which is about 5 times larger than L[Oiii],88.

3.2. The mass-metallicity relation

It is important to examine the metallicity evolution of our simulated galaxies. We study the so-called mass-metallicity relation (MZR) by calculating the gas-phase metallicity of individual galaxies. Figure 3 shows the stellar mass versus gas-phase oxygen abundance relation. We calculate the gas-phase oxygen abundance by adopting the conversion equation of Mandelker et al. (2014),

\frac{\rm O}{\rm H} = \frac{f_{\rm O}\, z_{\rm SNII}}{X A_{\rm O}}.    (11)
We set the hydrogen mass fraction X = 0.755, and the values of f_O and A_O are the same as in eq. (7); this adopts a solar oxygen abundance of 12 + log(O/H) = 8.9. We then calculate the average z_SNII, weighted by the [Oiii] luminosity of each grid cell. This weighting is compatible with observational methods such as the direct method or the strong-line method, which use oxygen emission lines (e.g. Bian et al. 2018; Izotov et al. 2019). We calculate the mass of stars within the region of 0.3 Rvir.
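In code, eq. (11) and the luminosity weighting reduce to the sketch below; the grid arrays are hypothetical inputs from the emissivity calculation of Section 2.2. Note that z_SNII = Z⊙ = 0.02 indeed recovers the adopted solar value 12 + log(O/H) ≈ 8.9.

```python
import numpy as np

X_H, F_O, A_O = 0.755, 0.5, 16.0

def oxygen_abundance(z_snii):
    """12 + log(O/H) from the SN-II metal mass fraction, eq. (11)."""
    return 12.0 + np.log10(F_O * z_snii / (X_H * A_O))

def galaxy_abundance(z_snii_grid, l_oiii_grid):
    """[Oiii]-luminosity-weighted mean z_SNII, converted to 12 + log(O/H)."""
    z_mean = np.average(z_snii_grid, weights=l_oiii_grid)
    return oxygen_abundance(z_mean)

# Sanity check: z_SNII = 0.02 gives ~8.92, close to the adopted solar 8.9
print(oxygen_abundance(0.02))
```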
In Figure 3, we also plot the MZR of local galaxies from Curti et al. (2020) (dashed line) and recent JWST observational results for high-redshift galaxies (Sun et al. 2022; Curti et al. 2022; Langeroodi et al. 2022; Williams et al. 2022). Curti et al. (2022) estimated the metallicities of SMACS-field galaxies with the direct method, Sun et al. (2022) adopt the strong-line calibration of Bian et al. (2018) using O32, and Langeroodi et al. (2022) and Williams et al. (2022) adopt the strong-line method of Izotov et al. (2019). Our simulated galaxies have metallicities (oxygen abundances) and stellar masses similar to the observed ones. Note that Figure 3 shows the evolution of a fixed sample of simulated galaxies, rather than of all the galaxies at the respective epochs. Namely, we select the galaxies at z = 5 by mass and plot their progenitors at z = 6−9. Hence we likely miss low-mass, low-metallicity galaxies at z = 9 (see Langan et al. (2020) for the mass-metallicity relation of low-mass galaxies in FirstLight). Some galaxies with M⋆ > 10^9 M⊙ have gas-phase metallicities of 12 + log(O/H) ∼ 8.5 even at z = 9, suggesting that metal enrichment can proceed rapidly in early galaxies.
3.3. Far-IR/optical line ratios

It is interesting and timely to explore line-ratio diagnostics using the three [Oiii] lines at 88 µm, 52 µm, and 5007 Å. The former two fine-structure lines are observed by ALMA, whereas the latter is to be observed by JWST. Hereafter we denote the line luminosity ratios using the wavelengths, such as R5007/88 = L5007Å / L88µm. Figure 4 shows R5007/88 against R52/88 for our simulated galaxies. We also show the model line ratios obtained from our set of CLOUDY calculations (Table 1). The ratio R5007/88 is commonly thought to be a sensitive temperature indicator (e.g. Fujimoto et al. 2022). Interestingly, Figure 4 shows that R5007/88 may also trace the mean gas metallicity of a galaxy. We argue that it is a model-dependent, indirect indicator because