Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- -9E4T4oBgHgl3EQfEAsg/content/2301.04873v1.pdf +3 -0
- -9E4T4oBgHgl3EQfEAsg/vector_store/index.faiss +3 -0
- -9E4T4oBgHgl3EQfEAsg/vector_store/index.pkl +3 -0
- -dE2T4oBgHgl3EQfmQc9/vector_store/index.pkl +3 -0
- -dFIT4oBgHgl3EQf9CsF/content/tmp_files/2301.11404v1.pdf.txt +1230 -0
- -dFIT4oBgHgl3EQf9CsF/content/tmp_files/load_file.txt +0 -0
- -tFAT4oBgHgl3EQfqR1A/content/2301.08646v1.pdf +3 -0
- -tFAT4oBgHgl3EQfqR1A/vector_store/index.pkl +3 -0
- .gitattributes +55 -0
- 0tE2T4oBgHgl3EQfiQfL/content/tmp_files/2301.03957v1.pdf.txt +894 -0
- 0tE2T4oBgHgl3EQfiQfL/content/tmp_files/load_file.txt +0 -0
- 1dFLT4oBgHgl3EQfpy-5/vector_store/index.pkl +3 -0
- 1tE3T4oBgHgl3EQfngpH/content/tmp_files/load_file.txt +0 -0
- 2NAzT4oBgHgl3EQfuP1K/content/2301.01687v1.pdf +3 -0
- 2NAzT4oBgHgl3EQfuP1K/vector_store/index.pkl +3 -0
- 2NE4T4oBgHgl3EQfzw00/content/2301.05276v1.pdf +3 -0
- 2NE4T4oBgHgl3EQfzw00/vector_store/index.pkl +3 -0
- 39AyT4oBgHgl3EQfcPct/vector_store/index.faiss +3 -0
- 39E1T4oBgHgl3EQf6AV3/content/2301.03518v1.pdf +3 -0
- 39E1T4oBgHgl3EQf6AV3/vector_store/index.faiss +3 -0
- 39E1T4oBgHgl3EQf6AV3/vector_store/index.pkl +3 -0
- 3dE4T4oBgHgl3EQfbAyT/vector_store/index.faiss +3 -0
- 49A0T4oBgHgl3EQfNv86/content/tmp_files/2301.02150v1.pdf.txt +1735 -0
- 49A0T4oBgHgl3EQfNv86/content/tmp_files/load_file.txt +0 -0
- 69E3T4oBgHgl3EQfRQm6/content/2301.04420v1.pdf +3 -0
- 69E3T4oBgHgl3EQfRQm6/vector_store/index.faiss +3 -0
- 69E3T4oBgHgl3EQfRQm6/vector_store/index.pkl +3 -0
- 69FJT4oBgHgl3EQfmCy-/content/2301.11586v1.pdf +3 -0
- 69FJT4oBgHgl3EQfmCy-/vector_store/index.faiss +3 -0
- 69FJT4oBgHgl3EQfmCy-/vector_store/index.pkl +3 -0
- 79AzT4oBgHgl3EQfE_pK/content/2301.01002v1.pdf +3 -0
- 79AzT4oBgHgl3EQfE_pK/vector_store/index.faiss +3 -0
- 8NE4T4oBgHgl3EQf2w06/vector_store/index.faiss +3 -0
- 8dAzT4oBgHgl3EQfSPuZ/content/2301.01230v1.pdf +3 -0
- 8dAzT4oBgHgl3EQfSPuZ/vector_store/index.faiss +3 -0
- AdFAT4oBgHgl3EQfrh5X/content/tmp_files/2301.08653v1.pdf.txt +1303 -0
- AdFAT4oBgHgl3EQfrh5X/content/tmp_files/load_file.txt +0 -0
- B9E4T4oBgHgl3EQf5Q72/content/tmp_files/2301.05323v1.pdf.txt +1535 -0
- B9E4T4oBgHgl3EQf5Q72/content/tmp_files/load_file.txt +0 -0
- EdFRT4oBgHgl3EQfBDfd/content/tmp_files/2301.13464v1.pdf.txt +3033 -0
- EdFRT4oBgHgl3EQfBDfd/content/tmp_files/load_file.txt +0 -0
- FdFKT4oBgHgl3EQfay5f/content/2301.11809v1.pdf +3 -0
- FdFKT4oBgHgl3EQfay5f/vector_store/index.faiss +3 -0
- FdFKT4oBgHgl3EQfay5f/vector_store/index.pkl +3 -0
- G9A0T4oBgHgl3EQfBv-A/content/2301.01980v1.pdf +3 -0
- G9A0T4oBgHgl3EQfBv-A/vector_store/index.pkl +3 -0
- HdE5T4oBgHgl3EQfWQ_B/content/2301.05558v1.pdf +3 -0
- HdE5T4oBgHgl3EQfWQ_B/vector_store/index.faiss +3 -0
- HdE5T4oBgHgl3EQfWQ_B/vector_store/index.pkl +3 -0
- I9A0T4oBgHgl3EQfCP_M/vector_store/index.pkl +3 -0
-9E4T4oBgHgl3EQfEAsg/content/2301.04873v1.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a7288c6e06e27cc92072bd70d5e99d0441e03270d661e73c5570b78a1a0da9f
+size 2332427
-9E4T4oBgHgl3EQfEAsg/vector_store/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e60a98092cdafb2071f24c7863424b0c45f7b24922868740b89f85b240884404
+size 4063277
-9E4T4oBgHgl3EQfEAsg/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57d4d7edfb6870c59e4ccc348244dca0718f68db63131de3214104a4ea67df12
+size 147758
-dE2T4oBgHgl3EQfmQc9/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39adf4f498b41e6ce367e9c34d5552ee6b0555e7c67fb149e9a9dcb8aa2671c3
+size 67288
-dFIT4oBgHgl3EQf9CsF/content/tmp_files/2301.11404v1.pdf.txt
ADDED
@@ -0,0 +1,1230 @@
A Quantum Monte Carlo study of the structural, energetic, and magnetic properties of two-dimensional (2D) H and T phase VSe2

Daniel Wines,1,a) Juha Tiihonen,2 Kayahan Saritas,3 Jaron Krogel,3 and Can Ataca4,b)

1) Materials Science and Engineering Division, National Institute of Standards and Technology (NIST), Gaithersburg, MD 20899
2) Department of Physics, Nanoscience Center, University of Jyväskylä, P.O. Box 35, Finland
3) Materials Science and Technology Division, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37831
4) Department of Physics, University of Maryland Baltimore County, Baltimore, MD 21250
a) Electronic mail: [email protected]
b) Electronic mail: [email protected]

(Dated: 30 January 2023)
arXiv:2301.11404v1 [cond-mat.str-el] 26 Jan 2023
Previous works have controversially claimed near-room temperature ferromagnetism in two-dimensional (2D) VSe2, with conflicting results throughout the literature. These discrepancies in magnetic properties between both phases (T and H phase) of 2D VSe2 are most likely due to the structural parameters being coupled to the magnetic properties. Specifically, both phases have a close lattice match and similar total energies, which makes it difficult to determine which phase is being observed experimentally. In this study, we used a combination of density functional theory (DFT), highly accurate diffusion Monte Carlo (DMC) and a surrogate Hessian line-search optimization technique to resolve the previously reported discrepancy in structural parameters and relative phase stability. With DMC accuracy, we determined the freestanding geometry of both phases and constructed a phase diagram. Our findings demonstrate the successes of the DMC method coupled with the surrogate Hessian structural optimization technique when applied to a 2D magnetic system.
I. INTRODUCTION
One of the most promising two-dimensional (2D) magnetic materials that has been extensively studied experimentally and theoretically is 2D VSe2. Similar to other 2D transition metal dichalcogenides (such as MoS2)1, VSe2 exists in two phases: the T phase (octahedral (1T)-centered honeycombs), which is metallic, and the H phase (trigonal prismatic (2H)-hexagonal honeycombs, see Fig. 1), which is semiconducting. Several experimental and theoretical studies have controversially claimed near-room temperature ferromagnetism in VSe2, with conflicting results throughout the literature. Density functional theory (DFT) along with classical Monte Carlo simulations has been used to obtain an estimate of the Curie temperature of H-VSe2 (291 K)2, but the model Ising Hamiltonian used did not take into account the magnetic anisotropy energies, which are essential for an accurate estimation of the Curie temperature of a 2D lattice. The Curie temperature of multilayered 2D H-VSe2 has been experimentally measured to be 425 K, with the ferromagnetism softening as the thickness of the sample increases3. Additionally, the experimental Curie temperature for monolayer T-VSe2 has ranged from 300 K to 470 K4,5, depending on which substrate is used (MoS2, graphite, SiO2-coated silicon). The experimental magnetization of T-VSe2 has also been met with controversy, with values of 15 µB and 5 µB (per formula unit) being reported from two separate studies4,6. Insight has also been reported with regards to how the ferromagnetism is enhanced by defects, molecular adsorption and the choice of substrate for VSe24,5,7. A wide range of values has also been reported for the charge density wave (CDW) transition temperature of T-VSe2, ranging from 120 K to 350 K3,6,8–10.

FIG. 1. Top and side view of the atomic structure of monolayer VSe2 in the a) 1T and b) 2H phase.

These discrepancies in the electronic and magnetic properties of either phase of 2D VSe2 arise from the structural parameters of each phase being coupled closely to the magnetic and electronic properties and from the external factors (substrates, defects) of the individual samples. One example of this has been a reported discrepancy on which phase (T or H) is energetically more favorable. Both the T and H phases have a close lattice match and similar total energies, which makes it difficult to determine which phase is being observed experimentally. Recently, it has been reported experimentally that the T phase is favored for bulk VSe2, but as the dimensionality decreases, the H phase is favored3,11. It has also been reported that a T-to-H phase transition can be realized by thermal annealing11. This same structural phase transition has even been reported by applying a biaxial strain of ≈ 3 % (from calculated results)7,11,12. Researchers have proposed that this lattice strain can be induced by the mismatch that occurs from putting 2D VSe2 on a substrate7,12.
From a computational perspective, results for VSe2 depend heavily on which methodology is employed. In most cases, DFT with an empirical Hubbard correction (+U) for correlated electrons is used13. For example, if the U correction is applied for T and H-VSe2, the T phase is more energetically favorable, while if no U correction is applied, the H phase is more favorable14. In addition to the discrepancies in results calculated with DFT+U, results between van der Waals (vdW) corrected functionals and hybrid functionals are also inconclusive14 in terms of predicting the relative phase stability. In order to alleviate the uncertainty in DFT methods, more sophisticated methods can be used, such as diffusion Monte Carlo (DMC)15. DMC is a correlated, many-body electronic structure method that has demonstrated success for the electronic and magnetic properties of a variety of bulk and 2D systems16–24. This method has a weaker dependence on the starting density functional and U parameter and can successfully achieve results with an accuracy beyond DFT+U15. Because T and H-VSe2 have structural parameters that are coupled to their electronic and magnetic properties, it is difficult to produce conclusive results that rely solely on DFT or DFT+U. For this reason, we employed our recently developed energy-based surrogate Hessian method for structural optimization with stochastic electronic structure theories (such as DMC)22 to obtain the geometry of T and H-VSe2 with DMC accuracy, resulting in high-accuracy bond lengths that resolve previous functional-dependent structural discrepancies. After obtaining an accurate geometry for both structures, we constructed a phase diagram between T and H-VSe2 using DMC calculated energies and obtained accurate magnetic properties of each structure. The accurate estimates for lattice geometry, relative phase energy and the DMC phase diagram assist in clarifying previously inconclusive theoretical and experimental results regarding T and H phase VSe2. For full details of the computational methods used, see the Supporting Information (SI).
As an initial starting point for our study, we performed benchmarking DFT and DFT+U calculations using a variety of density functionals (local density approximation (LDA)25, Perdew-Burke-Ernzerhof (PBE)26, and strongly constrained and appropriately normed (SCAN)27 meta-GGA functionals, see SI for more details) and the Vienna Ab initio Simulation Package (VASP) code for monolayer T-VSe2 and H-VSe2. The goal of these simulations was to assess how sensitive the relative energy between the T and H phase is with respect to functional and material geometry. Another goal of these simulations was to benchmark the structural parameters of each material with respect to several density functionals. It is advantageous to perform these reference calculations with VASP and PAW pseudopotentials as a precursor to the more expensive DMC calculations because they require a much smaller cutoff energy and are more cost effective for a large number of simulations. It is important to note that for all DFT and DMC simulations, we assumed a ferromagnetic ground state for both T and H-VSe2. Although recent reports have suggested that T-VSe2 could be experimentally paramagnetic3, we infer that this paramagnetism can be induced by magnetic anisotropy. In addition, the modeling of paramagnetism with computational methods imposes a great challenge, which is why we focus on the freestanding ferromagnetic ground states of both phases. A more robust treatment of the magnetic structure can be explored in future work, but is beyond the scope of this work, which primarily focuses on determining the geometric structure and phase stability of 2D T and H-VSe2.
In Fig. 2 we present a comprehensive look at the difference in total energy between T-VSe2 and H-VSe2, using several DFT functionals under different geometric constraints. We performed these calculations for a variety of U values in three different ways: fully relaxing the structure at each value of U (Fig. 2 a)); fixing the lattice and atomic positions to the U = 0 eV relaxed geometry of that particular functional and calculating the static energy at each value of U (Fig. 2 b)); and fixing the lattice to the U = 0 eV relaxed geometry of that particular functional and relaxing just the atomic positions at each value of U (Fig. 2 c)). The results in Fig. 2 indicate that there is significant disagreement between DFT functionals, U values, and material geometries, with all three factors playing a significant role in the energy difference between the T and H phase. Specifically, regardless of relaxation method, all bare (no U correction) SCAN, PBE, and PBEsol functionals predict H favorable, while bare LDA predicts T favorable. For all functionals, there is a critical value of U that reverses the relative phase stability, which is dependent on functional and relaxation method. The SCAN functional with a U correction predicts the T phase favorable, with larger energy differences. As seen in Fig. 2, the trends in the relative phase stability between Fig. 2 b) and c) are nearly identical, but significantly vary from Fig. 2 a). This implies that the density functional is strongly coupled to material geometry, but the lattice constant change has more of an effect on phase stability than atomic positions and bond distances. This is most prevalent for higher U values (> 2 eV), where the relaxed geometry changes more drastically with U. The interrelated nature of the material's geometry, density functional, and value of U are reasons to seek out higher levels of theory beyond DFT/DFT+U, such as DMC, to accurately determine the optimal geometry and relative energy between the phases of 2D VSe2.
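To make the three relaxation protocols concrete, the following is a minimal sketch of the U-scan loop; `relax()` and `energy()` are hypothetical stand-ins for a DFT driver (the calculations in this work used VASP), not functions of any particular package.

```python
# Skeleton of the Fig. 2 benchmark: T - H energy vs. U under the three
# geometry protocols a)-c). The two functions below are placeholders for
# a real DFT code; hook them up before running.
def relax(phase, xc, u, ions_only=False, reference=None):
    """Return a relaxed structure (optionally ions-only from a reference)."""
    raise NotImplementedError

def energy(structure, xc, u):
    """Return the static total energy per formula unit (eV)."""
    raise NotImplementedError

for xc in ("LDA", "PBE", "PBEsol", "SCAN"):
    geo = {ph: relax(ph, xc, u=0) for ph in ("T", "H")}  # U = 0 eV geometries
    for u in (0, 1, 2, 3, 4):
        # a) fully relax lattice and ions at each U
        dE_a = energy(relax("T", xc, u), xc, u) - energy(relax("H", xc, u), xc, u)
        # b) static energy at the frozen U = 0 eV geometry
        dE_b = energy(geo["T"], xc, u) - energy(geo["H"], xc, u)
        # c) U = 0 eV lattice, ionic positions re-relaxed at each U
        dE_c = energy(relax("T", xc, u, ions_only=True, reference=geo["T"]), xc, u) \
             - energy(relax("H", xc, u, ions_only=True, reference=geo["H"]), xc, u)
```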
FIG. 2. Relative (T - H) energy between T and H phase 2D VSe2 as a function of U parameter for several density functionals and methods of atomic relaxation: a) fully relaxing the structure, b) fixing the lattice and atomic positions to the U = 0 eV relaxed geometry of that particular functional and calculating the static energy, c) fixing the lattice to the U = 0 eV relaxed geometry of that particular functional and relaxing just the atomic positions. The dotted line indicates 0 eV.

TABLE I. Tabulated results for lattice constant, V-Se distance, and relative energy (T - H) for both T and H phase 2D VSe2 for several computational methods. DMC error bars (standard error about the mean) are included in parentheses.

Method        T-VSe2 a (Å)   T-VSe2 dV-Se (Å)   H-VSe2 a (Å)   H-VSe2 dV-Se (Å)   ET-H (eV/f.u.)
PBE           3.336          2.489              3.333          2.502               0.045
PBE+U=2       3.435          2.526              3.364          2.520              -0.008
LDA           3.228          2.438              3.229          2.445              -0.026
LDA+U=2       3.277          2.455              3.266          2.464               0.045
SCAN          3.387          2.486              3.329          2.486               0.045
SCAN+U=2      3.462          2.524              3.353          2.502              -0.202
PBEsol        3.262          2.458              3.272          2.471               0.013
PBEsol+U=2    3.323          2.483              3.301          2.487               0.025
PBE-D2        3.323          2.484              3.318          2.496               0.010
PBE-D3        3.315          2.485              3.319          2.497               0.042
SCAN+rvv10    3.379          2.481              3.319          2.482               0.051
DMC           3.414(12)      2.505(7)           3.335(8)       2.503(5)            0.06(2)

The relaxed lattice constants, V-Se distances, and T - H energies from Fig. 2 a) are presented in Table I and Fig. 3, along with additional VASP reference calculations performed with the vdW corrected functionals (PBE-D228, PBE-D329, SCAN+rvv1030). The DMC computed parameters are also given for comparison in Table I and Fig. 3 (more discussion to follow). We observe a ≈ 7 % variability in lattice constant across the different methods for T-VSe2 and a ≈ 4 % variability in lattice constant across the different methods for H-VSe2. Between both phases, we observe a ≈ 3 % variability in V-Se distance (dV-Se). Most strikingly, the energy difference between the T and H phases (ET-H) drastically varies depending on the material geometry and computational methodology, ranging from -0.2 eV/f.u. to 0.06 eV/f.u. Because a strain-induced phase transition has been reported between T- and H-VSe27,11,12, we decided to perform additional VASP benchmarking calculations that involved the application of tensile and compressive strain for each monolayer. We performed these calculations for PBE, SCAN, and LDA (with U = 0 eV and U = 2 eV), starting from the U = 0 eV geometry for each functional. The resulting equations of state are depicted in Fig. S3. As seen in the figure, the equation of state and resulting strain-induced phase transition is entirely dependent on the functional and U value, with no consistent trend.
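The deviations that Fig. 3 summarizes follow directly from the Table I data; for example, for the T-phase lattice constant (a minimal sketch using only the published numbers):

```python
# DFT minus DMC deviations of the T-VSe2 lattice constant, as in Fig. 3 a).
a_dmc_T = 3.414  # DMC lattice constant of T-VSe2 (Å), standard error 0.012
a_T = {"PBE": 3.336, "PBE+U=2": 3.435, "LDA": 3.228, "LDA+U=2": 3.277,
       "SCAN": 3.387, "SCAN+U=2": 3.462, "PBEsol": 3.262, "PBEsol+U=2": 3.323,
       "PBE-D2": 3.323, "PBE-D3": 3.315, "SCAN+rvv10": 3.379}
for method, a in a_T.items():
    print(f"{method:11s} a - a_DMC = {a - a_dmc_T:+.3f} Å")
```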
FIG. 3. A summary of the deviation of the geometric properties relative to the DMC calculated geometric properties for a) T-VSe2 and b) H-VSe2, and c) the deviation of the T - H energy relative to the DMC calculated T - H energy, for a variety of DFT functionals (U = 2 eV), where the DMC error bar (standard error about the mean) is represented by the red bars.

The strong sensitivity of each monolayer with respect to geometry and functional are grounds for using a higher-order method such as DMC to obtain a statistically accurate estimate of the lattice parameters and relative energy between phases. Prior to performing the DMC/line-search calculations, we optimized our nodal surface (orbitals selected for DFT wavefunction generation). Since DMC has the zero-variance property, as the trial wave function approaches the exact ground state, the statistical fluctuations in the energy reduce to zero15. Although there have been instances where various sophisticated methods have been used to optimize the nodal surface31–34, we employed the PBE+U approach, where the Hubbard (U) value was used as a variational parameter to optimize the nodal surface using DMC (similar to other successful DMC studies of magnetic materials16,20,21,24,35–37). We performed these calculations for both T and H-VSe2 (24 atom supercells), where we tuned the U value from (1 to 4) eV while creating the trial wavefunction and computed the DMC energy. The results of these calculations are depicted in Fig. S4, where we observe that U = 2 eV yields the lowest energy for both phases. It is important to note that for the H phase, the DMC energies for U = 1 and U = 2 eV are statistically identical. Based on this, we created the trial wavefunction using PBE+U (U = 2 eV) for all subsequent DMC calculations within the surrogate Hessian line-search for both phases (all 52 DMC energy evaluations). Since we obtained an optimal U value of 2 eV for both materials, we focused our DFT+U benchmarking efforts more on U = 2 eV (Fig. 3, Fig. 5, Table I, Fig. 2, Fig. S3).
Based on the DMC line-search results, we determined accurate bounds on the lattice parameter (a) and off-plane displacement of Se (z), within an error tolerance of 0.018 Å or lower for both parameters. This translates to within ≈ 0.5 % accuracy in a parameter set of a and dV-Se with 95 % confidence. Convergence (absence of significant displacements outside of the error tolerance) was achieved after two parallel line-search iterations for both phases. This convergence is illustrated in Fig. S5, where the convergence of the parameter offsets of a and z and the convergence of the total energy per f.u. are depicted for both T and H phase 2D VSe2 for the initial DFT relaxed structure (1) and both subsequent iterations of DMC (2 - 3). In addition, the final energies of both of the fitted structures (square points) are given.

FIG. 4. (Top) The phase diagram of 2D VSe2 in terms of a and dV-Se. The phase boundary (solid line, black) is estimated from bicubic fits. To assure quality of the fits, the estimated ±0.01 eV error contours (dotted line) and the minima from the fits ('x') and the line-search ('o') are all well separated. (Bottom) Slices of the PES at dV-Se = 2.505 Å.
The final geometric parameters and relative phase energies determined with DMC are given in Table I and Fig. 3. For T-VSe2, we determined a lattice constant of 3.414(12) Å and a V-Se distance of 2.505(7) Å. For H-VSe2, we determined a lattice constant of 3.335(8) Å and a V-Se distance of 2.503(5) Å. The DMC finite-size extrapolated energy difference (T - H) between the two phases was determined to be 0.06(2) eV/f.u., indicating that in freestanding form at the equilibrium geometry, H-VSe2 is favored over T-VSe2. When comparing these DMC results to the other DFT functionals in Table I and Fig. 3, it is clear that very few DFT functionals can reproduce the DMC results for lattice constant, V-Se distance and relative energy difference. The SCAN functional comes the closest to reproducing all three DMC values simultaneously, but still falls slightly short for the V-Se distances of both phases and the lattice constant of T-VSe2. The fact that SCAN+U successfully predicts the structural properties (for H-VSe2) and the fact that SCAN+rvv10 produces an energy difference closest to the average DMC energy difference for both phases loosely implies that a simultaneous description of correlated magnetism and vdW interactions is needed to correctly represent the physics of VSe2. Experimental measurements of the lattice constant and V-Se distance of freestanding monolayer VSe2 are scarce and oftentimes dependent on external factors such as the substrate (more discussion to follow) and sample preparation technique4,5,38,39. However, Chen et al.38 have recently reported a lattice constant of 3.4 Å for thin films of T-VSe2 and Liu et al.39 have recently reported a lattice constant of 3.3 Å for epitaxially grown monolayer H-VSe2. Both of these measured values are in excellent agreement with our DMC computed lattice constants. Additionally, we determined the near-equilibrium PES of both T and H 2D VSe2 with DMC accuracy, which are both depicted in Fig. S6.
The phase diagram presented in Fig. 4 is based on similar fits to data, where the z displacement has been remapped to dV-Se. This DMC phase diagram can directly be compared to the energy vs. strain DFT benchmarking calculations in Fig. S3, which emphasizes the need for an accurate representation of the phase boundary between the two phases. The freestanding geometries of both T and H lie in the energetic H phase, but a slice of the phase diagram along dV-Se = 2.505 Å indicates that the T phase becomes favorable over H under biaxial strain, for lattice constants a ≳ 3.5 Å. This implies that in freestanding form, once T-VSe2 is positively strained at least ≈ 2.5 %, the T phase is favored over H. Alternatively, if freestanding H-VSe2 is positively strained at least ≈ 5 %, the T phase is also favored over H. This strain can easily be accomplished by placing monolayer VSe2 on a substrate with significant lattice mismatch. In fact, this type of mismatch has been reported to alter the material properties4,5,40,41, significantly contributing to the controversies of T and H-VSe2 (for energetic favorability, magnetic properties). Whether the changes in energetic favorability or magnetic properties with respect to the substrate are due to lattice mismatch or to more complicated interactions between the substrate and the monolayer remains to be answered and is beyond the scope of this work, which has focused solely on the freestanding forms of T and H-VSe2. However, such calculations can be employed for future work using higher order methods such as DMC. The proximity of the phase boundary between the T and H phase (Fig. 4) is emphasized by the small energy difference between the two curves (0.06(2) eV/f.u. at the equilibrium geometry). Since this energy difference is so close to the thermal energy at room temperature (≈ 0.024 eV), a process such as thermal annealing can easily induce a phase transition. In fact, it was recently demonstrated that a structural phase transition of multilayer VSe2 from T to H occurs through annealing at 650 K, along with a metal-insulator transition11.
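The quoted strain thresholds follow directly from the DMC equilibrium lattice constants and the a ≈ 3.5 Å boundary; a quick numerical check:

```python
# Tensile strain needed to reach the T/H phase boundary at a ≈ 3.5 Å,
# starting from the DMC equilibrium lattice constants of each phase.
a_T, a_H, a_boundary = 3.414, 3.335, 3.5  # Å
print(f"from T-VSe2: {(a_boundary - a_T) / a_T:.1%}")  # ≈ 2.5 %
print(f"from H-VSe2: {(a_boundary - a_H) / a_H:.1%}")  # ≈ 5 %
```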
To gain a deeper understanding of the magnetic properties of 2D T and H-VSe2, we extracted the spin densities (using a trial wavefunction at U = 2 eV and a 24 atom supercell at the final equilibrium geometry predicted by DMC/line-search). The spin density isosurfaces of each phase (ρup - ρdown) are depicted in the insets of Fig. 5 a) and c) for T-VSe2 and H-VSe2 respectively. For both phases, we observe that the V atoms are highly spin-polarized, while the Se atoms are slightly antiparallel with respect to the V atoms. For more calculation details regarding spin density, see SI.

We went on to plot the radially averaged spin densities as a function of distance, separately for V and Se for T and H-VSe2 (depicted in Fig. 5 a) - d)). This allows us to view the spatial variations in spin density. Additionally, we benchmarked these V and Se radially averaged densities with PBE+U (U = 2 eV) using NC pseudopotentials at the equilibrium geometry (the calculation required to create the trial WF for the subsequent DMC runs). As seen in Fig. 5 a) and c), there is a substantial difference in the V spin density between DMC and PBE+U (U = 2 eV) for both T and H phase. This same substantial difference between DMC and PBE+U also occurs for the total charge density. This discrepancy is most prevalent near the radial density peak (peak of the d orbital) and can be attributed to the fact that DFT functionals (even with the added Hubbard correction) tend to delocalize and unsuccessfully capture 3d orbitals. This large discrepancy in the spin densities highlights the need for more accurate, many-body computational methodologies for correlated materials such as VSe2, where DFT fails. In contrast, there is closer agreement between the DMC and PBE+U spin densities for Se in T and H-VSe2 (see Fig. 5 b) and d)).

Finally, we estimated the site-averaged atomic magnetic moments per V and Se for both T and H phase by integrating the DMC and PBE+U spin densities depicted in Fig. 5. At the DMC level, we estimated a magnetic moment of 1.06(2) µB for V and -0.09(2) µB for Se in T-VSe2, and a magnetic moment of 1.02(1) µB for V and -0.14(1) µB for Se in H-VSe2. At the PBE+U (U = 2 eV) level, we estimated a magnetic moment of 1.30 µB for V and -0.12 µB for Se in T-VSe2, and a magnetic moment of 1.40 µB for V and -0.15 µB for Se in H-VSe2. Consistent with the radial spin density results in Fig. 5, we find that the DMC and PBE+U magnetic moments for Se are in much closer agreement than for V (for both T and H phase). By analyzing the spin densities and obtaining the on-site magnetic moments, we obtain a clear picture of how the magnetization of each ion depends on the computational method used, serving as a benchmark for the magnetic properties of 2D VSe2.
FIG. 5. The radially averaged spin density (ρup - ρdown) as a function of distance, calculated with DMC and PBE+U (U = 2 eV), of a) V and b) Se for 2D T-VSe2 and c) V and d) Se for 2D H-VSe2. The insets of a) and c) depict the spin density isosurfaces of T-VSe2 and H-VSe2 respectively, where the isosurface value was set to 6 x 10−3 e/Å3. The standard error about the mean for DMC is indicated by error bars in blue.

In this work, we used a combination of DFT, DMC and a recently developed surrogate Hessian line-search optimization technique to resolve the previously reported discrepancy in structural parameters and relative phase stability of monolayer T-VSe2 and H-VSe2. Using these methods, we determined the lattice constant and V-Se distance (with DMC accuracy) to be 3.414(12) Å and 2.505(7) Å respectively for T-VSe2 and 3.335(8) Å and 2.503(5) Å respectively for H-VSe2. In addition, we find the relative energy between the phases (T - H) to be 0.06(2) eV/f.u. at the DMC level, indicating that in freestanding form, H-VSe2 is more energetically favorable than T-VSe2. We went on to obtain a phase diagram between the T and H phase from the PES and determined that a phase transition can be induced by strain or by mechanisms such as thermal annealing. Additionally, we benchmarked the magnetic properties such as spin density and on-site magnetic moment for both phases and find substantial differences between DMC and DFT. The results of this study demonstrate the successes of the DMC method coupled with the surrogate Hessian line-search structural optimization technique when applied to a 2D magnetic system. The estimates for lattice constant, bond distance, relative phase energy and the extracted structural-dependent phase diagram assist in clarifying previously inconclusive theoretical and experimental results regarding T and H phase VSe2.
II. CODE AVAILABILITY STATEMENT

Software packages mentioned in the article can be found at https://github.com/usnistgov/jarvis. Please note that the use of commercial software (VASP) does not imply recommendation by the National Institute of Standards and Technology.

III. COMPETING INTERESTS

The authors declare no competing interests.

IV. ACKNOWLEDGMENTS

The authors thank the National Institute of Standards and Technology for funding, computational, and data-management resources. The authors thank Dr. Kamal Choudhary and Dr. Francesca Tavazza for fruitful discussions. We acknowledge grants of computer capacity from the Finnish Grid and Cloud Infrastructure (persistent identifier urn:nbn:fi:research-infras-2016072533).
REFERENCES

1. C. Ataca, H. Şahin, and S. Ciraci, “Stable, single-layer MX2 transition-metal oxides and dichalcogenides in a honeycomb-like structure,” The Journal of Physical Chemistry C 116, 8983–8999 (2012).
2. H.-R. Fuh, C.-R. Chang, Y.-K. Wang, R. F. L. Evans, R. W. Chantrell, and H.-T. Jeng, “Newtype single-layer magnetic semiconductor in transition-metal dichalcogenides VX2 (X = S, Se and Te),” Scientific Reports 6, 32625 (2016).
3. X. Wang, D. Li, Z. Li, C. Wu, C.-M. Che, G. Chen, and X. Cui, “Ferromagnetism in 2D vanadium diselenide,” ACS Nano 15, 16236–16241 (2021).
4. M. Bonilla, S. Kolekar, Y. Ma, H. C. Diaz, V. Kalappattil, R. Das, T. Eggers, H. R. Gutierrez, M.-H. Phan, and M. Batzill, “Strong room-temperature ferromagnetism in VSe2 monolayers on van der Waals substrates,” Nature Nanotechnology 13, 289–293 (2018).
5. W. Yu, J. Li, T. S. Herng, Z. Wang, X. Zhao, X. Chi, W. Fu, I. Abdelwahab, J. Zhou, J. Dan, Z. Chen, Z. Chen, Z. Li, J. Lu, S. J. Pennycook, Y. P. Feng, J. Ding, and K. P. Loh, “Chemically exfoliated VSe2 monolayers with room-temperature ferromagnetism,” Advanced Materials 31, 1903779 (2019).
6. G. Duvjir, B. K. Choi, I. Jang, S. Ulstrup, S. Kang, T. Thi Ly, S. Kim, Y. H. Choi, C. Jozwiak, A. Bostwick, E. Rotenberg, J.-G. Park, R. Sankar, K.-S. Kim, J. Kim, and Y. J. Chang, “Emergence of a metal–insulator transition and high-temperature charge-density waves in VSe2 at the monolayer limit,” Nano Letters 18, 5432–5438 (2018).
7. D. W. Boukhvalov and A. Politano, “Unveiling the origin of room-temperature ferromagnetism in monolayer VSe2: the role of extrinsic effects,” Nanoscale 12, 20875–20882 (2020).
8. S. Sahoo, U. Dutta, L. Harnagea, A. K. Sood, and S. Karmakar, “Pressure-induced suppression of charge density wave and emergence of superconductivity in 1T−VSe2,” Phys. Rev. B 101, 014514 (2020).
9. J. Feng, D. Biswas, A. Rajan, M. D. Watson, F. Mazzola, O. J. Clark, K. Underwood, I. Marković, M. McLaren, A. Hunter, D. M. Burn, L. B. Duffy, S. Barua, G. Balakrishnan, F. Bertran, P. Le Fèvre, T. K. Kim, G. van der Laan, T. Hesjedal, P. Wahl, and P. D. C. King, “Electronic structure and enhanced charge-density wave order of monolayer VSe2,” Nano Letters 18, 4493–4499 (2018).
10. P. Chen, W. W. Pai, Y.-H. Chan, V. Madhavan, M. Y. Chou, S.-K. Mo, A.-V. Fedorov, and T.-C. Chiang, “Unique gap structure and symmetry of the charge density wave in single-layer VSe2,” Phys. Rev. Lett. 121, 196402 (2018).
11. D. Li, X. Wang, C.-m. Kan, D. He, Z. Li, Q. Hao, H. Zhao, C. Wu, C. Jin, and X. Cui, “Structural phase transition of multilayer VSe2,” ACS Applied Materials & Interfaces 12, 25143–25149 (2020).
12. G. V. Pushkarev, V. G. Mazurenko, V. V. Mazurenko, and D. W. Boukhvalov, “Structural phase transitions in VSe2: energetics, electronic structure and magnetism,” Phys. Chem. Chem. Phys. 21, 22647–22653 (2019).
13. S. L. Dudarev, G. A. Botton, S. Y. Savrasov, C. J. Humphreys, and A. P. Sutton, “Electron-energy-loss spectra and the structural stability of nickel oxide: An LSDA+U study,” Phys. Rev. B 57, 1505–1509 (1998).
14. Z. I. Popov, N. S. Mikhaleva, M. A. Visotin, A. A. Kuzubov, S. Entani, H. Naramoto, S. Sakai, P. B. Sorokin, and P. V. Avramov, “The electronic structure and spin states of 2D graphene/VX2 (X = S, Se) heterostructures,” Phys. Chem. Chem. Phys. 18, 33047–33052 (2016).
15. W. M. C. Foulkes, L. Mitas, R. J. Needs, and G. Rajagopal, “Quantum Monte Carlo simulations of solids,” Rev. Mod. Phys. 73, 33–83 (2001).
16. K. Foyevtsova, J. T. Krogel, J. Kim, P. R. C. Kent, E. Dagotto, and F. A. Reboredo, “Ab initio Quantum Monte Carlo calculations of spin superexchange in cuprates: The benchmarking case of Ca2CuO3,” Phys. Rev. X 4, 031003 (2014).
17. B. Busemeyer, M. Dagrada, S. Sorella, M. Casula, and L. K. Wagner, “Competing collinear magnetic structures in superconducting FeSe by first-principles quantum Monte Carlo calculations,” Phys. Rev. B 94, 035108 (2016).
18. D. Wines, K. Saritas, and C. Ataca, “A first-principles Quantum Monte Carlo study of two-dimensional (2D) GaSe,” The Journal of Chemical Physics 153, 154704 (2020).
19. D. Wines, K. Saritas, and C. Ataca, “A pathway toward high-throughput quantum Monte Carlo simulations for alloys: A case study of two-dimensional (2D) GaSxSe1−x,” The Journal of Chemical Physics 155, 194112 (2021).
20. D. Wines, K. Saritas, and C. Ataca, “Intrinsic ferromagnetism of two-dimensional (2D) MnO2 revisited: A many-body quantum Monte Carlo and DFT+U study,” The Journal of Physical Chemistry C 126, 5813–5821 (2022).
21. D. Wines, K. Choudhary, and F. Tavazza, “A systematic DFT+U and quantum Monte Carlo benchmark of magnetic two-dimensional (2D) CrX3 (X = I, Br, Cl, F),” (2022).
22. J. Tiihonen, P. R. C. Kent, and J. T. Krogel, “Surrogate Hessian accelerated structural optimization for stochastic electronic structure theories,” The Journal of Chemical Physics 156, 054104 (2022).
23. H. Shin, J. T. Krogel, K. Gasperich, P. R. C. Kent, A. Benali, and O. Heinonen, “Optimized structure and electronic band gap of monolayer GeSe from Quantum Monte Carlo methods,” Phys. Rev. Materials 5, 024002 (2021).
24. D. Staros, G. Hu, J. Tiihonen, R. Nanguneri, J. Krogel, M. C. Bennett, O. Heinonen, P. Ganesh, and B. Rubenstein, “A combined first principles study of the structural, magnetic, and phonon properties of monolayer CrI3,” The Journal of Chemical Physics 156, 014707 (2021).
25. P. Hohenberg and W. Kohn, “Inhomogeneous electron gas,” Phys. Rev. 136, B864–B871 (1964).
26. J. P. Perdew, K. Burke, and M. Ernzerhof, “Generalized gradient approximation made simple,” Phys. Rev. Lett. 77, 3865–3868 (1996).
27. J. Sun, A. Ruzsinszky, and J. P. Perdew, “Strongly constrained and appropriately normed semilocal density functional,” Phys. Rev. Lett. 115, 036402 (2015).
28. S. Grimme, “Semiempirical GGA-type density functional constructed with a long-range dispersion correction,” Journal of Computational Chemistry 27, 1787–1799 (2006).
29. S. Grimme, S. Ehrlich, and L. Goerigk, “Effect of the damping function in dispersion corrected density functional theory,” Journal of Computational Chemistry 32, 1456–1465 (2011).
30. H. Peng, Z.-H. Yang, J. P. Perdew, and J. Sun, “Versatile van der Waals density functional based on a meta-generalized gradient approximation,” Phys. Rev. X 6, 041005 (2016).
31. Y. Kwon, D. M. Ceperley, and R. M. Martin, “Effects of three-body and backflow correlations in the two-dimensional electron gas,” Phys. Rev. B 48, 12037–12046 (1993).
32. Y. Kwon, D. M. Ceperley, and R. M. Martin, “Effects of backflow correlation in the three-dimensional electron gas: Quantum Monte Carlo study,” Phys. Rev. B 58, 6800–6806 (1998).
33. P. López Ríos, A. Ma, N. D. Drummond, M. D. Towler, and R. J. Needs, “Inhomogeneous backflow transformations in quantum Monte Carlo calculations,” Phys. Rev. E 74, 066701 (2006).
34. M. Bajdich, M. L. Tiago, R. Q. Hood, P. R. C. Kent, and F. A. Reboredo, “Systematic reduction of sign errors in many-body calculations of atoms and molecules,” Phys. Rev. Lett. 104, 193001 (2010).
35. T. Ichibha, A. L. Dzubak, J. T. Krogel, V. R. Cooper, and F. A. Reboredo, “CrI3 revisited with a many-body ab initio theoretical approach,” Phys. Rev. Materials 5, 064006 (2021).
36. K. Saritas, J. T. Krogel, S. Okamoto, H. N. Lee, and F. A. Reboredo, “Structural, electronic, and magnetic properties of bulk and epitaxial LaCoO3 through Diffusion Monte Carlo,” Phys. Rev. Materials 3, 124414 (2019).
37. K. Saritas, J. T. Krogel, P. R. C. Kent, and F. A. Reboredo, “Diffusion Monte Carlo: A pathway towards an accurate theoretical description of manganese oxides,” Phys. Rev. Materials 2, 085801 (2018).
38. G. Chen, S. T. Howard, A. B. Maghirang, K. Nguyen Cong, R. A. B. Villaos, L.-Y. Feng, K. Cai, S. C. Ganguli, W. Swiech, E. Morosan, I. I. Oleynik, F.-C. Chuang, H. Lin, and V. Madhavan, “Correlating structural, electronic, and magnetic properties of epitaxial VSe2 thin films,” Phys. Rev. B 102, 115149 (2020).
39. Z.-L. Liu, X. Wu, Y. Shao, J. Qi, Y. Cao, L. Huang, C. Liu, J.-O. Wang, Q. Zheng, Z.-L. Zhu, K. Ibrahim, Y.-L. Wang, and H.-J. Gao, “Epitaxially grown monolayer VSe2: an air-stable magnetic two-dimensional material with low work function at edges,” Science Bulletin 63, 419–425 (2018).
40. A. Karn, Y. H. Chan, U. Chazarin, P. Chen, and W. W. Pai, “Modification of monolayer 1T-VSe2 by selective deposition of vanadium and tellurium,” AIP Advances 12, 035240 (2022).
41. D. Song, Y. Zhou, M. Zhang, X. He, and X. Li, “Structural and transport properties of 1T-VSe2 single crystal under high pressures,” Frontiers in Materials 8, 710849 (2021).
Supporting Information: A Quantum Monte Carlo study of the structural, energetic, and magnetic properties of two-dimensional (2D) H and T phase VSe2

Daniel Wines,*,† Juha Tiihonen,‡ Kayahan Saritas,¶ Jaron Krogel,§ and Can Ataca*,∥

†Materials Science and Engineering Division, National Institute of Standards and Technology (NIST), Gaithersburg, MD 20899
‡Department of Physics, Nanoscience Center, University of Jyväskylä, P.O. Box 35, Finland
¶Department of Applied Physics, Yale University, New Haven, CT 06520
§Materials Science and Technology Division, Oak Ridge National Laboratory, Oak Ridge, Tennessee 37831
∥Department of Physics, University of Maryland Baltimore County, Baltimore, MD 21250
E-mail: [email protected]; [email protected]
Computational Methods

Density functional theory (DFT) benchmarks for the T and H phase of 2D VSe2 were performed using the Vienna Ab initio Simulation Package (VASP) code with projector augmented wave (PAW) pseudopotentials.1,2 For these calculations, the local density approximation (LDA),3 Perdew-Burke-Ernzerhof (PBE),4 and strongly constrained and appropriately normed (SCAN)5 meta-GGA functionals were used with the added Hubbard correction (U)6 to treat the on-site Coulomb interaction of the 3d orbitals of the V atoms. At least 20 Å of vacuum was given between periodic layers of VSe2 in the c-direction. In addition, we used a reciprocal grid of 24x24x1 and a kinetic energy cutoff of 400 eV.
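As a rough illustration of this setup (the original input files are not part of this text, and the use of ASE as the driver is an assumption for the sketch, not the authors' workflow), the parameters quoted above map onto a script like the following:

```python
# Illustrative DFT+U benchmark setup; parameters are those quoted in the
# text, everything else (ASE driver, thickness, moments) is an assumption.
# Requires a working VASP installation configured for ASE.
from ase.build import mx2
from ase.calculators.vasp import Vasp

# kind="1T" gives the octahedral phase; kind="2H" the trigonal prismatic one.
atoms = mx2("VSe2", kind="1T", a=3.34, thickness=3.19, vacuum=10.0)
atoms.set_initial_magnetic_moments(
    [1.0 if s == "V" else 0.0 for s in atoms.get_chemical_symbols()])

atoms.calc = Vasp(xc="pbe", encut=400, kpts=(24, 24, 1), ispin=2,
                  ldau=True, ldautype=2,  # Dudarev-style +U on the V 3d states
                  ldau_luj={"V": {"L": 2, "U": 2.0, "J": 0.0},
                            "Se": {"L": -1, "U": 0.0, "J": 0.0}})
print(atoms.get_potential_energy())  # total energy in eV
```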
Our Quantum Monte Carlo (QMC) simulations used DFT-PBE to generate the trial wavefunction for fixed-node diffusion Monte Carlo (DMC) calculations. The Quantum Espresso (QE)7 code was used for our DFT calculations to create the trial wavefunction. This trial wavefunction was created for the ferromagnetic configuration of 2D VSe2 using different U values with the goal of variationally determining the optimal nodal surface (the U value that yields the lowest total energy). For V, we used norm-conserving (NC) RRKJ (OPT) pseudopotentials8 and for Se, we used NC Burkatzki-Filippi-Dolg (BFD) pseudopotentials.9 After testing at the DFT level, a kinetic energy cutoff of 4,080 eV (300 Ry) and a k-grid of 6x6x1 were used (see Fig. S1 and S2) to generate trial wavefunctions for DMC. To accelerate the line-search method convergence for the metallic T phase, we increased the k-grid to 12x12x1.
852 |
+
After the trial wavefunction was generated with DFT, Variational Monte Carlo (VMC)
|
853 |
+
and DMC10,11 calculations were performed using the QMCPACK12,13 code. The single de-
|
854 |
+
terminant DFT wavefunction is converted into a many-body wavefunction by use of the Jas-
|
855 |
+
trow parameters,14,15 which assist in modeling electron correlation with the goal of reducing
|
856 |
+
the statistical uncertainty in DMC calculations.16,17 Up to two-body Jastrow18 correlation
|
857 |
+
functions were included, where the linear method19 was used to minimize the variance and
|
858 |
+
energy of the VMC energies. The cost function of the variance optimization is 100 % vari-
|
859 |
+
ance minimization and the cost function of the energy optimization is split as 95 % energy
|
860 |
+
minimization and 5 % variance minimization, which has been proven to reduce the uncer-
|
861 |
+
tainty of DMC calculated results.16 The Nexus20 software suite was used to automate the
|
862 |
+
DFT-VMC-DMC workflow. The locality approximation17 was used to evaluate the nonlocal
|
863 |
+
S2
|
864 |
+
|
865 |
+
part of the pseudopotentials in DMC and an optimal timestep of 0.01 Ha−1 was determined
|
866 |
+
for DMC simulations because it yielded an acceptance ratio greater than 99 %
|
867 |
+
(see Table S1). A full summary of the VMC and DMC methods can be found in Ref. 10.
|
868 |
+
The total charge density and spin density were extracted from our DMC calculations.
|
869 |
+
The spin density is defined as the difference between the spin-up contribution to the total
|
870 |
+
charge density and the spin-down contribution to the total charge density (ρup − ρdown). We
|
871 |
+
used an extrapolation scheme on the DMC charge densities with the goal of eliminating the
|
872 |
+
bias that occurs from using a mixed estimator. Since the charge density estimator does not
|
873 |
+
commute with the fixed-node Hamiltonian, the DMC charge density was obtained from a
|
874 |
+
mixed estimator between the pure fixed-node DMC and VMC densities. The extrapolation
|
875 |
+
formula takes the form:10
|
876 |
+
ρ1 = 2ρDMC − ρVMC + O[(Φ − ΨT)²]    (1)
|
878 |
+
where ρDMC and ρVMC are the DMC and VMC charge densities, respectively; Φ is the fixed-node DMC wavefunction and ΨT is the VMC trial wavefunction.
|
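As a concrete illustration of Eq. (1), the extrapolation is a pointwise operation once the VMC and DMC densities are tabulated on the same real-space grid. The sketch below assumes the densities have already been exported as NumPy arrays; the file names are illustrative, not part of our workflow.

import numpy as np

# Hypothetical inputs: VMC and DMC charge densities sampled on the
# same real-space grid, e.g. exported from QMCPACK density estimators.
rho_vmc = np.load("rho_vmc.npy")
rho_dmc = np.load("rho_dmc.npy")  # mixed estimator between Phi and Psi_T

# Eq. (1): second-order extrapolation that removes the leading
# mixed-estimator bias: rho_1 = 2*rho_DMC - rho_VMC + O[(Phi - Psi_T)^2]
rho_extrapolated = 2.0 * rho_dmc - rho_vmc

# The same formula applies to the spin density, rho_up - rho_down.
np.save("rho_extrapolated.npy", rho_extrapolated)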
880 |
+
In addition, we integrated the DFT+U and DMC spin densities up to a cutoff radius
|
881 |
+
rcut (which we define as 1.34 Å, because it is approximately half of the V-Se
|
882 |
+
bond distance in 2D T and H-VSe2) in order to estimate the site-averaged atomic magnetic
|
883 |
+
moment per V and Se. To obtain these magnetic moments per atom (MA), we sum over the
|
884 |
+
spherically interpolated spin densities:
|
885 |
+
MA = 4π ∫₀^rcut r²ρs(r) dr ≈ 4π Σ_{i=0}^{rcut/Δr} rᵢ²ρs(rᵢ) Δr    (2)
|
895 |
+
where ri is the distance from the center of the atom to a given point on the grid and ∆r is
|
896 |
+
the radial grid size.
|
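A minimal sketch of the discretized sum in Eq. (2), assuming the spin density has already been spherically interpolated around an atomic site onto a uniform radial grid (array and file names are illustrative):

import numpy as np

def atomic_moment(rho_s, dr, r_cut):
    """Integrate a spherically averaged spin density rho_s(r_i), tabulated
    on a uniform radial grid of spacing dr, up to r_cut, following Eq. (2):
    M_A ~ 4*pi * sum_i r_i**2 * rho_s(r_i) * dr."""
    n = int(r_cut / dr)
    r = np.arange(n + 1) * dr
    return 4.0 * np.pi * np.sum(r**2 * rho_s[: n + 1] * dr)

# Example: V site with r_cut = 1.34 Angstrom (half the V-Se bond length).
# rho_s = np.load("spin_density_radial_V.npy")  # hypothetical input
# print(atomic_moment(rho_s, dr=0.01, r_cut=1.34))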
897 |
+
To optimize the structural parameters of both T and H-VSe2 according to the DMC po-
|
898 |
+
tential energy surface (PES), we use a surrogate Hessian accelerated optimization method.21 In the method, we consider the PES around equilibrium as the second-order expansion in
|
902 |
+
Wyckoff parameter space, p:
|
903 |
+
E(p) = E0 + ½ (p − p0)ᵀ Hp (p − p0),    (3)
|
906 |
+
where Hp is the Hessian, or the force-constant matrix, E0 is the energy minimum and p0
|
907 |
+
the energy-minimizing parameters. Diagonalizing the parameter Hessian, i.e., Hp = UᵀΛU,
|
908 |
+
forms an optimal basis for a conjugate line-search in the parameter space, namely the eigen-
|
909 |
+
vectors U. The line-searches along U can be conducted in parallel, and ideally, they locate
|
910 |
+
the minimum in just one parallel iteration within the quadratic region. Here, we conduct
|
911 |
+
the line-search according to a set of 2 parameters: the lattice constant a and the Wyckoff
|
912 |
+
parameter z, which is the unsigned displacement of the Se atoms along the z axis (see Fig.
|
913 |
+
1). For reporting purposes, the line-search parameters a and z are remapped to a and d,
|
914 |
+
where d is the V-Se distance.
|
915 |
+
In the surrogate Hessian scheme, we obtain a cheap but relatively accurate Hessian from
|
916 |
+
DFT, and use it to inform the line-search on the DMC PES, in particular by providing the
|
917 |
+
search directions. We also resample the DFT PES to predict fitting errors. Thus, we may
|
918 |
+
minimize the computational cost of the DMC runs, while maintaining an error tolerance.
|
919 |
+
The surrogate DFT PES was based on QE with a 4,080 eV (300 Ry) cutoff using PBE with
|
920 |
+
no DFT+U correction. The DMC PES was based on DFT-PBE with U = 2 eV orbitals
|
921 |
+
and finite-size extrapolation through supercell sizes of 9 and 24 atoms. Each line-search was
|
922 |
+
based on a 3rd order polynomial fit and set to contain 7 points, or displaced geometries,
|
923 |
+
totaling 13 energy evaluations per phase, per iteration. However, alternative techniques,
|
924 |
+
including (bi)polynomial fitting, were used in some parts to incorporate auxiliary DMC
|
925 |
+
data and ensure convergence to the quadratic region. Effectively, two parallel line-search
|
926 |
+
iterations for both phases were carried out, and convergence was declared in the absence
|
927 |
+
of significant displacements.
|
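The following sketch illustrates the idea of one such parallel line-search iteration; it is not the implementation of Ref. 21, and the search window and helper names are assumptions made for illustration.

import numpy as np

def parallel_line_search(p0, surrogate_hessian, dmc_energy,
                         n_points=7, window=0.1):
    """One conjugate line-search iteration on a stochastic PES.
    p0: current parameters (e.g. [a, z]); surrogate_hessian: cheap DFT
    Hessian; dmc_energy: callable returning the (noisy) DMC energy."""
    # Search directions are the eigenvectors of the surrogate Hessian,
    # Hp = U^T Lambda U (Eq. 3).
    _, U = np.linalg.eigh(surrogate_hessian)
    p_new = np.array(p0, dtype=float)
    for k in range(len(p0)):
        d = U[:, k]
        shifts = np.linspace(-window, window, n_points)
        # Noisy DMC energies of the displaced geometries along direction d.
        energies = [dmc_energy(p0 + s * d) for s in shifts]
        coeffs = np.polyfit(shifts, energies, 3)  # 3rd-order fit, as in the text
        stationary = np.roots(np.polyder(coeffs))
        real = stationary[np.isreal(stationary)].real
        # Keep the stationary point with the lower fitted energy (the minimum);
        # a production code would also propagate the statistical fitting error.
        s_min = min(real, key=lambda s: np.polyval(coeffs, s))
        p_new += s_min * d
    return p_new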
928 |
+
|
929 |
+
|
930 |
+
Figure S1: The total energy per atom of the unit cell (3 atoms) of 2D a) T-VSe2 and b)
|
933 |
+
H-VSe2 as a function of plane wave cutoff energy for the norm-conserving pseudopotentials
|
934 |
+
calculated with DFT using the PBE functional at a k-point grid of 6x6x1. The results show
|
935 |
+
a converged cutoff energy of 4,080 eV (300 Ry) for both phases.
|
936 |
+
Figure S2: The total energy per atom of the unit cell (3 atoms) of 2D a) T-VSe2 and b)
|
939 |
+
H-VSe2 as a function of k-point grid for the norm-conserving pseudopotentials calculated with DFT (PBE) at the converged cutoff energy (see Fig. S1). The results show a converged k-point grid of 6x6x1 (36 k-points) for both monolayers. The number of k-points was scaled appro-
|
942 |
+
priately to obtain the converged grid depending on the supercell size and shape for all DFT
|
943 |
+
and DMC calculations.
|
944 |
+
[Figure S3 panels: total energy (eV) vs. lattice constant (Å) for the T and H phases with the LDA, PBE, and SCAN functionals at U = 0 and U = 2 eV.]
|
1030 |
+
Figure S3: Total energy as a function of lattice strain for T (blue) and H (red) phase 2D
|
1031 |
+
VSe2, calculated with various functionals and U values. Density functionals include LDA,
|
1032 |
+
PBE, and SCAN.
|
1033 |
+
|
1034 |
+
|
1035 |
+
Table S1: Tabulated results for the DMC timestep convergence of a 12 atom cell of 2D
|
1036 |
+
T-VSe2 and H-VSe2. The acceptance ratio of 0.99 indicates that 0.01 Ha−1 is an appropriate
|
1037 |
+
timestep to use for all subsequent DMC simulations.
|
1038 |
+
T-VSe2
Timestep (Ha−1) | DMC Total Energy (Ha) | Error (Ha) | Acceptance Ratio
0.02  | -361.730 | 0.001 | 0.985
0.01  | -361.709 | 0.002 | 0.994
0.005 | -361.709 | 0.003 | 0.997
0.002 | -361.702 | 0.002 | 0.999
H-VSe2
Timestep (Ha−1) | DMC Total Energy (Ha) | Error (Ha) | Acceptance Ratio
0.02  | -361.673 | 0.001 | 0.985
0.01  | -361.657 | 0.002 | 0.994
0.005 | -361.654 | 0.002 | 0.998
0.002 | -361.657 | 0.003 | 0.999
|
1080 |
+
[Figure S4 axes: total energy (eV/f.u.) vs. U (eV); curves for the T and H phases.]
|
1096 |
+
Figure S4: DMC calculated total energies of a 24-atom supercell (normalized per formula
|
1097 |
+
unit (f.u.)) of 2D T (blue) and H (red) phase VSe2, calculated as a function of the U
|
1099 |
+
parameter used to variationally determine the optimal trial wave function. The DMC error
|
1100 |
+
bars represent the standard error about the mean.
|
1101 |
+
[Figure S5 panels: deviations of the a (Å) and z (Å) parameters and the energy per f.u. (eV) vs. line-search iteration for the T and H phases.]
|
1135 |
+
Figure S5: The convergence of the a and z parameters and DMC energies per f.u. for both
|
1136 |
+
T (blue) and H (red) phase of 2D VSe2 based on parallel line-search iterations along the
|
1137 |
+
DMC PES. The starting parameters (iteration 1) are from DFT, the zero offset is the mean
|
1138 |
+
over iterations 2 and 3, and dotted lines indicate the error tolerances for each case (95 %
|
1139 |
+
confidence). The DMC energies from respective equilibrium geometries are plotted with
|
1140 |
+
1SEM (one standard error of the mean) uncertainties, with extra squares marking energies
|
1141 |
+
from the predicted minimum geometry.
|
1142 |
+
[Figure S6 panels: Z-offset (Å) vs. lattice constant (Å) contour maps for the T and H phases; markers: LS eqm., Fit eqm., LS #0, LS #1.]
|
1173 |
+
Figure S6: Contour reconstructions of the DMC PESs (eV) of T (left) and H (right) phases of
|
1174 |
+
2D VSe2 with respect to a and z parameters. The contours are based on bicubic fits to sparse
|
1175 |
+
data and are thus subject to biases and statistical uncertainties not indicated in the figures.
|
1176 |
+
The markers (’x’ and ’+’) indicate data points from two parallel line-search iterations.
|
1177 |
+
References
|
1178 |
+
(1) Kresse, G.; Furthmüller, J. Efficient iterative schemes for ab initio total-energy calcu-
|
1179 |
+
lations using a plane-wave basis set. Phys. Rev. B 1996, 54, 11169–11186.
|
1180 |
+
(2) Kresse, G.; Joubert, D. From ultrasoft pseudopotentials to the projector augmented-
|
1181 |
+
wave method. Phys. Rev. B 1999, 59, 1758–1775.
|
1182 |
+
(3) Hohenberg, P.; Kohn, W. Inhomogeneous Electron Gas. Phys. Rev. 1964, 136, B864–
|
1183 |
+
B871.
|
1184 |
+
(4) Perdew, J. P.; Burke, K.; Ernzerhof, M. Generalized Gradient Approximation Made
|
1185 |
+
Simple. Phys. Rev. Lett. 1996, 77, 3865–3868.
|
1186 |
+
(5) Sun, J.; Ruzsinszky, A.; Perdew, J. P. Strongly Constrained and Appropriately Normed
|
1187 |
+
Semilocal Density Functional. Phys. Rev. Lett. 2015, 115, 036402.
|
1188 |
+
(6) Dudarev, S. L.; Botton, G. A.; Savrasov, S. Y.; Humphreys, C. J.; Sutton, A. P. Electron-energy-loss spectra and the structural stability of nickel oxide: An LSDA+U
|
1192 |
+
study. Phys. Rev. B 1998, 57, 1505–1509.
|
1193 |
+
(7) Giannozzi, P. et al. QUANTUM ESPRESSO: a modular and open-source software
|
1194 |
+
project for quantum simulations of materials. Journal of Physics: Condensed Matter
|
1195 |
+
2009, 21, 395502.
|
1196 |
+
(8) Krogel, J. T.; Santana, J. A.; Reboredo, F. A. Pseudopotentials for quantum Monte
|
1197 |
+
Carlo studies of transition metal oxides. Phys. Rev. B 2016, 93, 075143.
|
1198 |
+
(9) Burkatzki, M.; Filippi, C.; Dolg, M. Energy-consistent pseudopotentials for Quantum
|
1199 |
+
Monte Carlo calculations. The Journal of Chemical Physics 2007, 126, 234105.
|
1200 |
+
(10) Foulkes, W. M. C.; Mitas, L.; Needs, R. J.; Rajagopal, G. Quantum Monte Carlo
|
1201 |
+
simulations of solids. Rev. Mod. Phys. 2001, 73, 33–83.
|
1202 |
+
(11) Needs, R. J.; Towler, M. D.; Drummond, N. D.; Ríos, P. L. Continuum Variational and
|
1203 |
+
Diffusion Quantum Monte Carlo calculations. Journal of Physics: Condensed Matter
|
1204 |
+
2009, 22, 023201.
|
1205 |
+
(12) Kim, J. et al. QMCPACK: an open source ab initio quantum Monte Carlo package for
|
1206 |
+
the electronic structure of atoms, molecules and solids. Journal of Physics: Condensed
|
1207 |
+
Matter 2018, 30, 195901.
|
1208 |
+
(13) Kent, P. R. C. et al. QMCPACK: Advances in the development, efficiency, and applica-
|
1209 |
+
tion of auxiliary field and real-space Variational and Diffusion Quantum Monte Carlo.
|
1210 |
+
The Journal of Chemical Physics 2020, 152, 174105.
|
1211 |
+
(14) Slater, J. C. The Theory of Complex Spectra. Phys. Rev. 1929, 34, 1293–1322.
|
1212 |
+
(15) Jastrow, R. Many-Body Problem with Strong Forces. Phys. Rev. 1955, 98, 1479–1484.
|
1213 |
+
(16) Umrigar, C. J.; Filippi, C. Energy and Variance Optimization of Many-Body Wave
|
1214 |
+
Functions. Phys. Rev. Lett. 2005, 94, 150201.
|
1215 |
+
|
1216 |
+
|
1217 |
+
(17) Mitas, L.; Shirley, E. L.; Ceperley, D. M. Nonlocal pseudopotentials and Diffusion
|
1218 |
+
Monte Carlo. The Journal of Chemical Physics 1991, 95, 3467–3475.
|
1219 |
+
(18) Drummond, N. D.; Towler, M. D.; Needs, R. J. Jastrow correlation factor for atoms,
|
1220 |
+
molecules, and solids. Phys. Rev. B 2004, 70, 235119.
|
1221 |
+
(19) Umrigar, C. J.; Toulouse, J.; Filippi, C.; Sorella, S.; Hennig, R. G. Alleviation of the
|
1222 |
+
Fermion-Sign Problem by Optimization of Many-Body Wave Functions. Phys. Rev.
|
1223 |
+
Lett. 2007, 98, 110201.
|
1224 |
+
(20) Krogel, J. T. Nexus: A modular workflow management system for quantum simulation
|
1225 |
+
codes. Computer Physics Communications 2016, 198, 154–168.
|
1226 |
+
(21) Tiihonen, J.; Kent, P. R. C.; Krogel, J. T. Surrogate Hessian accelerated structural op-
|
1227 |
+
timization for stochastic electronic structure theories. The Journal of Chemical Physics
|
1228 |
+
2022, 156, 054104.
|
1229 |
+
|
1230 |
+
|
-dFIT4oBgHgl3EQf9CsF/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
-tFAT4oBgHgl3EQfqR1A/content/2301.08646v1.pdf
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:21ba3ea900e55c140223fefc32418adfc43e6b4c79787398b90fcb69a3b0d4a3
|
3 |
+
size 272164
|
-tFAT4oBgHgl3EQfqR1A/vector_store/index.pkl
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d984db5efd8099db3d817fde91c01f370290a104be25fd20c20d85af2f7ca792
|
3 |
+
size 30864
|
.gitattributes
CHANGED
@@ -2633,3 +2633,58 @@ X9FRT4oBgHgl3EQfODd9/content/2301.13512v1.pdf filter=lfs diff=lfs merge=lfs -tex
|
|
2633 |
3dE4T4oBgHgl3EQfbAyT/content/2301.05069v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2634 |
n9E5T4oBgHgl3EQfjw-t/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2635 |
M9E4T4oBgHgl3EQf9A55/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2633 |
3dE4T4oBgHgl3EQfbAyT/content/2301.05069v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2634 |
n9E5T4oBgHgl3EQfjw-t/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2635 |
M9E4T4oBgHgl3EQf9A55/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2636 |
+
8dAzT4oBgHgl3EQfSPuZ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2637 |
+
2NAzT4oBgHgl3EQfuP1K/content/2301.01687v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2638 |
+
ztE0T4oBgHgl3EQftwFw/content/2301.02596v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2639 |
+
vNAzT4oBgHgl3EQfdPyu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2640 |
+
S9AzT4oBgHgl3EQfJfsf/content/2301.01079v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2641 |
+
xtFIT4oBgHgl3EQf0StA/content/2301.11368v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2642 |
+
itAzT4oBgHgl3EQfpP1U/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2643 |
+
XtE3T4oBgHgl3EQfcAoE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2644 |
+
rtE0T4oBgHgl3EQfawAZ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2645 |
+
OdFIT4oBgHgl3EQfeCsB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2646 |
+
XtE3T4oBgHgl3EQfcAoE/content/2301.04520v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2647 |
+
2NE4T4oBgHgl3EQfzw00/content/2301.05276v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2648 |
+
39AyT4oBgHgl3EQfcPct/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2649 |
+
3dE4T4oBgHgl3EQfbAyT/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2650 |
+
8dAzT4oBgHgl3EQfSPuZ/content/2301.01230v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2651 |
+
YNFOT4oBgHgl3EQf9jQ2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2652 |
+
79AzT4oBgHgl3EQfE_pK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2653 |
+
69FJT4oBgHgl3EQfmCy-/content/2301.11586v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2654 |
+
ItAzT4oBgHgl3EQfVPwp/content/2301.01279v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2655 |
+
OdFIT4oBgHgl3EQfeCsB/content/2301.11272v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2656 |
+
kNE3T4oBgHgl3EQfJglg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2657 |
+
xtFIT4oBgHgl3EQf0StA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2658 |
+
mdE1T4oBgHgl3EQfhATk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2659 |
+
i9E4T4oBgHgl3EQfSwzD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2660 |
+
79AzT4oBgHgl3EQfE_pK/content/2301.01002v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2661 |
+
FdFKT4oBgHgl3EQfay5f/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2662 |
+
_9AzT4oBgHgl3EQfFvrv/content/2301.01018v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2663 |
+
8NE4T4oBgHgl3EQf2w06/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2664 |
+
stE3T4oBgHgl3EQf9Quo/content/2301.04815v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2665 |
+
_9AzT4oBgHgl3EQfFvrv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2666 |
+
69E3T4oBgHgl3EQfRQm6/content/2301.04420v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2667 |
+
XtE1T4oBgHgl3EQfcAQQ/content/2301.03178v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2668 |
+
ZNAzT4oBgHgl3EQf2P6Y/content/2301.01812v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2669 |
+
69FJT4oBgHgl3EQfmCy-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2670 |
+
39E1T4oBgHgl3EQf6AV3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2671 |
+
HdE5T4oBgHgl3EQfWQ_B/content/2301.05558v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2672 |
+
-9E4T4oBgHgl3EQfEAsg/content/2301.04873v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2673 |
+
HdE5T4oBgHgl3EQfWQ_B/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2674 |
+
39E1T4oBgHgl3EQf6AV3/content/2301.03518v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2675 |
+
KNAyT4oBgHgl3EQf6Ppp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2676 |
+
Q9FPT4oBgHgl3EQfpTVX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2677 |
+
JtE3T4oBgHgl3EQfXgpN/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2678 |
+
ZNAzT4oBgHgl3EQf2P6Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2679 |
+
69E3T4oBgHgl3EQfRQm6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2680 |
+
eNA0T4oBgHgl3EQfHP9d/content/2301.02058v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2681 |
+
vdAyT4oBgHgl3EQfafdn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2682 |
+
FdFKT4oBgHgl3EQfay5f/content/2301.11809v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2683 |
+
u9AyT4oBgHgl3EQfafcR/content/2301.00241v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2684 |
+
Q9FPT4oBgHgl3EQfpTVX/content/2301.13137v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2685 |
+
-tFAT4oBgHgl3EQfqR1A/content/2301.08646v1.pdf filter=lfs diff=lfs merge=lfs -text
|
2686 |
+
ItAzT4oBgHgl3EQfVPwp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2687 |
+
-9E4T4oBgHgl3EQfEAsg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2688 |
+
XtE2T4oBgHgl3EQfYgfk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2689 |
+
eNA0T4oBgHgl3EQfHP9d/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
|
2690 |
+
G9A0T4oBgHgl3EQfBv-A/content/2301.01980v1.pdf filter=lfs diff=lfs merge=lfs -text
|
0tE2T4oBgHgl3EQfiQfL/content/tmp_files/2301.03957v1.pdf.txt
ADDED
@@ -0,0 +1,894 @@
1 |
+
AI based approach to Trailer Generation for Online
|
2 |
+
Educational Courses
|
3 |
+
1st Prakhar Mishra
|
4 |
+
IIIT
|
5 |
+
Bangalore, India
|
6 | |
7 |
+
2nd Chaitali Diwan
|
8 |
+
IIIT
|
9 |
+
Bangalore, India
|
10 | |
11 |
+
3rd Srinath Srinivasa
|
12 |
+
IIIT
|
13 |
+
Bangalore, India
|
14 | |
15 |
+
4th G. Srinivasaraghavan
|
16 |
+
IIIT
|
17 |
+
Bangalore, India
|
18 | |
19 |
+
Abstract—In this paper, we propose an AI based approach
|
20 |
+
to Trailer Generation in the form of short videos for online
|
21 |
+
educational courses. Trailers give an overview of the course to
|
22 |
+
the learners and help them make an informed choice about the
|
23 |
+
courses they want to learn. It also helps to generate curiosity
|
24 |
+
and interest among the learners and encourages them to pursue
|
25 |
+
a course. While it is possible to manually generate the trailers, it
|
26 |
+
requires extensive human efforts and skills over a broad spectrum
|
27 |
+
of design, span selection, video editing, domain knowledge, etc.,
|
28 |
+
thus making it time-consuming and expensive, especially in an
|
29 |
+
academic setting. The framework we propose in this work is a
|
30 |
+
template based method for video trailer generation, where most of
|
31 |
+
the textual content of the trailer is auto-generated and the trailer
|
32 |
+
video is automatically generated, by leveraging Machine Learning
|
33 |
+
and Natural Language Processing techniques. The proposed
|
34 |
+
trailer is in the form of a timeline consisting of various frag-
|
35 |
+
ments created by selecting, para-phrasing or generating content
|
36 |
+
using various proposed techniques. The fragments are further
|
37 |
+
enhanced by adding voice-over text, subtitles, animations, etc., to
|
38 |
+
create a holistic experience. Finally, we perform user evaluation
|
39 |
+
with 63 human evaluators for evaluating the trailers generated
|
40 |
+
by our system and the results obtained were encouraging.
|
41 |
+
Index Terms—Video Trailer Generation, Machine Learning,
|
42 |
+
Natural Language Processing
|
43 |
+
I. INTRODUCTION
|
44 |
+
The growth of the internet has significantly increased the
|
45 |
+
amount of free instructional content. These resources are
|
46 |
+
offered not only by big institutions but also by individual
|
47 |
+
content creators over various platforms such as Coursera,
|
48 |
+
Udemy, YouTube, etc. This increase in content production rate
|
49 |
+
has resulted in the creation of redundant courses and tutoring
|
50 |
+
videos for many topics over time. In spite of advantages
|
51 |
+
like on-demand accessibility, the abundance of options has
|
52 |
+
increased confusion and made it more challenging to select
|
53 |
+
a course that might be in line with learner’s interests. And
|
54 |
+
often, enrolling to a course that doesn’t meet the learner’s
|
55 |
+
expectations for a course’s curriculum and other aspects such
|
56 |
+
as expected level of commitment, the availability of support,
|
57 |
+
etc., causes the learner to lose motivation and eventually drop
|
58 |
+
the course. [1], [2].
|
59 |
+
This problem can be tackled to a certain extent by presenting
|
60 |
+
a video trailer to the learners before the start of the course
|
61 |
+
(learning pathway) to help them quickly glance through the
|
62 |
+
pathway and get an overall idea of the course content and its
|
63 |
+
format [3]–[5].
|
64 |
+
The idea of Trailers is not brand-new, and the film industry
|
65 |
+
has been using them extensively for a while. Trailers, in
|
66 |
+
context of movies are mostly about advertising. They notify
|
67 |
+
viewers about an upcoming movie while generating interest
|
68 |
+
among them. Often the effectiveness of a trailer affects the
|
69 |
+
perception of the movie, even before it is released publicly.
|
70 |
+
The course trailers serve a greater purpose in the educational
|
71 |
+
context than simple course promotion. Before beginning the
|
72 |
+
learning journey, they aid in helping learners set realistic
|
73 |
+
expectations for their learning outcomes and competency
|
74 |
+
mastery.
|
75 |
+
The concept of trailers might resemble that of summariza-
|
76 |
+
tion [6]–[8], but apart from incorporating a few elements of
|
77 |
+
summarization like shortening and abstracting out information
|
78 |
+
from substantial sized input source, trailers are different in
|
79 |
+
terms of their motivation, purpose and the impact they cre-
|
80 |
+
ate on the end users. Unlike summaries, trailers need not
|
81 |
+
be complete in their coverage. Also, they are designed to
|
82 |
+
give glimpses of a few interesting segments of the narrative
|
83 |
+
without revealing the main plot or climax of the underlying
|
84 |
+
narrative [9]. Although there is no clear demarcation of what
|
85 |
+
a climax is in academic narratives, based on our analysis of
|
86 |
+
many academic course trailers in popular MOOCs (Massive
|
87 |
+
Open Online Courses) such as Udemy1 and Coursera2, we
|
88 |
+
see prevalence of a common pattern in trailer timelines. The
|
89 |
+
timeline starts with an introduction about the course and the
|
90 |
+
instructor and ends with a call-to-action (CTA) which offers
|
91 |
+
opportunity to the learners to take action or start the course.
|
92 |
+
In between, there are several elements and factoids about the
|
93 |
+
course and its contents, that aim to arouse viewer interest.
|
94 |
+
The current approach to generating trailers is manual, cumbersome, and time-consuming; it requires someone with
|
96 |
+
relevant skills like designing, video editing, and a subject
|
97 |
+
matter expert to help in curating the trailer content. Although,
|
98 |
+
there are software products like Apple iMovie3, Windows
|
99 |
+
Movie Maker4 and others that people can use for generating
|
100 |
+
trailers by performing basic editing like cuts, merging frames,
|
101 |
+
1https://www.udemy.com
|
102 |
+
2https://www.coursera.org
|
103 |
+
3https://www.apple.com/in/imovie
|
104 |
+
4https://www.microsoft.com/en-us/p/movie-maker-video-editor/
|
105 |
+
9mvfq4lmz6c9
|
106 |
+
arXiv:2301.03957v1 [cs.CL] 10 Jan 2023
|
107 |
+
|
108 |
+
Fig. 1. Trailer Structure
|
109 |
+
etc. Yet the content to be placed in the trailer has to be curated
|
110 |
+
entirely by a human expert.
|
111 |
+
In our work, we propose a semi-automatic template based
|
112 |
+
framework for generating video trailers for learning pathways,
|
113 |
+
which are a sequence of related educational documents of
|
114 |
+
various forms [10]–[12]. Here, most of the content that is
|
115 |
+
placed in the trailer is auto-generated with a scope for taking
|
116 |
+
inputs from the creator. The framework for trailer generation
|
117 |
+
consists of various essential trailer fragments arranged as
|
118 |
+
a timeline of the trailer. Each fragment is composed of a
|
119 |
+
sequence of frames that are coherent within themselves in
|
120 |
+
terms of the topical information they present. And inherently,
|
121 |
+
each frame is composed of various types of elements and their
|
122 |
+
properties like font size, text styling, image size, etc. Fig. 1
|
123 |
+
shows the illustration for the same.
|
124 |
+
Once all the elements are generated and placed at their
|
125 |
+
respective positions within a frame of a trailer fragment,
|
126 |
+
a template is applied to it. The template consists of the
|
127 |
+
multi-modal experiences such as voice-over, subtitles, sounds,
|
128 |
+
animations, etc. It also determines the elements of the trailer
|
129 |
+
design such as number and ordering of fragments, frames and
|
130 |
+
elements. Fig. 2 shows the visual view of some of the frames
|
131 |
+
for one of the templates with its corresponding elements and
|
132 |
+
their positioning in the frames.
|
133 |
+
II. RELATED WORK
|
134 |
+
There are studies that discuss the idea, use and motivation
|
135 |
+
of having trailers for academic courses [3]–[5]. Also, there are
|
136 |
+
online educational platforms like Coursera and Udemy which
|
137 |
+
have course trailers. However, we could not find literature on
|
138 |
+
approaches to generating trailers for academic courses. Hence,
|
139 |
+
in the following paragraphs we discuss some of the pioneering
|
140 |
+
works of trailer generation in-general across other domains.
|
141 |
+
Trailer generation can also be seen as a special case of the larger research interest of adding an element of surprise to engage the receiver's attention amidst information overload [13], [14].
|
144 |
+
Authors in [15]–[18] present an approach for automatic
|
145 |
+
trailer generation from movies as input. Hermes et al. [16]
|
146 |
+
create trailers for action movies by analyzing audio and
|
147 |
+
video signals present in movies and automatically detecting
|
148 |
+
features like faces, scene cuts, sound-volume, etc and use
|
149 |
+
ontology of the corresponding domain for producing trailers.
|
150 |
+
Irie et al. [17] propose a movie trailer generation method
|
151 |
+
which extracts symbols like title logo, main theme music and
|
152 |
+
selects impressive shot or speech segments based on clustering
|
153 |
+
methods and EM algorithm. Brachmann et al. [15] propose an
|
154 |
+
approach of generating action movie trailers using the concept
|
155 |
+
of trailer grammar, knowledge base and various ML techniques
|
156 |
+
for analyzing audio and images present in the movie. Smith
|
157 |
+
et al. [18] propose a system that understands and encodes
|
158 |
+
the patterns and emotions present in horror movies using
|
159 |
+
Convolutional Neural Networks (CNNs).
|
160 |
+
All the above methods use visual and audio cues to derive
|
161 |
+
the trailer frames, whereas we use raw text data and build
|
162 |
+
the necessary discriminative and generative Neural Network
|
163 |
+
models to create frames and their elements to be placed in the
|
164 |
+
trailer.
|
165 |
+
Hesham et al. in [19] explore the idea of creating movie
|
166 |
+
trailers from their subtitles. They first classify the movie by
|
167 |
+
genre, identify important keywords and then rank important
|
168 |
+
subtitles. The trailer is then generated by stacking the movie
|
169 |
+
time-frames corresponding to the important subtitles. Gaikwad
|
170 |
+
et al. in [20] propose a technique to create previews of movies
|
171 |
+
by utilizing subtitles and finding the most representative scenes
|
172 |
+
by matching them with the plot summaries. Chi et al. [21]
|
173 |
+
propose an approach to automatically create marketing-style
|
174 |
+
short videos for a given product page url by extracting
|
175 |
+
elements and their styles present in the product html page
|
176 |
+
under specified tags.
|
177 |
+
Unlike the aforementioned works which primarily focus
|
178 |
+
on generating trailers based on extractive strategies, in
|
179 |
+
our work we develop various modules that comprehend in-
|
180 |
+
put document and generate content for the trailer either by
|
181 |
+
paraphrasing or by using Natural Language Generator based
|
182 |
+
model.
|
183 |
+
As far as we know, automatic/semi-automatic generation
|
184 |
+
of video trailers for learning pathways is unexplored. Our
|
185 |
+
proposed approach of video trailer generation using Machine
|
186 |
+
Learning, Natural Language Processing and Generation tech-
|
187 |
+
niques is also unique.
|
188 |
+
III. PROPOSED SYSTEM
|
189 |
+
We propose a framework for trailer generation consisting of
|
190 |
+
different trailer fragments that form a trailer timeline, genera-
|
191 |
+
tion of the trailer fragments and finally applying templates that
|
192 |
+
determine the look and feel of the trailer. Based on our analysis
|
193 |
+
of multiple trailers presented for various online courses offered
|
194 |
+
on various educational platforms like Coursera and Udemy, we
|
195 |
+
designed and structured our trailer elements, fragments and
|
196 |
+
overall flow of the trailer.
|
197 |
+
We propose a trailer timeline consisting of 7 trailer frag-
|
198 |
+
ments namely, Splash, Trailer Title, Author Details, Outline,
|
199 |
+
Meta-Information, Social Proof and finally the Call-to-Action.
|
200 |
+
Figure 3 shows the timeline of all the above-mentioned frag-
|
201 |
+
ments in the trailer. Each of these fragments defines a specific
|
202 |
+
part of the trailer, their purpose and their importance in the
|
203 |
+
trailer. We define the fragments in detail in further part of
|
204 |
+
this section. As discussed earlier, fragments are composed of
|
205 |
+
|
206 |
+
[Figure: hierarchy of a trailer into fragments (1…t), frames (1…f), and elements (1…e).]
Fig. 2. Illustration of Frames
|
215 |
+
Fig. 3. Trailer Timeline
|
216 |
+
a sequence of frames and each frame is composed of various
|
217 |
+
types of elements and their properties.
|
218 |
+
The overall approach for trailer generation is illustrated in
|
219 |
+
Fig. 4. All the resources mapped to a learning pathway form
|
220 |
+
the input to our Fragment Data Generator (FDG) module.
|
221 |
+
Template constraints that define the elements, fragments and
|
222 |
+
frames also form the input to FDG. The FDG along with other
|
223 |
+
sources like creator’s input, any images or information from
|
224 |
+
the web or knowledge bases, etc., can be incorporated into
|
225 |
+
the frames or the fragments. Once the elements for all the
|
226 |
+
frames across all the fragments are generated, we pass it to
|
227 |
+
the composition module for adding in other important aspects
|
228 |
+
of the trailer like voice-over, subtitles, sounds, etc., to add to
|
229 |
+
its multi-modal experience.
|
230 |
+
A. Fragment Data Generation
|
231 |
+
Following are the proposed trailer fragments arranged in the
|
232 |
+
order of their appearance in the trailer timeline-
|
233 |
+
Splash Fragment: The idea of splash fragment is to
|
234 |
+
display any introductory information related to the trailer such
|
235 |
+
as credits, software logo, etc., mostly obtained from creator’s
|
236 |
+
input. This optional fragment could also be the last fragment
|
237 |
+
in the trailer depending on the creator’s preference.
|
238 |
+
Trailer Title Fragment: In this fragment we generate a
|
239 |
+
short yet representative title for the entire trailer, hence giving
|
240 |
+
a quick idea about the topic that summarizes the underlying
|
241 |
+
pathway or the set of resources. We apply Hierarchical Title
|
242 |
+
Generation model [22] over the resources mapped to the
|
243 |
+
learning pathway to get the list of trailer titles. We select a title
|
244 |
+
among them based on their Term Frequency. In case none of
|
245 |
+
the titles are above a threshold, we fall back on the fact that the
|
246 |
+
first resource in the pathway is the proxy to the introductory
|
247 |
+
resource, and we generate the trailer title for it by applying
|
248 |
+
Single Document Title Generator [23], [24]. Figure 5 shows
|
249 |
+
the trailer title fragment generation flow.
|
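One simple way to realize this term-frequency-based selection is sketched below; the candidate titles are assumed to be given, and the scoring scheme and threshold are illustrative rather than the exact ones used in our system.

from collections import Counter

def select_trailer_title(candidate_titles, pathway_text, threshold=0.05):
    """Pick the candidate whose words are most frequent in the pathway;
    return None if no candidate clears the threshold, in which case the
    caller falls back to titling the first (introductory) resource."""
    tf = Counter(pathway_text.lower().split())
    total = sum(tf.values())

    def score(title):
        terms = title.lower().split()
        return sum(tf[t] for t in terms) / (total * len(terms))

    best = max(candidate_titles, key=score)
    return best if score(best) >= threshold else None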
250 |
+
Author Details Fragment: A quick introduction about
|
251 |
+
the author or the instructor of the learning pathway could
|
252 |
+
help the learners build an implicit connect and trust. Majority
|
253 |
+
of the elements in the Author Details Fragment like author
|
254 |
+
names, affiliations and author’s image are expected from the
|
255 |
+
creator while creating the trailer. Template constraints such
|
256 |
+
as addressing multiple authors with different frame elements,
|
257 |
+
handling and getting relevant images to be put in this fragment
|
258 |
+
etc., are also obtained from the trailer creator. These inputs and
|
259 |
+
template constraints are plugged in the automation system
|
260 |
+
to fill the overall author frame. Additionally, we crawl the
|
261 |
+
web to get relevant images, for example: we crawl the web
|
262 |
+
and get relevant affiliation images and place it in the desired
|
263 |
+
coordinates as defined by the template. Also for the templates
|
264 |
+
that allow for having only the frontal face of author, we make
|
265 |
+
use of an open-sourced face recognition model5 to crop the
|
266 |
+
face from the uploaded author image. In case no author image
|
267 |
+
is provided to the system by the creator, we place a dummy
|
268 |
+
caricatured relevant sized image. Similarly, we have defined
|
269 |
+
defaults for the features, frames and templates in case there is
|
270 |
+
no input from the trailer creator. For example, when multiple
|
271 |
+
authors exist, we display information w.r.t. the first
|
272 |
+
author entered by the creator and treat him/her as the primary
|
273 |
+
instructor and rest all the authors are abstracted by placing
|
274 |
+
them under the “and others” category.
|
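For templates that require only the frontal face, the crop can be obtained with the referenced OpenCV cascade classifier roughly as follows (a sketch; the detection parameters are assumptions):

import cv2

def crop_author_face(image_path):
    """Detect the largest frontal face and return it as a cropped image,
    or None if no face is found (the caller then uses a placeholder)."""
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        return None
    x, y, w, h = max(faces, key=lambda f: f[2] * f[3])  # keep largest face
    return img[y:y + h, x:x + w]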
275 |
+
Outline Fragment: This fragment gives an idea about
|
276 |
+
the specific topics that would be covered in the learning
|
277 |
+
pathway. This could help in setting learners’ expectation in
|
278 |
+
terms of the topics covered and in deciding whether the content
|
279 |
+
aligns to his/her end goals. For this we use Single Document
|
280 |
+
5https://docs.opencv.org/3.4/db/d28/tutorial cascade classifier.html
|
281 |
+
|
282 |
+
[Figures 2 and 3 content: placeholder frame mock-ups, and the trailer timeline — Splash (credits/logo), Title, Author (introduction to the instructor), Outline (overview of topics covered), Meta-Information (course structure and other details), Social Proof (building validation and trust), CTA (defining next steps).]
Fig. 4. Trailer Generation Flow
|
325 |
+
Fig. 5. Trailer Title Fragment Generation Flow
|
326 |
+
Title Generator [23], [24] model to generate titles for all the
|
327 |
+
resources in the learning pathway which represents the outline
|
328 |
+
of the learning pathway.
|
329 |
+
Every template under the outline fragment limits the number
|
330 |
+
of text elements to be listed on the screen with the aim to
|
331 |
+
balance aesthetics and information at the same time. To adhere
|
332 |
+
to this prior constraint, we design a multi-step process to select a diverse, yet impactful, set of elements from a relatively larger
|
334 |
+
list of outlines generated in the previous step. Fig. 6 shows
|
335 |
+
the entire pipeline of Outline Text Selection.
|
336 |
+
Let K be the number of text elements that the frame requires
|
337 |
+
and N be the total number of resources we have as input
|
338 |
+
and let K < N. We start with all the resources (N) given
|
339 |
+
by the user and remove any instance of assessments and
|
340 |
+
short documents under the assumption that such documents
|
341 |
+
won’t hold much informational content. After this we remove
|
342 |
+
any occurrence of exact duplicates and near duplicates in the
|
343 |
+
remaining set and pass the remaining resource list to the title
|
344 |
+
generator system to generate title for every resource.
|
345 |
+
Post this, we fix the first and the last position of the outline
|
346 |
+
with the first and last resource title. We specifically do this
|
347 |
+
action because of the inherent ordering present in the input
|
348 |
+
resource as a part of learning pathway. Also intuitively, picking
|
349 |
+
first and last sets a bound over the topic space to be covered
|
350 |
+
under a particular course.
|
351 |
+
Finally on this reduced set, we divide the space into bins of
|
352 |
+
equal size from which we randomly sample one outline ele-
|
353 |
+
ment from each bin to remaining K−2 positions in the outline
|
354 |
+
list. We use threshold-based Jaccard and cosine similarity for
|
355 |
+
filtering syntactic and semantic duplicates respectively. The
|
356 |
+
Jaccard similarity between any two documents is calculated as
|
357 |
+
an intersection over union of word sets for both documents. It
|
358 |
+
helps us get a sense of the syntactic similarity between documents.
|
359 |
+
For calculating cosine similarity, we vectorise our inputs using pre-trained Sentence Transformers [25] and then measure the semantic closeness between them using cosine similarity.
|
420 |
+
Algorithm 1 Duplicates Filter
|
421 |
+
1: resources = [1, 2, ..., N−1, N]
2: remaining_resources = [1, N]
3: for i ← 2 to N−1 do
4:     scores = []
5:     for r in remaining_resources do
6:         scores ← calculate_similarity(i, r)
7:     end for
8:     if max(scores) < threshold then
9:         remaining_resources ← i
10:    end if
11: end for
12: return remaining_resources
|
440 |
+
Since every pathway is composed of different resources of
|
441 |
+
various properties like length, style, etc., having one threshold
|
442 |
+
that fits all does not work. Hence, our threshold is adaptable in
|
443 |
+
a way that guarantees at-least K items are selected post any of
|
444 |
+
the syntactic or semantic pruning steps. The threshold search
|
445 |
+
space is between 0 to 1 where for efficiency and tractability we
|
446 |
+
quantize it at 0.1. Then for each threshold we get remaining
|
447 |
+
resources as defined in Algorithm 1. Finally the threshold that
|
448 |
+
guarantees at-least K items and possibly reduces the input set
|
449 |
+
by maximum is chosen as the final threshold.
|
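Algorithm 1 and the adaptive threshold search can be sketched together in Python as follows (illustrative only; the similarity argument stands in for either the Jaccard measure or the Sentence-Transformer cosine similarity described above):

def jaccard(a, b):
    """Intersection-over-union of the two documents' word sets."""
    wa, wb = set(a.lower().split()), set(b.lower().split())
    return len(wa & wb) / len(wa | wb)

def duplicates_filter(texts, similarity, threshold):
    """Algorithm 1: keep the first and last items, then add each interior
    item only if it is not too similar to anything already kept."""
    keep = [0, len(texts) - 1]
    for i in range(1, len(texts) - 1):
        scores = [similarity(texts[i], texts[r]) for r in keep]
        if max(scores) < threshold:
            keep.append(i)
    return sorted(keep)

def adaptive_threshold(texts, similarity, k):
    """Scan thresholds quantized at 0.1; pick the one that prunes the most
    while still guaranteeing at least K surviving items."""
    best = 1.0
    for t in [round(0.1 * i, 1) for i in range(1, 11)]:
        if len(duplicates_filter(texts, similarity, t)) >= k:
            best = min(best, t)
    return best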
450 |
+
Meta-Information Fragment: The idea of having Meta-
|
451 |
+
Information Fragment is to inform learners about other impor-
|
452 |
+
tant aspects of the course like course structure, total reading
|
453 |
+
time, total number of resources, etc. We believe this would
|
454 |
+
help learners understand more about the learning pathway or
|
455 |
+
resources apart from just knowing the topics that would be
|
456 |
+
covered. Also, such information can be used by learners in
|
457 |
+
charting out their learning hours and estimating the efforts
|
458 |
+
it would take for the successful completion of the course.
|
459 |
+
Some of the elements that we generate automatically as part
|
460 |
+
of this fragment are: generating topical word clouds6 based on
|
461 |
+
word frequencies after pre-processing like stop-word removal,
|
462 |
+
estimating total reading time based on average reading speed
|
463 |
+
statistics and other pathway level derived statistics like total
|
464 |
+
resources, availability of discussion forum, etc.
|
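A sketch of how these elements can be derived with the cited wordcloud package follows; the reading speed of 200 words per minute is an assumption for illustration, not a figure from our system.

from wordcloud import WordCloud, STOPWORDS

def meta_information(resources, words_per_minute=200):
    """Build a topical word-cloud image and estimate total reading time
    for a list of resource texts."""
    text = " ".join(resources)
    cloud = WordCloud(stopwords=STOPWORDS, width=800, height=400)
    cloud.generate(text)          # frequency-based layout after stop-word removal
    cloud.to_file("wordcloud.png")

    n_words = len(text.split())
    return {
        "total_resources": len(resources),
        "reading_time_min": round(n_words / words_per_minute),
    }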
465 |
+
Social Proof Fragment: Social Proof is one of the most
|
466 |
+
prominent ways of social influence and is based on the
|
467 |
+
heuristic that the users follow others similar to them when
|
468 |
+
uncertain [26]. We collect these statistics from the deployed
|
469 |
+
learning environments. This information is added to the video
|
470 |
+
trailer over time when different learners take this course and
|
471 |
+
the analytical data is available.
|
472 |
+
Call-to-Action Fragment: CTA is a marketing term which
|
473 |
+
is designed to push the audience in taking the desired actions.
|
474 |
+
It is an important aspect of any trailer because all of the
|
475 |
+
enthusiasm that is built in a learner while watching the trailer is
|
476 |
+
of no use if the learner is not clear on the next actionable [27], [28] item. In our system, we randomly select phrases from a pre-defined list of potential key-phrases to be placed on the screen at a pre-defined location under this fragment. Some of the phrases we use are ‘Start your learning today’, ‘Let’s get started’, ‘Are you ready?’, etc., along with the action that will take the learner on the learning pathway.
6https://pypi.org/project/wordcloud/
|
484 |
+
B. Additional Elements
|
485 |
+
In this subsection, we discuss two other interesting elements
|
486 |
+
that we propose to be added to the trailers, namely, Definition
|
487 |
+
Extractor and Paraphraser. These are shown as suggestions
|
488 |
+
to the trailer creator and it’s up to the creator to include them
|
489 |
+
and decide their placement in the trailer.
|
490 |
+
Definition Extractor: Definitions are descriptive elements
|
491 |
+
that we believe can help in introduction of concepts. To
|
492 |
+
select the definition from the learning resource, we propose a
|
493 |
+
discriminative model that classifies a given piece of text into
|
494 |
+
Definition or Non-Definition class. For building the classifier
|
495 |
+
model, we use a dataset7 that contains positive and negative
|
496 |
+
definition candidates extracted from Wikipedia for various
|
497 |
+
topics. Our best performing model is a fine-tuned DistilBERT-
|
498 |
+
base-uncased8 model with a Definition class F1-score of 0.96
|
499 |
+
and Non-Definition class F1-score of 0.97 on the test set.
|
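At inference time, such a fine-tuned classifier can be applied with the Hugging Face pipeline API roughly as below; the checkpoint path and the label string are placeholders, since the fine-tuned model itself is not published here.

from transformers import pipeline

# Placeholder path to the DistilBERT model fine-tuned on the
# Wikipedia definition/non-definition dataset.
classifier = pipeline("text-classification",
                      model="path/to/definition-classifier")

def extract_definitions(sentences, min_score=0.9):
    """Return the sentences classified as definitions with high confidence.
    The "DEFINITION" label name is an assumption about the fine-tuning setup."""
    results = classifier(sentences)
    return [s for s, r in zip(sentences, results)
            if r["label"] == "DEFINITION" and r["score"] >= min_score]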
500 |
+
Paraphraser: We believe that this is a useful utility that
|
501 |
+
can be used in the Outline and Trailer title fragments. This
|
502 |
+
gives the creator an ability to re-write concisely any substan-
|
503 |
+
tially larger textual content present in any frame. We use a
|
504 |
+
publicly available pre-trained model9 for this task which fine-
|
505 |
+
tunes a large sized T5 (Text-to-Text Transfer Transformer) [7]
|
506 |
+
model on a parallel corpus of sentences and their corresponding
|
507 |
+
paraphrase.
|
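The paraphraser can be invoked along the following lines (a sketch; the checkpoint name is a placeholder for the fine-tuned T5 paraphrase model, and the prompt format of the cited implementation may differ):

from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-large")             # placeholder
model = T5ForConditionalGeneration.from_pretrained("t5-large")  # placeholder

def paraphrase(text, max_length=64):
    """Generate one concise paraphrase of the input text."""
    inputs = tokenizer("paraphrase: " + text, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=max_length,
                             num_beams=4, early_stopping=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)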
508 |
+
C. Video Composition
|
509 |
+
Video Composition module is responsible for stitching
|
510 |
+
together all the elements that need to be part of the trailer, such
|
511 |
+
as the Frame data, Voice-over text, Text-to-Speech (TTS), etc.,
|
512 |
+
into a trailer video. Fig. 4 pictorially shows the overall flow
|
513 |
+
of the various components that are a part of the video compo-
|
514 |
+
sition. We use Python’s MoviePy library10 as our choice for
|
515 |
+
video editing and composition of the templates as it provides
|
516 |
+
us with all the necessary editing functions like inserting text,
|
517 |
+
concatenations and cuts, which we use to draft our templates.
|
518 |
+
After the frame-level data elements are in-place, the next
|
519 |
+
step is to generate voice-over text for each of the frames.
|
520 |
+
Voice-over text is defined as the spoken-text that the narrator
|
521 |
+
speaks while a frame is displayed on the screen. For this,
|
522 |
+
we select grammar from a pre-defined set of slot based
|
523 |
+
text grammars which we define per frame. The slots in the
|
524 |
+
grammar are nothing but the screen’s text elements.
7http://nlp.uniroma1.it/wcl/
8https://huggingface.co/distilbert-base-uncased
9https://github.com/ramsrigouthamg/Questgen.ai
10https://zulko.github.io/moviepy
Fig. 6. Outline Text Selection
Finally, once the Voice-over Text is generated for every frame, we pass them through IBM Watson’s Text-to-speech (TTS) API11 with relevant parameters such as voice-type, gender, etc., choosing from a list of speaker profiles to get the audio files for every frame. Fig. 7 illustrates the flow from grammar selection to voice generation for the Trailer Title Fragment. We then derive the frame duration accordingly to
|
538 |
+
make sure that the visual and audio aspects of the frames are
|
539 |
+
in sync and minimize any kind of lag on either ends. Finally,
|
540 |
+
along with all the above details, we input template constraints
|
541 |
+
like positioning of elements, and styles, user preferences, and
|
542 |
+
some basic animations like fade-in and fade-out settings to
|
543 |
+
come up with the final trailer.
|
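A stripped-down sketch of this composition step with MoviePy is shown below; the frame dictionary layout, resolution, and fade settings are illustrative assumptions rather than our exact template code.

from moviepy.editor import (AudioFileClip, TextClip,
                            concatenate_videoclips, vfx)

def compose_trailer(frames, out_path="trailer.mp4"):
    """frames: list of dicts with the on-screen text and the path of the
    synthesized (TTS) voice-over audio for that frame."""
    clips = []
    for f in frames:
        voice = AudioFileClip(f["audio_path"])
        clip = (TextClip(f["text"], fontsize=40, color="white",
                         size=(1280, 720), method="caption")
                .set_duration(voice.duration)  # keep visuals and audio in sync
                .set_audio(voice)
                .fx(vfx.fadein, 0.5).fx(vfx.fadeout, 0.5))
        clips.append(clip)
    concatenate_videoclips(clips).write_videofile(out_path, fps=24)

# The voice-over text itself comes from filling screen elements into a
# slot-based grammar, e.g.:
# "This course on {title} is taught by {author}.".format(**frame_elements)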
544 |
+
IV. EXPERIMENTS
|
545 |
+
In this section, we describe the dataset, evaluation strategy
|
546 |
+
and results obtained for the trailers generated by our proposed
|
547 |
+
system.
|
548 |
+
Dataset: Apart from the datasets used for training and evaluating the specific modules responsible for generating fragment-relevant data, we created three dif-
|
551 |
+
ferent learning pathways for our experiments and evaluation
|
552 |
+
of the generated trailers. The learning pathways differ from each other in the number of resources and stylometry. Two of
|
554 |
+
the pathways are based on text book chapters with difference
|
555 |
+
in number of resources mapped, and one pathway is video
|
556 |
+
lectures. We tried to take different pathways to evaluate our
|
557 |
+
model’s flexibility on different types of learning pathways.
|
558 |
+
First one was created by sampling some chapters sequentially
|
559 |
+
from a freely available Machine Learning textbook [29]. For
|
560 |
+
second, we chose the speech-to-text transcription of a week’s
|
561 |
+
video lectures from an academic course on NLP. Our third
|
562 |
+
learning pathway is the entire ML textbook [29]12. All three corpora are analogous to learning pathways as they are all
|
564 |
+
semantically coherent, progressive and share the same global
|
565 |
+
topic.
|
566 |
+
Evaluation and Results: Trailers can be seen as gen-
|
567 |
+
erative tasks with an inherent notion of creativity. Here the
|
568 |
+
objective evaluation is not straightforward because the ef-
|
569 |
+
fectiveness of a trailer is highly subjective and relies on the
|
570 |
+
human perception. However, we think that human evaluation
|
571 |
+
11https://cloud.ibm.com/catalog/services/speech-to-text
|
572 |
+
12Datasets can be found at: https://bit.ly/3ro3JLO
|
573 |
+
TABLE I
POSITIVE COMMENTS
1  The first trailer looked more catchy compared to the second one. Being generated by an AI agent, both seem to be good.
2  Looks amazing. Great work!
3  You guys have truly done a remarkable work!
4  Good job, keep it up!
5  Great!

TABLE II
IMPROVEMENTS SUGGESTED BY USERS
1  Maybe I just felt that he was conveying info too fast.
2  As of now, it sounds a bit robotic. Some improvements w.r.t. the TTS can help make it better.
3  Slowing the video when the information that is being conveyed is relatively dense would be helpful. For example, when going through the list of topics, speaking slowly helps. When giving instructor names, one can be fast.
4  Also, if there's some way to bring the viewer's attention to the part of the slide that's being mentioned, that would be better where the content is not sequential.
5  Remove the date from the frame. Add something about what they can do once they learn the course (what type of problems they can solve).
However, we think that human evaluation of the generated trailers can give us a good perspective on their quality. We had 63 human evaluators, consisting of Engineering graduates, postgraduates, and PhD students well versed in the technical domain that our dataset represents.

We evaluate 6 trailers13 in total, generated from the 3 learning pathways discussed above, i.e., 2 trailers per learning pathway. The two trailers per pathway are based on two templates, T1 and T2, created by us. The templates differ in aesthetics and level of detail (LOD). Each trailer was evaluated on a set of 8 questions on a Likert scale from 1 to 5, where 1 means very poor and 5 means very good.

There were three separate groups of evaluators. Each group was provided with the 2 trailers (one per template) for the same pathway. We thoughtfully performed this diversification to simulate a cluster-sampling procedure, since showing all 6 trailers to the same evaluators would have induced boredom, resulting in less accurate evaluations.

13Sample Trailers: https://bit.ly/3Hscie9
[Fig. 6 (flowchart residue, reconstructed): the outline text selection pipeline. Stage 1, filtering less-informative documents: all input resources R = [1, 2, ... N-1, N] pass syntactic filters over document text (filter assessments, filter short documents, filter exact duplicates, filter near duplicates). Stage 2: generate a title for every remaining resource, then apply syntactic filters over titles (filter exact and near duplicates) and a semantic filter over titles (filter semantic duplicates), giving R = [1, 2, ... P-1, P]. Stage 3: select the 1st and pth resources and add them to the outline, then divide the remaining P-2 resources into K-2 equally spaced bins and randomly select 1 resource from each bin, giving the outline elements O = [1, 2, ... K-1, K], where K <= P <= N.]

Fig. 7. Flow of Grammar selection to Voice-over generation
We also encouraged the evaluators to give free-form comments on the trailers they evaluated, as this will help us improve our system in future iterations. Tables I and II list some of the positive comments and the improvements suggested by the users. Fig. 8 shows some of the trailer fragments generated by our proposed system14.

Following is the list of 8 questions asked of the evaluators during the evaluation. The text in italics highlights the broader aspect of the evaluation feature.

Q1. Did you find the trailer to be self-contained?
Q2. How were the fonts and styles used in the trailer in terms of readability?
Q3. How did you find the length and pace of the trailer?
Q4. As a user, how impressed are you with this trailer overall?
Q5. Could this trailer evoke interest in someone taking this course? (Ignoring any prior inclination to the topic)
Q6. How was the average duration of each frame?
Q7. Based on the trailer you just saw, do you think you have a good impression of the course now?
Q8. How did you find the sync between the audio and visuals you saw?
As can be seen in Fig. 9, the scores obtained for each of the survey questions are good and well above the average (a score of 3) for almost all the trailers generated by our approach. In our study, we also found that both templates performed equally well. However, for Q5 the average score is relatively lower compared to the other questions. On digging deeper, we found that some of the 24 comments we received mentioned the difficulty of the course as the reason for not getting interested in it. This suggests that Q5 is more subjective.

14Detailed demo walk-through: https://www.youtube.com/watch?v=06VVuAlFhTk
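As a minimal sketch of how the per-question averages in Fig. 9 can be computed, assuming responses are collected as one row per (pathway, template, question, score) record; the column names are our assumption, not the study's actual schema:

```python
import pandas as pd

# Illustrative response records: one row per (pathway, template, question,
# Likert score in 1..5).
responses = pd.DataFrame(
    [("P1", "T1", "Q1", 4), ("P1", "T1", "Q5", 3), ("P1", "T2", "Q1", 5)],
    columns=["pathway", "template", "question", "score"],
)

# Mean Likert value per survey question for each pathway-template pair,
# i.e. the quantity plotted in Fig. 9.
avg = (
    responses.groupby(["pathway", "template", "question"])["score"]
    .mean()
    .unstack("question")
)
print(avg)
```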
V. CONCLUSIONS AND FUTURE WORK

In this paper, we presented a novel framework for automatically generating video trailers for a learning pathway using ML and NLP techniques. We validated our trailers on multiple corpora of varied granularity through human evaluation, and the results obtained were encouraging. This approach can be adapted to different domains, given enough data to train the models involved in the process. We believe that this approach can lay the foundation for building more advanced versions of trailers.

In future, we plan to improve the existing system by incorporating the suggestions obtained in the user evaluation and by adding more interesting themes, such as automatically detecting learning outcomes given the resources. We also intend to create an interactive dashboard to take inputs from the creator and to allow the creator to edit the auto-generated content.

ACKNOWLEDGMENT

We thank the Center of Excellence on Cognitive Computing, funded by the Mphasis F1 Foundation, for funding this research. We also thank Dr. Prasad Ram and the Gooru team (https://gooru.org) for the topical discussions and encouragement.
REFERENCES
[1] O. Simpson, "Student retention in distance education: are we failing our students?" Open Learning: The Journal of Open, Distance and e-Learning, vol. 28, no. 2, pp. 105–119, 2013.
[2] M. Hartnett, A. St George, and J. Dron, "Examining motivation in online distance learning environments: Complex, multifaceted, and situation-dependent," International Review of Research in Open and Distributed Learning, vol. 12, no. 6, pp. 20–38, 2011.
[3] L. Gayoung, K. Sunyoung, K. Myungsun, C. Yoomi, and R. Ilju, "A study on the development of a MOOC design model," Educational Technology International, vol. 17, no. 1, pp. 1–37, 2016.
[4] B. T.-m. Wong, "Factors leading to effective teaching of MOOCs," Asian Association of Open Universities Journal, 2016.
[5] P. Stacey, "Pedagogy of MOOCs," for Innovation and Quality in Learning, p. 111, 2014.
[Fig. 7 (illustration residue, reconstructed): the Title Generator produces a sample title, "Graph Neural Networks". One grammar is randomly selected from the slot-based set (e.g. "Hello, Welcome to this course on _TITLE_", "Hi, Welcome to this course on _TITLE_", "Hello, Welcome to _TITLE_ course.", "Hi, Welcome to the course _TITLE_"); its slot is filled to give the Trailer Title Fragment "Hi, Welcome to this course on Graph Neural Networks", which is passed to the text-to-speech engine.]

Fig. 8. Trailer Fragments

Fig. 9. Average scores per Survey Question for all 3 pathways and trailers. Here P1, P2, P3 represent the 3 pathways and T1, T2 represent the templates.
[6] J. Zhang, Y. Zhao, M. Saleh, and P. Liu, "PEGASUS: Pre-training with extracted gap-sentences for abstractive summarization," in International Conference on Machine Learning. PMLR, 2020, pp. 11328–11339.
[7] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li, and P. J. Liu, "Exploring the limits of transfer learning with a unified text-to-text transformer," arXiv preprint arXiv:1910.10683, 2019.
[8] R. Mihalcea and P. Tarau, "TextRank: Bringing order into text," in Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing, 2004, pp. 404–411.
[9] R. Lienhart, S. Pfeiffer, and W. Effelsberg, "Video abstracting," Communications of the ACM, vol. 40, no. 12, pp. 54–62, 1997.
[10] C. Diwan, S. Srinivasa, and P. Ram, "Automatic generation of coherent learning pathways for open educational resources," in European Conference on Technology Enhanced Learning. Springer, 2019, pp. 321–334.
[11] Y.-L. Chi, "Ontology-based curriculum content sequencing system with semantic rules," Expert Systems with Applications, vol. 36, no. 4, pp. 7838–7847, 2009.
[12] V. Shmelev, M. Karpova, and A. Dukhanov, "An approach of learning path sequencing based on revised Bloom's taxonomy and domain ontologies with the use of genetic algorithms," Procedia Computer Science, vol. 66, pp. 711–719, 2015.
[13] L. R. Varshney, "To surprise and inform," in 2013 IEEE International Symposium on Information Theory. IEEE, 2013, pp. 3145–3149.
[14] ——, "Must surprise trump information?" IEEE Technology and Society Magazine, vol. 38, no. 1, pp. 81–87, 2019.
[15] C. Brachmann, H. I. Chunpir, S. Gennies, B. Haller, P. Kehl, A. P. Mochtarram, D. Möhlmann, C. Schrumpf, C. Schultz, B. Stolper et al., in Digital Tools in Media Studies. transcript-Verlag, 2015, pp. 145–158.
[16] T. Hermes and C. Schultz, "Automatic generation of hollywood-like movie trailers," eCulture Factory, 2006.
[17] G. Irie, T. Satou, A. Kojima, T. Yamasaki, and K. Aizawa, "Automatic trailer generation," in Proceedings of the 18th ACM International Conference on Multimedia, 2010, pp. 839–842.
[18] J. R. Smith, D. Joshi, B. Huet, W. Hsu, and J. Cota, "Harnessing AI for augmenting creativity: Application to movie trailer creation," in Proceedings of the 25th ACM International Conference on Multimedia, 2017, pp. 1799–1808.
[19] M. Hesham, B. Hani, N. Fouad, and E. Amer, "Smart trailer: Automatic generation of movie trailer using only subtitles," in 2018 First International Workshop on Deep and Representation Learning (IWDRL). IEEE, 2018, pp. 26–30.
[20] B. Gaikwad, A. Sontakke, M. Patwardhan, N. Pedanekar, and S. Karande, "Plots to previews: Towards automatic movie preview retrieval using publicly available meta-data," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 3205–3214.
[21] P. Chi, Z. Sun, K. Panovich, and I. Essa, "Automatic video creation from a web page," in Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology, 2020, pp. 279–292.
[22] P. Mishra, C. Diwan, S. Srinivasa, and G. Srinivasaraghavan, "Automatic title generation for learning resources and pathways with pre-trained transformer models," International Journal of Semantic Computing, vol. 15, no. 04, pp. 487–510, 2021.
[23] ——, "Automatic title generation for text with pre-trained transformer language model," in 2021 IEEE 15th International Conference on Semantic Computing (ICSC). IEEE, 2021, pp. 17–24.
[24] J. Tan, X. Wan, and J. Xiao, "From neural sentence summarization to headline generation: A coarse-to-fine approach," in IJCAI, vol. 17, 2017, pp. 4109–4115.
[25] N. Reimers and I. Gurevych, "Sentence-BERT: Sentence embeddings using siamese BERT-networks," arXiv preprint arXiv:1908.10084, 2019.
[26] R. B. Cialdini and L. James, Influence: Science and Practice. Pearson Education, Boston, MA, 2009, vol. 4.
[27] "Call-to-action (CTA)," https://bit.ly/3DDUBp4, accessed: 2021-12-08.
[28] "3 reasons a call to action is important," https://bit.ly/33c7WbO, accessed: 2021-12-08.
[29] G. James, D. Witten, T. Hastie, and R. Tibshirani, An Introduction to Statistical Learning: with Applications in R. Springer, 2013.
[Fig. 8 and Fig. 9 residue: OCR fragments of the trailer frame screenshots (an Outline Frame, a Meta-Information Frame listing read time, resources, and assessments, and a CTA Frame) and of the Fig. 9 bar chart (Likert values 0-5 on the y-axis, survey questions Q1-Q8 on the x-axis, one bar per pathway-template pair P1-T1 ... P3-T2). No further content is recoverable.]
0tE2T4oBgHgl3EQfiQfL/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff

1dFLT4oBgHgl3EQfpy-5/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3ebd44b889223021ca073749402a0626fc467147d97a1e2c2d358cec0551f38
+size 235245

1tE3T4oBgHgl3EQfngpH/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff

2NAzT4oBgHgl3EQfuP1K/content/2301.01687v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50216d986e7a9513e28d22f27d5968156ff88da65751df700f1f535a5419b5d1
+size 584705

2NAzT4oBgHgl3EQfuP1K/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c1ac2a5ace02d19d52ed4720e051cab46bdf1df08fd920e9b713eb99a15b2cd
+size 377061

2NE4T4oBgHgl3EQfzw00/content/2301.05276v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d7e2b7d48546d9e9502eb6ce467bc86da3935ce31b9db049431f12f3d551f62
+size 542378

2NE4T4oBgHgl3EQfzw00/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c909ebc1c1040ac63c8fb0b1c53806df1888eb350d1a8be033f307aa970a5e
+size 156404

39AyT4oBgHgl3EQfcPct/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55169d147e3371c55411165884274c35b803e5d49eb679484d4d84df993aef7b
+size 3670061

39E1T4oBgHgl3EQf6AV3/content/2301.03518v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5186fee447c70af2a96284ef89bb7ae9391b9385fb7b0a76d155eb7b97423759
+size 516036

39E1T4oBgHgl3EQf6AV3/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df37c62e06e396496e684b965c38817a59e309326857f2d8b64c7c75c5a07005
+size 1114157

39E1T4oBgHgl3EQf6AV3/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868224aa6b2a091d2942f3770f2bd9f696e240e7921822af18504b8954efbbb0
+size 42066

3dE4T4oBgHgl3EQfbAyT/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ebe2b10699c18520c2ee7be1bd119f19f97d026502ac5a4e3231ea38b697fb2
+size 1638445

49A0T4oBgHgl3EQfNv86/content/tmp_files/2301.02150v1.pdf.txt ADDED
@@ -0,0 +1,1735 @@
MNRAS 000, 1–18 (2022)    Preprint 6 January 2023    Compiled using MNRAS LATEX style file v3.0
arXiv:2301.02150v1 [astro-ph.EP] 5 Jan 2023

A Gaian Habitable Zone

Rudy Arthur,1★ Arwen Nicholson2†
1 University of Exeter, Department of Computer Science
2 University of Exeter, Department of Physics and Astronomy

Accepted XXX. Received YYY; in original form ZZZ

ABSTRACT
When searching for inhabited exoplanets, understanding the boundaries of the habitable zone around the parent star is key. If life can strongly influence its global environment, then we would expect the boundaries of the habitable zone to be influenced by the presence of life. Here, using a simple abstract model of 'tangled ecology' where life can influence a global parameter, labelled as temperature, we investigate the boundaries of the habitable zone of our model system. As with other models of life-climate interactions, the species act to regulate the temperature. However, the system can also experience 'punctuations', where the system's state jumps between different equilibria. Despite this, an ensemble of systems still tends to sustain or even improve conditions for life on average, a feature we call Entropic Gaia. The mechanism behind this is sequential selection with memory, which is discussed in detail. With this modelling framework we investigate questions about how Gaia can affect and ultimately extend the habitable zone to what we call the Gaian habitable zone. This generates concrete predictions for the size of the habitable zone around stars, suggests directions for future work on the simulation of exoplanets, and provides insight into the Gaian bottleneck hypothesis and the habitability/inhabitance paradox.

Key words: Gaia – Habitable Zone – Biosignatures
1 INTRODUCTION

The Gaia hypothesis is that life influences the Earth's feedback mechanisms to form a self-regulating system, and therefore life can help maintain habitable conditions on its host planet Lovelock & Margulis (1974). Distinct from the biosphere Huggett (1999), Gaia is the whole life-earth system, considered as a single entity. The importance of life's interactions with the non-living environment is now common sense, and the discipline of Earth System Science Lenton & Watson (2013) studies the various feedback loops that constitute 'Gaia's body' Volk (2012). Gaia theory itself takes a very broad perspective, aiming to describe life at a planetary scale. Gaia theory asks questions like: Is Gaia inevitable on a planet that hosts life, or is it due to chance? What mechanisms can create a long-lived Gaian system? How will we detect other 'Gaias' beyond our solar system, where direct planetary exploration is not an option? The astrophysical point of view was crucial in the early development of Gaia, with the search for life on Mars providing the initial inspiration for the Gaia hypothesis Lovelock (1965). When looking at Earth from afar, Gaia is what we see, and the search for habitable or inhabited exoplanets is the search for other Gaias.

Methods for exoplanet detection have developed considerably since Gaia was first proposed. New telescopes, such as the James Webb Space Telescope and the Extremely Large Telescope (currently under construction), and future missions, such as the Large Ultraviolet Optical Infrared Surveyor, mean that searching for signs of alien life will be possible within the coming decades Snellen et al. (2021); Quanz et al. (2021). While robotic missions to potentially habitable exoplanets remain unfeasible, evidence for alien life will only be observable for exoplanets that have been dramatically shaped by their biospheres. Exoplanets with newly emerging life, or those with the remnants of a once-thriving biosphere that has since collapsed, will be unlikely to produce a remotely observable signature. Answering the key questions of Gaia theory not only informs how we think about the history of life on Earth, but can form the theoretical foundation for the study of life in the universe.

Catling et al. (2018) proposed a framework for assessing potential biosignatures using a probabilistic approach that combines observations of the candidate planet and host star with models of the possible abiotic and biotic planetary processes to determine the probability of the planet being inhabited. With a great diversity of exoplanets being found, any potential biosignature must be considered within the context of its host planet Seager (2013); Claudi (2017); Kiang et al. (2018); Schwieterman et al. (2018); Krissansen-Totton et al. (2022). Detailed abiotic models of exoplanets are being developed for a wide range of detected planets, see e.g. Amundsen et al. (2016); Boutle et al. (2017); Collins (2021); Fauchez et al. (2021), and sophisticated models of biogeochemistry exist for different points in Earth's history, e.g. Kharecha et al. (2005); Daines et al. (2017); Lenton et al. (2018b); Zakem et al. (2020).

Detailed and realistic modelling of life on other planets is important; however, this paper will take a broader view that aims to understand the generic mechanisms that lead to Gaia. We build on recent work Ford Doolittle (2014); Lenton et al. (2018a); Arthur & Nicholson (2022) on Gaian selection principles. We argued in Arthur & Nicholson (2022) that some approaches to Gaian selection Ford Doolittle (2014); Lenton et al. (2018a) lead to anthropic reasoning - we see Gaia because if we didn't we wouldn't exist. Anthropic reasoning is controversial, with its opponents arguing that it is unfalsifiable, with limited (if any) predictive power Smolin (2007). The coming era of exoplanet astronomy gives new context and purpose to these discussions. If our aim is for Gaia theory to inform our search for life in the universe, then anthropic arguments are clearly inadequate.

In Arthur & Nicholson (2022) we argue for 'Entropic Gaia' - that the emergence of Gaia is a statistical tendency for planets that host life. This means that life's history on a single planet can be chaotic and have periods of stability and collapse; however, there is a trend towards increasing biomass, stability, habitability and other Gaian features. Any single planetary history for a life-bearing planet, such as Earth, is likely to follow a (bumpy) trajectory towards Gaia. The micro-mechanism leading to this behaviour was argued to be 'Sequential Selection with Memory' or an 'entropic ratchet'. In brief, this mechanism starts from the observation that coupled life-environment systems move between regulation and disregulation. By definition, disregulating systems quickly destroy themselves while regulating systems persist; this is sequential selection Lenton et al. (2018a). In models of ecology and Gaia (e.g. Becker & Sibani (2014); Harding (1999)) increasing diversity and complexity is associated with increasing stability1. More diverse ecosystems can generate more novel species through mutation. Thus after every ecosystem collapse (caused by internal or external factors), if a new ecosystem arises it is likely to be more diverse, having started with a greater 'pool' of species, and therefore also more stable. Sequential selection with memory describes a sequence of distinct stable states that tends to get 'more Gaian' over time.

This mechanism was originally proposed in the framework of the Tangled Nature Model (TNM) Christensen et al. (2002). Originally designed to study co-evolution, we demonstrated in Arthur & Nicholson (2017) that the TNM is closely related to the generalised Lotka-Volterra model. The TNM is based on the idea that the growth rate of a species is given by a fitness function that depends on the other species present. Any model making this assumption will look like the TNM close to equilibrium Arthur & Nicholson (2022). By studying the model with agent-based dynamics we can incorporate mutation, giving us a very flexible, robust and general model of evolutionary ecology. Since the TNM is quite general, conclusions drawn in this framework are likely to have general applicability.

Artificial life modelling has been used extensively to study Gaia. The original Daisy World Watson & Lovelock (1983) led to a large number of variants Wood et al. (2008), and there are a variety of other models such as the Guild Model Downing & Zvirinsky (1999), Greenhouse World Worden (2010), Flask Model Williams & Lenton (2007) and Exo-Gaia Nicholson et al. (2018), to name a few. We have previously discussed Gaian models based on the TNM in Arthur & Nicholson (2017, 2022). Here we propose a new variant of the TNM that is more similar to other Gaian models, with a very simple abiotic (non-living) component.

While previous Gaian models have included mutation (such as the Flask model and ExoGaia), the complexity of the biosphere in these models has been limited, and different species within the models only impact one another via the shared environment, e.g. via resource competition or via global parameters such as temperature. When we look at life on Earth it is clear that different species can have a large impact on each other beyond resource competition or changing global parameters like temperature. For example, there are complex interactions between worms, plants and soil that change the structure, chemistry, water retention and other properties of soil for the benefit of many species Le Bayon et al. (2021). These kinds of symbiotic (and also antagonistic) interactions are usually missing in Gaian models. We also observe that throughout Earth's history there have been dramatic and spontaneous changes in the diversity and complexity of the biosphere, e.g. the Great Oxidation Event which allowed aerobic respiration to become an important energy source for life Ligrone (2019). These types of events, crucial for the selection mechanism discussed above, are absent in other Gaian models. In contrast, TNM species interact directly through antagonistic or symbiotic inter-species couplings, the population varies considerably due to spontaneously occurring 'quakes', and there is no rigid upper bound on the population. Thus by combining elements of the TNM with elements of earlier Gaian models we can explore how Gaian regulation emerges within a system that allows for more complex ecosystem dynamics.

With this model we hope to show that the arguments for Entropic Gaia are robust by demonstrating how they work in a setting where life needs to interact with and regulate an external environment. At the same time we will explore how Gaia can inform the search for life in the universe, in particular how Gaia predicts a larger 'habitable zone'. In section 2 we describe the model and how we add temperature, which is a combination of abiotic and biotic components. In section 3 we study the model at constant background temperature to understand how temperature is regulated and interacts with the spontaneous 'quakes' that occur in the TNM. Section 4 discusses the changes to the habitable zone in the presence of life, and section 5 studies how life adapts to deteriorating abiotic conditions. Finally, we conclude in section 6.

1 See Landi et al. (2018) for a thorough discussion of the relationship between ecosystem complexity and stability, though most of these models don't consider coupling to the external environment.
★ E-mail: [email protected]
† E-mail: [email protected]
2 MODEL DESCRIPTION

2.1 The Tangled Nature Model

We start, as in Arthur & Nicholson (2022), with the generalised Lotka-Volterra model

\frac{dN_i}{dt} = N_i f_i(\vec{n}, N)    (1)

N_i is the population of species i, N is the total population and n_i = N_i/N. f_i is a fitness function that depends on the type and abundance of the other species present through \vec{n} = (n_1, n_2, \ldots, n_D) and N. We can expand f_i to linear order around the equilibrium at N = 0:

\frac{dN_i}{dt} = N_i \left[ f_i(\vec{0}, 0) + \sum_j \frac{df_i}{dn_j}(\vec{0}, 0)\, n_j + \frac{df_i}{dN}(\vec{0}, 0)\, N + \ldots \right]    (2)

The summations here and for the rest of this paper are over all extant species. The three terms on the right-hand side are the basic TNM variables.

• r_i ≡ f_i(\vec{0}, 0) is the growth rate of species i in the absence of any other species. We set this to zero, meaning that one species' growth depends entirely on the other species present. We could add some species with non-zero growth rates to represent primary producers, but for simplicity and consistency with the rest of the TNM literature every species has r_i = 0.

• J_{ij} ≡ \frac{df_i}{dn_j}(\vec{0}, 0) is the inter-species coupling matrix, where J_{ij} is the effect of species j on species i. As usual Christensen et al. (2002), we set the elements randomly from a symmetric distribution. Here each element J_{ij} is randomly chosen from a standard normal product distribution times c = 100. The exact functional form of the distribution is not important, only that it has infinite support Arthur et al. (2017).

• −μ ≡ \frac{df_i}{dN}(\vec{0}, 0) is the inverse carrying capacity, controlling how much of the global 'resource' is consumed by each individual.

The growth equation now looks like

\frac{dN_i}{dt} = N_i \left( \sum_j J_{ij} n_j - \mu N \right) = N_i f^{TNM}_i    (3)
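As a minimal sketch of eq. (3) for a small toy community, assuming the zero-diagonal convention common in the TNM literature (the text above does not state J_ii explicitly):

```python
import numpy as np

rng = np.random.default_rng(0)

# A small toy community; the L = 20 bit genome of the full model allows
# 2^20 species, far too many to store a dense J for.
D, c, mu = 5, 100.0, 0.1

# J_ij drawn from a standard normal product distribution times c, as in
# Section 2.1; J_ii = 0 (no self-interaction) is an assumption here.
J = c * rng.standard_normal((D, D)) * rng.standard_normal((D, D))
np.fill_diagonal(J, 0.0)

def tnm_fitness(J, counts, mu):
    """Eq. (3): f_i = sum_j J_ij * n_j - mu * N for every extant species."""
    N = counts.sum()
    return J @ (counts / N) - mu * N

counts = np.array([100.0, 80.0, 60.0, 40.0, 20.0])
print(tnm_fitness(J, counts, mu))  # one fitness value per species
```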
In Arthur & Nicholson (2022) we added higher-order terms to the fitness function and argued that these could be interpreted as species-environment interactions, since their net effect was to modify the μ term to create an "effective" carrying capacity. This kind of 'endogenous' environment (roughly analogous to, e.g., atmospheric composition or oceanic pH) is in contrast to most Gaian models, which represent the environment through one or more 'exogenous' parameters that the model agents aim to regulate. Daisyworld is the paradigmatic example, where black and white daisies spontaneously regulate a rising global temperature. We want to study this type of regulation in the TNM framework and only deal with an abiotic environment, so, for simplicity, we do not include the higher-order terms.

While this is a common approach in Gaian modelling, it is worth some consideration. It was shown in Arthur & Nicholson (2022) and Arthur & Nicholson (2017) that selection in the TNM tends to produce beneficial endogenous/biotic environments. If we included both an abiotic and a biotic environment, TNM agents would be subject to more selective pressure, i.e. they would need to avoid degrading both the external parameters (temperature) and the internal parameters (∼ pH). In Arthur & Nicholson (2017) it was noted that environmental selection is relatively weak, because when new species occur they start with low populations and therefore have minimal impact on the environment. This must also be the case for an abiotic environment. Ultimately the relative weighting of each in the fitness function would determine which environmental parameters are most 'optimised'. Studying these effects is interesting, but we leave it for future work, focusing here on understanding the model with a purely exogenous environment.
2.2 Adding Temperature

To add temperature to the TNM we let the global temperature T be the sum of abiotic and biotic components:

T = T_0 + T_{life}    (4)

T_0 is the temperature in the absence of life and T_{life} is the effect of the extant species in the model on the temperature. Every individual of species i has an effect, H_i, on the global temperature. The values of H_i are selected from a normal distribution with mean 0 and standard deviation σ_H, so species are equally likely to have a warming or cooling effect. The total effect of life on the temperature is

\sum_i H_i N_i    (5)

We describe how \sum_i H_i N_i is related to T_{life} in the next section. We make the reproduction rate depend on the temperature by modifying the fitness function to

f^{TNM}_i(T) = \sum_j \frac{J_{ij}}{1 + \left(\frac{T - T_P}{\tau}\right)^2}\, n_j - \mu N    (6)

T_P is the preferred temperature and τ is a tolerance parameter. The functional form is chosen so that at temperatures T far from T_P the interaction strength is reduced; for example, at T = T_P + τ the inter-species interaction strength is halved. The functional form 1/(1 + x²) is chosen for simplicity; any function that applies a smooth and symmetric temperature 'window' would work. We have chosen T_P and τ to be constant for all species and interactions. We could, for example, make the width different for every inter-species interaction, τ → τ_{ij}, and similarly for T_P. In the interest of keeping this work relatively brief, and in line with other work such as the original Daisyworld model Watson & Lovelock (1983), the Flask model Williams & Lenton (2007) and ExoGaia Nicholson et al. (2018), we use a constant T_P. By keeping T_P constant for all species we can focus on and highlight life's impact on its environment. If T_P is kept constant, then any improvement to a "planet's" survival rate when including life-environment interaction can only come from life improving its environment, rather than life simply adapting to it. As this is the part of Gaia theory that is less well accepted Kirchner (2003), it makes sense to explore scenarios where this effect isn't potentially obscured by species adaptation.
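A two-line check of the window factor in eq. (6); the numbers tie directly to the text: the couplings are halved at T_P ± τ, and at T_0 = 105 = T_P + 2.5τ (the case studied in Section 3) the factor is ≃ 0.14:

```python
def temperature_window(T, T_P=100.0, tau=2.0):
    """The factor 1 / (1 + ((T - T_P)/tau)^2) multiplying J_ij in eq. (6)."""
    return 1.0 / (1.0 + ((T - T_P) / tau) ** 2)

assert temperature_window(100.0) == 1.0   # full coupling strength at T_P
assert temperature_window(102.0) == 0.5   # halved at T = T_P + tau
print(temperature_window(105.0))          # ~0.138 at T_0 = T_P + 2.5*tau
```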
2.3 Running the Model

We solve the growth equation using agent-based dynamics. This means that we generate individual agents whose reproduction rate is controlled by the fitness function f^{TNM}_i(T). Each agent is an individual of some species i, and each agent's reproduction probability is given by

p^{off}_i = \frac{1}{1 + e^{-f^{TNM}_i(T)}}    (7)

The basic dynamics of the model are then (see also Arthur et al. (2017)):

(i) Choose an individual and, with probability p^{off}_i, make a copy of that individual. The copying step is meant to mimic asexual reproduction. We take the L = 20 bit binary representation of the species index i and copy one bit at a time, with a probability p_mut = 0.01 to flip a bit during each copy operation.

(ii) Choose a random individual and kill it with probability p_kill = 0.1.

L is the genome length, where the value of 20 is standard Christensen et al. (2002), meaning that the model can generate 2^L ∼ 10^6 unique species. A 'generation' is the time required to iterate over the basic reproduction/death loop above N/p_kill times, where this number is recalculated at the end of each generation. This means that in each generation every individual has had a chance, on average, to be selected once for a birth/death process. To update the temperature we perform the following steps after every generation:

• If required, update the abiotic temperature T_0 (see Section 5).
• Update T_{life} using

T_{life}(t) = \lambda T_{life}(t-1) + (1 - \lambda) \sum_i H_i N_i    (8)

• Set T = T_0 + T_{life}
Variable                    Symbol   Value
Inverse carrying capacity   μ        0.1
Mutation rate               p_mut    0.01
Death rate                  p_kill   0.1
Lag parameter               λ        0.9
Preferred temperature       T_P      100
Temperature tolerance       τ        2
Temperature effect          σ_H      0.05

Table 1. A list of all the key parameters in the model and the values we choose. The model has a large parameter space and the parameters are set to convenient values used in previous work on the TNM. The qualitative behaviour of the model is very robust to variations in these parameter values Christensen et al. (2002); Arthur et al. (2017).
Here t is the generation number, the timescale in this model, and λ is a lag parameter that stops the temperature from changing instantaneously. This mimics the real behaviour of the Earth system; e.g. climate models have demonstrated a lag in the response of surface temperatures over the ocean due to changes in atmospheric CO_2 Boucher et al. (2012). The model is initialised with 500 individuals of a randomly chosen species, and all averages are taken over 1000 model runs using different random seeds.
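Putting Sections 2.2 and 2.3 together, here is a compact sketch of one generation of the dynamics under the Table 1 parameter values. Tracking species counts rather than individual agent objects, and regenerating J_ij and H_i on demand from hashed seeds, are our own illustrative simplifications; the birth/death steps, the bit-flip mutation, and the eq. (6)-(8) updates follow the text above.

```python
import numpy as np

# Parameter values from Table 1; L is the genome length.
L, P_MUT, P_KILL = 20, 0.01, 0.1
MU, C, SIGMA_H = 0.1, 100.0, 0.05
T_P, TAU, LAM = 100.0, 2.0, 0.9

def coupling(i, j):
    # J_ij: a fixed pseudo-random draw per ordered pair (product of two
    # normals times c), regenerated on demand so the 2^L x 2^L matrix is
    # never stored. The hashing scheme is an illustrative assumption.
    if i == j:
        return 0.0
    r = np.random.default_rng(hash((i, j)) & 0xFFFFFFFF)
    return C * r.standard_normal() * r.standard_normal()

def heat_effect(i):
    # H_i ~ Normal(0, sigma_H): warming or cooling, equally likely.
    r = np.random.default_rng(hash(("H", i)) & 0xFFFFFFFF)
    return SIGMA_H * r.standard_normal()

def fitness(i, pop, T):
    # Eq. (6): the temperature window scales the couplings, not mu*N.
    N = sum(pop.values())
    window = 1.0 / (1.0 + ((T - T_P) / TAU) ** 2)
    interaction = sum(coupling(i, j) * nj for j, nj in pop.items()) / N
    return window * interaction - MU * N

def pick(pop, rng):
    # Choose a random individual, i.e. a species weighted by population.
    species = list(pop)
    w = np.array([pop[s] for s in species], dtype=float)
    return species[rng.choice(len(species), p=w / w.sum())]

def generation(pop, T_life, T0, rng):
    T = T0 + T_life
    for _ in range(int(sum(pop.values()) / P_KILL)):
        if not pop:
            break
        # (i) reproduction with probability p_off = 1/(1 + e^-f), eq. (7).
        i = pick(pop, rng)
        f = np.clip(fitness(i, pop, T), -60.0, 60.0)  # numerical safety
        if rng.random() < 1.0 / (1.0 + np.exp(-f)):
            child = i
            for bit in range(L):           # copy the L-bit genome,
                if rng.random() < P_MUT:   # flipping each bit with p_mut
                    child ^= 1 << bit
            pop[child] = pop.get(child, 0) + 1
        # (ii) a random individual dies with probability p_kill.
        i = pick(pop, rng)
        if rng.random() < P_KILL:
            pop[i] -= 1
            if pop[i] == 0:
                del pop[i]
    # Eq. (8): lagged update of the biotic temperature component.
    field = sum(heat_effect(s) * n for s, n in pop.items())
    return pop, LAM * T_life + (1 - LAM) * field

rng = np.random.default_rng(1)
pop, T_life = {int(rng.integers(2 ** L)): 500}, 0.0  # 500 copies of one species
for t in range(100):
    pop, T_life = generation(pop, T_life, T0=105.0, rng=rng)
```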
3 CONSTANT TEMPERATURE EXPERIMENTS

First we run the model with constant T_0. Figure 1 shows the behaviour of the population and temperature in one 'run' of the model for 10^4 generations. The basic features of the standard TNM - quasi-stable states punctuated by sharp transitions - persist Christensen et al. (2002). The important features of 'core' and 'cloud' Becker & Sibani (2014) are retained, as can be seen in Figure 2. The core species are the only ones with significant population, and these are the primary drivers of the temperature. The cloud species are mutants with small populations and random positive and negative effects on the temperature. These two runs show that life can move the temperature away from T_P or towards it; the question is what happens on average, in the long run.

Figures 3 (a) and (b) show the average population and average temperature for T_0 = 100 = T_P and T_0 = 105 = T_P + 2.5τ respectively. For (a), T_0 = T_P and the temperature fluctuates close to the abiotic temperature while the population increases logarithmically. This behaviour, increasing population at constant temperature, indicates that the TNM agents are optimising their mutual interactions, \sum_j J_{ij} N_j, as in the standard model, while keeping the temperature close to T_P. In (b), where T_0 > T_P, we see that the population increases while the temperature decreases. Thus the TNM agents are, on average, simultaneously optimising their mutual interactions and improving the temperature.

In Arthur & Nicholson (2022) we discussed Selection by Survival (SBS) and Sequential Selection with Memory (SSM). SBS is just differential survival, i.e. at late times we see systems with Gaian features because those are the only ones that could survive that long. SBS is a good null model; here it would predict that the average temperature tends towards T_P because runs that don't maintain T_P go extinct, leaving a small number of surviving runs that happen to operate at T_P. SSM would predict that the punctuations during individual runs drive the average temperature towards T_P. The numbers in the top row of Figure 3 (a) and (b) show the proportion of runs which survive up to that point in the experiment. In (b), for example, at T_0 > T_P about 9% of the runs have gone completely extinct (N = 0) by 10^5 generations, compared to 3% when T_0 = T_P. This is a relatively small increase in extinction rate compared to the relatively large decrease in the scaling factor 1/\left(1 + \left(\frac{T_0 - T_P}{\tau}\right)^2\right) \simeq 0.14.
Figure 4 shows the model runs in more detail for T_0 = 105 > T_P. Panels (a), (b) and (c) demonstrate that the runs can be split into two types: low temperature with a cooling core, and high temperature with a heating core. We will loosely call these 'Gaian' and 'non-Gaian' respectively. Panel (d) is the crucial plot. It shows the proportion of surviving runs over time (dashed line) and the proportion of the surviving runs that have T ≤ T_P. Here we see that while some runs do go extinct (SBS), among the surviving runs the proportion of Gaian states increases. This means that non-Gaian states transition to Gaian states, leading to more of them over time. This is exactly as sequential selection with memory predicts: (non-terminal) resets tend, on average, to improve conditions for life. We will discuss the exact mechanism in detail below.
This mechanism, Sequential Selection with Memory (SSM), was discussed in Arthur & Nicholson (2022) and briefly in Section 1. Each model run consists of multiple quasi-equilibria interrupted by quakes (Figure 1). These quakes completely reset the species which make up the core. The core species are (by definition) the ones with large populations which control the model dynamics, in this case the total population and temperature. As has been discussed in the TNM literature (especially Becker & Sibani (2014)), quakes occur spontaneously due to the evolution of a 'parasite' that disrupts the core. A parasite, a, is any species with significant reproduction probability that isn't a member of the core. To have a large probability to reproduce, the sum of its interactions must be high enough that its reproduction rate is higher than its death rate. Requiring p^{off}_a \geq p_{kill} in eq. (7) and solving for fitness gives:

\frac{\sum_j J_{aj} n_j}{1 + \left(\frac{T - T_P}{\tau}\right)^2} \geq \mu N + \ln\left(\frac{p_k}{1 - p_k}\right)    (9)

A lower total population makes it easier for a parasite to occur by decreasing the μN term. Low total population can occur either due to weak inter-species interactions in the core or due to unfavourable temperatures. However, because of the smaller number of reproduction events at low N, fewer mutants are generated. On the other hand, high populations raise the barrier and increase the number of mutation events.
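A small sketch of the barrier in eq. (9); the ln(p_k/(1 − p_k)) term follows from requiring p_off ≥ p_kill in eq. (7), and the function itself is ours, not from the paper:

```python
import numpy as np

def quake_barrier(N, mu=0.1, p_kill=0.1):
    """Right-hand side of eq. (9): the threshold that the temperature-scaled
    interaction sum of a would-be parasite must exceed. The log term comes
    from requiring p_off >= p_kill in eq. (7)."""
    return mu * N + np.log(p_kill / (1.0 - p_kill))

# The barrier is essentially mu*N: unfavourable temperatures suppress N
# and therefore lower the barrier, making quakes more likely.
print(quake_barrier(500))   # ~47.8 for a large, healthy core
print(quake_barrier(100))   # ~7.8 for a suppressed population
```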
Crossing the barrier requires finding a mutant a with sufficiently large, positive interactions with some or all species in the core. Large values of J_{aj} are rare (for our choice of distribution, exponentially so) and the rate of generating new mutants is low. Considering each reproduction event as L = 20 Bernoulli trials, the number of mutations in a reproduction follows a Binomial distribution B(L, p_mut) with mean L p_mut = 0.2 and variance L p_mut (1 − p_mut) ≃ 0.2. Thus the rate of exploration of the genetic space is quite slow. Ultimately the barrier height is more important than the increased rate of reproduction, and this is what explains the trend of (slowly) increasing population and stability in the TNM. For much more on this see Becker & Sibani (2014).
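The mutation-rate arithmetic above can be checked in a few lines, under the stated L and p_mut:

```python
L, p_mut = 20, 0.01
mean = L * p_mut                      # expected mutations per copy: 0.2
var = L * p_mut * (1 - p_mut)         # variance: 0.198 ~ 0.2
prob_exact_copy = (1 - p_mut) ** L    # ~0.818: most offspring are exact copies
print(mean, var, prob_exact_copy)
```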
Here we have to analyse how the temperature interacts with this
|
506 |
+
mechanism. Assume we have a case where 𝑇0 > 𝑇𝑃 as in Figure
|
507 |
+
4. Temperatures far from 𝑇𝑃 make a quake more likely by reducing
|
508 |
+
the total population and hence the barrier height. When a quake
|
509 |
+
occurs a new core is selected on the basis of strong inter-species
|
510 |
+
interactions that allow it to quickly ‘use up’ the carrying capacity.
|
511 |
+
This new core has an equal chance to be warming or cooling, because
|
512 |
+
of the symmetry of 𝐻𝑖. If it is warming we stay in a non-Gaian state,
|
513 |
+
if not we move to a Gaian state. In a Gaian state the barrier can
|
514 |
+
MNRAS 000, 1–18 (222)
|
515 |
+
|
516 |
+
A Gaian Habitable Zone
|
517 |
+
5
|
518 |
+
Figure 1. The column (a) shows the population (top row) and temperature (bottom row) where the background temperature is 𝑇0 = 𝑇𝑃 = 100. Column (b) shows
|
519 |
+
the population and temperature where 𝑇0 = 105. The temperature in (a) is above 𝑇0 and 𝑇𝑃 while the temperature in (b) is below both 𝑇0 and 𝑇𝑃.
|
520 |
+
be significantly higher, leading to a much more stable, long lived
|
521 |
+
core. In a non-Gaian state the barrier is low, meaning the state will
|
522 |
+
be relatively short lived, being vulnerable to parasites and to large
|
523 |
+
population fluctuations which may result in total extinction. As shown
|
524 |
+
in Figure 4 (d) over time this leads to more and more model runs in
|
525 |
+
a Gaian state.
|
526 |
+
To summarise: both mechanisms, SBS and SSM operate. Ga-
|
527 |
+
ian states have temperatures close to 𝑇𝑃, and thus high populations
|
528 |
+
which, in this model, makes them more stable. Non-Gaian states are
|
529 |
+
far from 𝑇𝑃 and have low populations. This makes them vulnerable
|
530 |
+
to total extinction (SBS) and punctuation which can take a non-Gaian
|
531 |
+
to a Gaian state (SSM). In this model, for this particular temperature,
|
532 |
+
SSM is a more important mechanism than SBS, though the ratio can
|
533 |
+
vary with 𝑇0, as we will explore in the next section.
|
These ideas can help explain why the Earth today is in a habitable state. Since its conception the Gaia hypothesis has been defined in numerous ways, ranging from a strong hypothesis that self-regulating feedback loops are an expected property of a life-planet coupled system, known as 'probable Gaia' Lenton & Wilkinson (2003), to a weaker hypothesis suggesting that while the Earth does have self-regulating feedback loops, these emerged merely by chance and Gaia is not an expected feature of a planet hosting life, known as 'lucky Gaia' Watson (2004). As Figure 5 shows, in our model the fraction of Gaian states increases over time. This suggests that for early life starting out on a planet a large amount of luck might be needed to initially start off in a Gaian state, but for surviving runs the probability of being in a Gaian state increases over time. This would suggest that when observing a biosphere, 'lucky Gaia' may be the case for young planets but 'probable Gaia' is operating for older ones.

Figure 2. Model snapshot at 𝑡 = 9000 generations for the runs (a) and (b) from Figure 1. Each node represents a different species, with the size of the node an indication of the species' population (upper and lower limits are applied to the point sizes for clarity). The colour of the nodes indicates the heating or cooling effect, 𝐻𝑖. The width of the arrows indicates the interaction strength 𝐽𝑖𝑗𝑛𝑗. Only interactions with core species are shown. In (a) the red (bottom-right) core species has a strong enough heating effect to overwhelm the cooling effect of the other core species, so this configuration has a net heating effect, as seen in Figure 1(a). In (b) both core species have a (weak) cooling effect, reducing the temperature, as seen in Figure 1(b).
The experiments in Figure 5 have considered systems with only internal perturbations, that is, those generated by the biosphere. However, real planets experience many abiotic perturbations, both rapid and slow, such as changes in volcanic activity, changes in solar luminosity or impacts by large objects Covey et al. (1994); Overpeck & Cole (2006); Goldblatt & Zahnle (2011). Life is thought to have emerged early on Earth, during a time when debris left over from the formation of the solar system was frequently colliding with the Earth. Biospheres in a non-Gaian state will be more susceptible than Gaian biospheres to such perturbations and will have a higher risk of going extinct. This is closely related to the 'Gaian bottleneck' hypothesis Chopra & Lineweaver (2016), which proposes that early in a planet's history, if life emerges it must quickly establish self-regulatory feedback loops to stabilise the climate of its planet in order to persist. If the biosphere fails then life goes extinct, the planet's abiotic processes take over and the planet reverts to an inhospitable state. What is novel here is the idea that, apart from total extinction, a planet can have a 'near death experience' where a mass extinction clears out a large fraction of the extant species. These mass extinctions are crucial for the exploration of the space of possible ecosystems Arthur & Sibani (2017) and ultimately lead to the emergence of long-lived stable states. Population diversity is known to significantly increase the resilience of ecosystems to perturbations Peterson et al. (1998); Luck et al. (2003), and additionally yeast Guan et al. (2012) and bacteria Lambert & Kussell (2014) have been shown to develop increased resilience to environmental stressors if exposed to them in the past. It is possible that large perturbations that do not eliminate all life are actually beneficial for evolving Gaia. Indeed, there may be evidence of this in Earth history, as it is thought that a period of global glaciation may have triggered the evolution of multi-cellular life Hoffman et al. (1998); Hedges (2004); Vincent et al. (2004); Boyle et al. (2007).
4 HABITABLE ZONE EXPERIMENTS

The habitable zone around a star is defined as the range of distances from the star at which liquid water could exist on the surface of a planet Kasting et al. (1993). Models demonstrate that the habitable zone is impacted by several factors, including the age and class of the host star Ramirez & Kaltenegger (2016), planetary mass Kopparapu et al. (2014), planetary atmospheric composition Pierrehumbert & Gaidos (2011), and the surface water content of the planet Abe et al. (2011). Additionally, a planet being within the habitable zone doesn't guarantee habitability, as a planet may have more than one possible climate state for the same stellar and orbital parameters, e.g. a temperate Earth versus a frozen Earth Goldblatt & Zahnle (2011). For a more extreme example, it is thought that Venus and Earth might represent alternate end states for the same planetary system, with small perturbations occurring early in their history influencing their modern-day states Lenardic et al. (2016).
Existing exoplanet surveys and models have identified that rocky planets can exist at a range of distances from their host star Domagal-Goldman et al. (2016). Thus it is natural to ask about the stability and persistence of Gaia across a range of background temperatures, some more conducive to life, some less. In this section we run many experiments in which we vary the background temperature 𝑇0 and look at averages over 1000 model histories. To mimic the idea of a habitable zone with and without biotic influence we compare two versions of the model: one where life cannot influence the temperature, 𝜎𝐻 = 0, and one where it can, 𝜎𝐻 = 0.05.

Figure 3. (a) shows the average (over all surviving model runs) of the population (top row) and temperature (bottom row) where the background temperature 𝑇0 = 100 = 𝑇𝑃. Column (b) shows the population and temperature where 𝑇0 = 105. The numbers next to the vertical dashed lines in the top row are the proportion of runs which have survived for that number of generations.

In Figure 5 we show the fraction of runs which survive for 10^5 generations in both scenarios. Perhaps surprisingly, the distributions are roughly similar: as the background temperature changes, a similar number of model runs survive for 10^5 generations whether life can affect the environment or not. This shows, at least, that species-environment interactions have little effect on the probability of total extinction and therefore on the presence or absence of life. However, as we saw in the previous section, the model runs can be split into Gaian and non-Gaian states. Figure 5 also shows the proportion of runs that have 𝑁 > 200. The value of 200 is not itself significant; what is important is the comparison between 𝜎𝐻 = 0 and 𝜎𝐻 = 0.05. Far from 𝑇𝑃, only the Gaian states can have large populations; in the other cases the total population is low and life is simply 'clinging on'. Importantly for exoplanet astronomy, a small pocket of life that is clinging on to existence is unlikely to produce a detectable biosignature.

Figure 4. 𝑇0 = 105. (a) shows the temperature at 𝑡 = 10^5 generations versus population. Colour corresponds to the heating (red) or cooling (blue) effect of the core. There are clearly two distinct clusters: one with (potentially) high population and low temperature and one with low population and high temperature. (b) and (c) show histograms of the temperature and population respectively. (d) shows the proportion of surviving runs at each generation as well as the proportion that have 𝑇 ≤ 𝑇𝑃.
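Schematically, the protocol of this section is the sweep below; run_tnm is a hypothetical stub standing in for the real model, and only the sweep parameters (𝑇0 from 80 to 120, 1000 histories, 10^5 generations, 𝜎𝐻 ∈ {0, 0.05}, the 𝑁 > 200 threshold) come from the text:

    import numpy as np

    rng = np.random.default_rng(2)

    def run_tnm(T0, sigma_H, n_gen=100_000):
        """Stub standing in for one full TNM history.
        Returns (survived, final_population); purely illustrative."""
        survived = rng.random() < np.exp(-abs(T0 - 100) / 40)  # toy extinction
        gaian = sigma_H > 0 and rng.random() < 0.5             # toy regulation
        N = rng.poisson(400) if (gaian or abs(T0 - 100) < 5) else rng.poisson(50)
        return survived, N

    for sigma_H in (0.0, 0.05):           # without / with temperature feedback
        for T0 in range(80, 121, 2):      # background temperature sweep
            outcomes = [run_tnm(T0, sigma_H) for _ in range(1000)]
            alive = [N for ok, N in outcomes if ok]
            frac_alive = len(alive) / len(outcomes)             # Fig. 5, survival
            frac_large = np.mean([N > 200 for N in alive]) if alive else 0.0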
Figure 6 shows the population of the model runs as a function of 𝑇0. We see that when 𝜎𝐻 = 0 the total population at 𝑇𝑃 is larger. At 𝜎𝐻 = 0 the TNM agents are only attempting to optimise inter-species interactions, not interactions and temperature together, and thus can find a better maximum. For example, strongly symbiotic cores may have a detrimental effect on the temperature, which is only relevant in the 𝜎𝐻 = 0.05 case. However, the population falls rather rapidly with 𝑇0 at 𝜎𝐻 = 0 compared to the 𝜎𝐻 = 0.05 case. We also see (from the colour gradient) that at 𝜎𝐻 = 0.05, for 𝑇0 far from 𝑇𝑃, only those runs which heat or cool as appropriate are capable of having large populations. Figure 7 demonstrates that the runs split into two clusters, as also shown in Figure 4, which can be labelled by their temperature; in combination with Figure 6 this demonstrates that large-population Gaian states may be observed even when 𝑇0 is far from 𝑇𝑃.

Figure 5. We run 1000 experiments at a variety of different 𝑇0 from 80 to 120 in steps of 2. As usual 𝑇𝑃 = 100 and 𝜏 = 2, for 10^5 generations. The figure shows the proportion of surviving runs and the proportion of large-population runs for a model with no temperature feedback, 𝜎𝐻 = 0, and a model with feedback, 𝜎𝐻 = 0.05. (Note the anomaly at 𝑇0 = 118 for 𝜎𝐻 = 0.05 is a consequence of very low statistics: of the 7/1000 surviving runs, 2 happen to have 𝑁 > 200.)
This simple Gaian model therefore predicts that if life plays only a minimal role in shaping its planet, so that we are looking at an abiotic habitable zone, there is a narrow range of radii around the host star where we might expect detectable biospheres. Outside this narrow range the chance of finding an inhabited planet drops dramatically. If, however, life does play a strong role in shaping and regulating its host planet, then we would expect to observe a much larger habitable zone. In the centre of this zone, where conditions are 'ideal', large-population states, and therefore potentially detectable biospheres, will be most probable; towards the edges of the habitable zone the probability of detectable biospheres falls, but remains much higher than an abiotic habitable zone would predict. Our model suggests that there is a chance to detect biosignatures quite far from the abiotic habitable zone, provided life can affect the global temperature. Our model also predicts that looking at planets outside the abiotic habitable zone will be more informative for testing ideas of Gaia theory, since within it we expect to see habitable planets whether Gaia is operating or not. Our model also demonstrates that finding a non-Gaian state within the biotic habitable zone is not incompatible with Gaia theory: where life can shape its planet, there remains the possibility for it to push its planet towards inhospitable conditions.

Figure 6. As Figure 5, 1000 experiments at different 𝑇0 values. The figure shows the population of each of the model runs. Some jitter in the x-direction is applied to the points for clarity. The left-hand panel shows the case where there is no species-environment interaction, 𝜎𝐻 = 0, and the right shows 𝜎𝐻 = 0.05, where the colour of the points reflects a heating (red) or cooling (blue) core.
5 INCREASING TEMPERATURE

Geological evidence suggests that life emerged on our planet very soon after surface conditions allowed Nisbet & Sleep (2001), implying that the probability of the emergence of life might be high for planets with the correct prerequisites; however, no alien life has yet been detected. The Gaian bottleneck hypothesis suggests an answer to this apparent contradiction: it proposes that for newly emerged life on a young planet there is a small window of opportunity in which life can establish self-regulatory feedback loops to maintain habitable conditions. If the biosphere succeeds, then planetary habitability can be maintained for long time spans; if the biosphere fails, surface conditions on the planet will rapidly become inhospitable, causing life to go extinct. This hypothesis is closely tied to the idea of an inhabitance paradox Goldblatt (2016) - that the long-term habitability of a planet depends directly on whether or not it is inhabited. In this section we investigate aspects of the inhabitance paradox in the TNM setting.

Figure 7. As Figure 5, showing the temperature in each of the model runs. The colour of the points reflects a heating (red) or cooling (blue) core. Only 𝜎𝐻 = 0.05 is shown; when 𝜎𝐻 = 0, 𝑇 = 𝑇0.
The classic Daisyworld experiment studies temperature regulation by life in the face of increasing solar luminosity. We can perform a similar experiment by increasing 𝑇0 over the course of the model runs. Figure 8 shows population and temperature for individual model runs where the background temperature 𝑇0 increases linearly from 𝑇𝑖𝑛𝑖𝑡 = 𝑇𝑃 = 100 up to 𝑇0 = 105 over the course of 10^4 generations. The key observation is that the actual temperature 𝑇 (bottom row of Figure 8) increases more slowly than 𝑇0, meaning that life is regulating the temperature. The only way the TNM can regulate without changing the composition of the core is by altering the populations of the core species. In Figure 8 we can see that the temperature increase during an equilibrium is slowed by increasing or decreasing the population, and thus life's contribution to the total temperature.
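A sketch of this forcing protocol, assuming (consistent with the model as described) that the realised temperature is the background value plus life's summed contribution Σᵢ 𝐻𝑖𝑛𝑖; the example values at the end are made up:

    import numpy as np

    T_P, T_INIT, T_FIN, N_GEN = 100.0, 100.0, 105.0, 10_000

    def T0(t):
        # Daisyworld-style forcing: linear ramp of the background
        # temperature, standing in for rising solar luminosity.
        return T_INIT + (T_FIN - T_INIT) * min(t / N_GEN, 1.0)

    def temperature(t, H, n):
        # Realised temperature: background plus the population-weighted
        # sum of each species' heating/cooling effect H_i.
        return T0(t) + np.dot(H, n)

    H = np.array([-0.02, 0.01])      # illustrative heating/cooling effects
    n = np.array([300, 100])         # illustrative populations
    print(temperature(5_000, H, n))  # 102.5 + (-5.0) = 97.5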
Figure 9 shows the configuration of the model agents at a particular time in the history of the simulation where the core - the group of species with significant reproduction probability - is stable and life is adapting to the temperature change. Two different cases are shown in (a) and (b). In case (a), between roughly 𝑡 = 4000 and 𝑡 = 10000, the total population is increasing, which has the effect of slowing the temperature increase. Figure 9(a) shows that the cloud (by definition, the species not in the core) has a roughly equal number of heating and cooling species, each with a small population, so the cloud (i.e. the majority of species) does not participate in temperature regulation. Of the four species making up the core, two have a cooling effect, one is heating and one is approximately neutral. The upper-left and lower-right species happen to have 𝐻𝑖 = −0.047 and 𝐻𝑖 = 0.046 respectively, as well as roughly equal populations, so their effects cancel out; the result is a net cooling effect which strengthens as the core population increases.
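The core's net influence is just the population-weighted sum of its 𝐻𝑖 values. With the two large-|𝐻𝑖| species cancelling, the sign is set by the remaining members. Only −0.047 and 0.046 come from the run shown; the populations and the two small 𝐻𝑖 values below are made up for illustration:

    import numpy as np

    H_core = np.array([-0.047, 0.046, -0.008, 0.001])  # two cancel; net cooling
    n_core = np.array([300, 300, 250, 250])            # illustrative populations

    print(np.dot(H_core, n_core))  # < 0, so growing the whole core cools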
Note that in Figure 8(a) during this period the temperature is below 𝑇𝑃 = 100. As 𝑇0 increases it pushes 𝑇 towards 𝑇𝑃, so the fitness of every species,

f_i = \frac{\sum_j J_{ij} n_j}{1 + \left( (T - T_P)/\tau \right)^2} - \mu N,

increases; therefore the population increases, which increases the cooling effect and (partially) offsets the abiotic temperature increase.
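In code the reproduction weight reads as follows; 𝜏 = 2 matches the value used in the experiments, while 𝜇 is whatever resource-limitation constant the model is run with (the 0.1 here is only a placeholder):

    import numpy as np

    def fitness(i, J, n, T, T_P=100.0, tau=2.0, mu=0.1):
        """f_i as in the equation above: the interaction payoff sum_j J_ij n_j,
        damped as T moves away from the shared optimum T_P, minus the
        resource-limitation term mu * N."""
        interaction = J[i] @ n                    # sum over extant populations
        damping = 1.0 + ((T - T_P) / tau) ** 2    # quadratic off-optimum penalty
        return interaction / damping - mu * n.sum()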
Figures 8(b) and 9(b) show the opposite case. The core has a net heating effect and the temperature is above 𝑇𝑃. Increasing 𝑇0 moves the temperature further from 𝑇𝑃, reducing the fitness and hence the population, and therefore reducing the heating effect of life.

This is a regulation mechanism known as 'rein control', where the temperature of the system can be thought of as being 'pulled' in two different directions by different reins, in this case 𝑇0 and the heating or cooling effect of life. As all species share the same 𝑇𝑃, it is the overall heating or cooling impact of the TNM community that is important for temperature regulation. Looking first at the case of a cooling community, Figure 8(a), after 𝑡 ≈ 4000 generations we have 𝑇 < 𝑇𝑃 < 𝑇0. In this case, when 𝑇 < 𝑇𝑃, as 𝑇0 increases it moves 𝑇 closer to 𝑇𝑃 and boosts the growth rate, and hence the size of the cooling core, slowing the rate of heating. Once 𝑇 ≃ 𝑇𝑃 the 𝑇0 rein is pulling away from 𝑇𝑃, limiting further growth, and so the system stabilises. These feedback loops for an overall cooling TNM community are shown in Figure 10.
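The rein-control argument can be caricatured with a one-dimensional iteration. All coefficients below are illustrative; the only structural assumptions are a growth rate peaked at 𝑇𝑃 and a temperature pulled jointly by 𝑇0 and a net-cooling biosphere:

    T0, T_P, TAU = 110.0, 100.0, 2.0
    H_NET = -0.01            # net cooling per individual (illustrative)

    N = 1000.0               # seed the community near carrying capacity
    for _ in range(5_000):
        T = T0 + H_NET * N                               # two reins act on T
        growth = 1.0 / (1.0 + ((T - T_P) / TAU) ** 2) - 0.5
        N = max(N + 10.0 * growth, 0.0)

    print(N, T0 + H_NET * N)  # settles near N = 1200, T = 98: regulation
                              # just below T_P, as the Figure 10 loops predict

The fixed point sits where the off-optimum penalty exactly balances growth, i.e. slightly below 𝑇𝑃, which is the 'regulation below 𝑇𝑃' behaviour described in the Figure 10 caption.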
Figure 8. A single model run where the background temperature 𝑇0 increases over time. The population is shown in the top row and temperature in the bottom row. The two columns show the two different types of temperature regulation by the core. On the left, after 4000 generations the temperature is regulated by increasing the population. On the right, between 4000 and 8000 generations, temperature is regulated by decreasing the population.
In Figure 8(b), by 𝑡 ≈ 2000 the TNM community is overall heating and 𝑇 > 𝑇0 > 𝑇𝑃. In this scenario any further growth of the community would increase 𝑇, which would decrease the growth rate. On the other hand, a reduction in population reduces its heating effect, which partially offsets the increase in 𝑇0, and so the real temperature 𝑇 increases more slowly. Even though 𝑇0 > 𝑇𝑃, the heating TNM community and 𝑇0 are still 'pulling' the temperature in opposite directions, since a reduction in the population will cool the environment, moving 𝑇 closer to 𝑇𝑃. When 𝑇0 > 𝑇𝑃, a heating TNM community can never achieve a 𝑇 close to 𝑇𝑃.

At 𝑡 ≈ 9000 there is a quake and 𝑇 rapidly drops below 𝑇𝑃 as the TNM community switches from overall heating to overall cooling. This example demonstrates that a biosphere in a non-Gaian state can become 'unstuck' and transition to a Gaian state if life can cling on for long enough. Twice during Earth's history it is thought that the planet was covered in ice from poles to equator - known as a Snowball Earth Hoffman et al. (1998). These Snowball Earth states persisted for millions of years, and although there is evidence that a diversity of life survived them, a frozen planet would present fewer niches for life than a thawed planet (indeed, on Earth biodiversity is lowest at the poles Rutherford et al. (1999)). Such a state could represent a non-Gaian biosphere clinging on, and both Earth history and our experiments demonstrate that observing a planet in a non-Gaian state doesn't mean that it will always remain so.

Figure 9. Model snapshot at 𝑡 = 7000 generations for the runs (a) and (b) from Figure 8. (a) The core has an overall cooling effect; (b) the core has a heating effect.

Figure 10. Feedback loops between community population, N, and environment temperature, T, for an overall cooling TNM community. A + symbol (also indicated with a solid arrow) indicates that an increase in the source leads to an increase in the sink, e.g. an increase in population leads to an increase in temperature. A - symbol (also indicated with a dashed arrow) indicates that an increase in the source leads to a decrease in the sink, e.g. an increase in the temperature leads to a decrease in the total population. A feedback loop with an overall positive sign (determined by multiplying each sign in the loop) indicates a runaway feedback loop, whereas a feedback loop with an overall negative sign indicates a stable feedback loop. Therefore, for a cooling TNM community, temperature regulation occurs below 𝑇𝑃.
5.1 Averages

Again, we are interested in what happens in the long run, on average. Figure 11 shows that, for our setup where 𝑇0 > 𝑇𝑃, on average the temperature is regulated below 𝑇0. Only those communities that have strong mutually symbiotic interactions and a cooling effect are likely to survive. If the rate of heating is not too strong (top row), most of the runs survive, the population grows logarithmically over time, and the proportion of runs at or below 𝑇𝑃 falls at a much slower rate than the background temperature increases. Since most of the runs survive we can't have Selection by Survival, so Sequential Selection with Memory must be responsible for this behaviour. The 𝑁 versus 𝑇 plot in the top row of Figure 11 shows that we still have a split between model runs with a heating core and a cooling core, where only those with a cooling core can have large 𝑁.

Figure 11. The top row is the scenario where we heat from 𝑇0 = 100 = 𝑇𝑃 to 𝑇𝑓𝑖𝑛 = 105 over the course of 10^4 generations. The first column is the average population; the numbers in black are the proportion of runs which have survived, and the cyan italic numbers show the proportion of survivors which have 𝑇 ≤ 𝑇𝑃. The second column shows the average temperature and 𝑇0. The final column shows 𝑁 versus 𝑇 for all the model runs after 10^4 generations, when 𝑇0 = 𝑇𝑓𝑖𝑛. The second row is the same as the first but with 𝑇𝑓𝑖𝑛 = 120.
The bottom row of Figure 11 shows the case where the heating is much more aggressive, with 𝑇𝑓𝑖𝑛 = 120. With a constant background 𝑇0 = 120, around 27% of model runs survive for 10^4 generations. Figure 11 shows that once the temperature goes above ∼ 110 the runs start to go extinct, though a larger proportion, 68%, survive until 𝑡 = 10^4. Surviving runs are divided into two groups: runs where a small population is 'clinging on' at high 𝑇, and runs where a large, cooling population can be maintained.
We investigate this further in Figure 12, where we directly compare runs with a constant background temperature 𝑇0 = 110 = 𝑇𝑃 + 5𝜏 to runs where the temperature gradually increases to 𝑇0 = 110 over 10^4 generations. At 10^4 generations, when both systems experience the same 𝑇0, the runs which have been heated gradually are doing better: more of them survive, they have higher populations and lower temperatures. This simple observation has a few implications. First, it suggests that if life occurs early, as soon as conditions are optimal for it, then it can survive longer and have a greater influence on the long-term habitability of its planet. Second, it suggests that more realistic models aiming to map out the habitable zone around a star should consider whether the planet has ever been hospitable for life. In that case, planets with abiotic parameters, like 𝑇0, that appear inhospitable at the time of observation may nevertheless have been able to maintain habitable temperatures. This phenomenon - where life is key to preserving habitability - is the inhabitance paradox: the long-term habitability of a planet isn't possible without life maintaining that habitability Goldblatt (2016). It also ties closely to the Gaian bottleneck hypothesis Chopra & Lineweaver (2016): life emerging during a window of opportunity can prevent the environment from degrading, even as 𝑇0 changes.
Finally, Figure 13 studies the effect of the rate of heating by comparing two scenarios where 𝑇0 is increased from 𝑇𝑃 = 100 to 𝑇𝑓𝑖𝑛 = 110 over 10^4 versus 10^5 generations. The 'slow' heating scenario could be thought of as mimicking something like the gradually increasing solar luminosity, while the fast heating scenario is more akin to the rapid onset of global glaciation Overpeck & Cole (2006). Figure 13 shows that, in general, slower heating leads to more Gaian states: the population is higher and the final temperature is lower. In fact the average population stops increasing in the fast heating case, as abiotic conditions degrade faster than SSM can operate, while the slow heating case shows a continuously increasing population up to 10^5 generations. Thus, if SSM is to operate, the larger the separation between abiotic and biotic timescales (e.g. geologic versus evolutionary), the more likely we are to observe a Gaia.
Figure 12. Comparing increasing 𝑇0 = 100 → 110 to constant 𝑇0 = 110. Top left is average population; the numbers show the number of surviving runs at each time-step. Top right shows average temperature and 𝑇0. Bottom left shows all of the runs (see Figure 4) for the increasing temperature case, and bottom right shows the runs for the constant temperature case.
6 CONCLUSIONS

Models such as the one described here help us to understand how planetary regulation arises from 'selfish' individuals. Gaia is a prime example of an emergent system - one where the whole has properties its parts do not. However, Gaia was first discussed some years before emergence and complexity thinking were common. Lovelock and others discussing Gaia at the macro level, for example talking about her health with the notion of Geophysiology Lovelock (1989), have been harshly criticised. There have been two primary criticisms: the first argues that Gaia is simply a metaphor and not a scientific theory Kirchner (1989), and the second argues that episodes from Earth history where life generated hostile conditions are strong evidence against Gaia Ward (2009). We believe the notion of Entropic Gaia Arthur & Nicholson (2022) and our discussion of selection principles answer both of these criticisms.

First, to address the charge that Gaia is 'just a metaphor', it is instructive to discuss some other emergent systems. A gas is 'just' a collection of individual atoms; however, emergent properties like pressure and temperature are not features of individual atoms but are still very much real. An organism is 'just' a system of chemical reactions; however, biology is not just applied chemistry - it is legitimate and useful to reason about cells. The economy is a phenomenon that emerges out of the production and consumption patterns of millions of individuals; depressions, recessions, asset bubbles and so on are properties of the whole system that have real explanatory power. Of course we can have incorrect theories about gases, cells or economies, but this does not make it illegitimate to reason about whole systems. When talking about life at planetary scale, we talk about something called 'Gaia'. This is a metaphor in the same sense as an organism or an economy: a metaphor that admits rigorous micro-foundations and which can be very productive for understanding a system or a collection of systems. In the context of biosignature detection, where we may have potentially very many 'Gaias' and very limited information about the processes going on inside them, a holistic theory is crucial.

Figure 13. Increasing 𝑇0 = 100 → 110 over 10^4 generations and over 10^5 generations. Top left is average population; the numbers show the fraction of surviving runs at each time-step. Top right shows average temperature and 𝑇0 (note the log scale makes the linearly increasing 𝑇0 look exponential); the numbers give the average temperature at the end of the experiment. Bottom left shows all of the runs at the end of the fast heating experiment, and bottom right shows the runs in the slow heating scenario.
The second class of criticisms is directly addressed by our idea that Gaia arises due to a selection principle operating on species networks. To briefly reiterate: sequential selection posits a type of punctuated equilibrium Eldredge & Gould (1972), characterised by stable periods interrupted by catastrophes. Models of co-evolution such as the TNM and others (e.g. Kauffman & Weinberger (1989)) show exactly this kind of behaviour. Entropic Gaia is the argument that these stable periods get longer over time. In the TNM this is for the simple reason that each punctuation is not a complete reset: the next stable period emerges from the debris of the previous equilibrium. The species networks that can establish themselves must have high population growth rates, so that they saturate the carrying capacity, while also not being self-limiting. High populations mean more diversity, which means even more 'debris' during the next reset. In this view, periods of disregulation are not evidence against Gaia; they are an integral part of how she arises.
To show the use of such a theory, in this work we have, within a concrete and fairly general modelling framework, investigated some pressing questions of astrobiology through Gaia theory. In Section 3 we studied the effect of life on the ability of a planet to sustain life in suboptimal abiotic conditions. This led us to propose the idea of the Gaian habitable zone, as opposed to the standard abiotic habitable zone. Our results predict that Gaia extends the habitable zone around a star while making the abiotic habitable zone slightly less hospitable. This has a straightforward and testable implication: search for life outside the abiotic habitable zone as a signature of Gaia.

In Section 5 we studied the effect of a deteriorating abiotic environment to address the idea of the Gaian bottleneck. Life's chances of long-term survival, and the emergence of Gaia, are both more likely if life can 'catch' a window of high habitability (in this model, where 𝑇0 = 𝑇𝑃). Life can then, on average, maintain better conditions. Again this has implications for the search for life: planets which were once inside but are currently outside the abiotic habitable zone may host life. Again, Gaia expands the boundary of habitability and inhabitance.
Both Selection by Survival (SBS) and Sequential Selection with Memory (SSM) play a role in determining the likelihood of a Gaian planet. Nearer the centre of the abiotic habitable zone SSM is the main mechanism for generating Gaias, and towards the edges SBS becomes more important. Finding a non-Gaian planet at the centre of the abiotic habitable zone is not incompatible with Gaia theory: if life can strongly influence its environment, it can degrade it. The results of this model suggest that if life can cling on, and abiotic conditions do not degrade too much, then the planet can become 'unstuck' through the evolution of species which regulate the temperature. To map out the Gaian habitable zone around a particular star, or class of star, will require fusing detailed abiotic models with models of biogeochemistry. Some steps in this direction were taken in Nicholson et al. (2022), where fine details such as the lifespan or maintenance energy requirements of the biosphere were shown not to affect the general conclusions about life's effect on potential biosignatures. If this is the case generally, and this framework can be expanded to cover a range of biotic scenarios, then we may be able to produce detailed predictions of the Gaian habitable zone without needing to know the population-level details of any alien life. Identifying potential metabolic pathways and limiting abiotic factors on microbial growth (e.g. resource limitation) would be sufficient for robust biosignature predictions.
In summary, we propose a statistical theory of planetary habitability, with strong and testable implications for the search for alien life. Our model, as well as Earth history, teaches us that a Gaian planet can emerge from periods of disregulation and low habitability. Ultimately, this suggests a wider range of habitable and inhabited planets than abiotic models would predict.

ACKNOWLEDGEMENTS

This work was supported by a Leverhulme Trust research project grant [RPG-2020-82].

DATA AVAILABILITY

The code is available on request from the authors.
REFERENCES

Abe Y., Abe-Ouchi A., Sleep N. H., Zahnle K. J., 2011, Astrobiology, 11, 443
Amundsen D. S., et al., 2016, Astronomy & Astrophysics, 595, A36
Arthur R., Nicholson A., 2017, Journal of Theoretical Biology, 430, 177
Arthur R., Nicholson A., 2022, Journal of Theoretical Biology, 533, 110940
Arthur R., Sibani P., 2017, Physica A: Statistical Mechanics and its Applications, 471, 696
Arthur R., Nicholson A., Sibani P., Christensen M., 2017, Computational and Mathematical Organization Theory, 23, 1
Becker N., Sibani P., 2014, EPL (Europhysics Letters), 105, 18005
Boucher O., et al., 2012, Environmental Research Letters, 7, 024013
Boutle I. A., Mayne N. J., Drummond B., Manners J., Goyal J., Lambert H. F., Acreman D. M., Earnshaw P. D., 2017, Astronomy & Astrophysics, 601, 13
Boyle R. A., Lenton T. M., Williams H. T., 2007, Geobiology, 5, 337
Catling D. C., et al., 2018, Astrobiology, 18, 6
Chopra A., Lineweaver C. H., 2016, Astrobiology, 16, 7
Christensen K., Di Collobiano S. A., Hall M., Jensen H. J., 2002, Journal of Theoretical Biology, 216, 73
Claudi R., 2017, Proceedings of Science
Collins M., 2021, LPI Contributions, 2549, 7001
Covey C., Thompson S. L., Weissman P. R., MacCracken M. C., 1994, Global and Planetary Change, 9, 263
Daines S. J., Mills B. J., Lenton T. M., 2017, Nature Communications, 8, 1
Domagal-Goldman S. D., et al., 2016, Astrobiology, 16, 561
Downing K., Zvirinsky P., 1999, Artificial Life, 5, 291
Eldredge N., Gould S. J., 1972, in Schopf T. J. M., ed., Models in Paleobiology, pp 82–115
Fauchez T. J., et al., 2021, The Planetary Science Journal, 2, 106
Ford Doolittle W., 2014, Biology & Philosophy, 29, 415
Goldblatt C., 2016, arXiv preprint arXiv:1603.00950
Goldblatt C., Zahnle K. J., 2011, Nature, 474, E1
Guan Q., Haroon S., Bravo D. G., Will J. L., Gasch A. P., 2012, Genetics, 192, 495
Harding S. P., 1999, Tellus B, 51, 815
Hedges S. B., 2004, Systematics Association Special Volume, 66, 27
Hoffman P. F., Kaufman A. J., Halverson G. P., Schrag D. P., 1998, Science, 281, 1342
Huggett R. J., 1999, Global Ecology and Biogeography, 8, 425
Kasting J. F., Whitmire D. P., Reynolds R. T., 1993, Icarus, 101, 108
Kauffman S. A., Weinberger E. D., 1989, Journal of Theoretical Biology, 141, 211
Kharecha P., Kasting J., Siefert J., 2005, Geobiology, 3, 53
Kiang N. Y., Domagal-Goldman S., Parenteau M. N., Catling D. C., Fujii Y., Meadows V. S., Schwieterman E. W., Walker S. I., 2018, Astrobiology, 18
Kirchner J. W., 1989, Reviews of Geophysics, 27, 223
Kirchner J. W., 2003, Climatic Change, 58, 21
Kopparapu R. K., Ramirez R. M., SchottelKotte J., Kasting J. F., Domagal-Goldman S., Eymet V., 2014, The Astrophysical Journal Letters, 787, L29
Krissansen-Totton J., Thompson M., Galloway M. L., Fortney J. J., 2022, arXiv
Lambert G., Kussell E., 2014, PLoS Genetics, 10, e1004556
Landi P., Minoarivelo H. O., Brännström Å., Hui C., Dieckmann U., 2018, in Systems Analysis Approach for Complex Global Challenges. Springer, pp 209–248
Le Bayon R.-C., Bullinger G., Schomburg A., Turberg P., Brunner P., Schlaepfer R., Guenat C., 2021, Hydrogeology, Chemical Weathering, and Soil Formation, pp 81–103
Lenardic A., Crowley J., Jellinek A., Weller M., 2016, Astrobiology, 16, 551
Lenton T., Watson A., 2013, Revolutions that Made the Earth. OUP Oxford
Lenton T. M., Wilkinson D. M., 2003, Climatic Change, 58, 1
Lenton T. M., Daines S. J., Dyke J. G., Nicholson A. E., Wilkinson D. M., Williams H. T., 2018a, Trends in Ecology & Evolution, 33, 633
Lenton T. M., Daines S. J., Mills B. J. W., 2018b, Earth-Science Reviews, 178, 1
Ligrone R., 2019, in Biological Innovations that Built the World. Springer, pp 129–154
Lovelock J. E., 1965, Nature, 207, 568
Lovelock J. E., 1989, Reviews of Geophysics, 27, 215
Lovelock J. E., Margulis L., 1974, Tellus, 26, 2
Luck G. W., Daily G. C., Ehrlich P. R., 2003, Trends in Ecology & Evolution, 18, 331
Nicholson A. E., Wilkinson D. M., Williams H. T., Lenton T. M., 2018, Monthly Notices of the Royal Astronomical Society, 477, 727
Nicholson A., Daines S., Mayne N., Eager-Nash J., Lenton T., Kohary K., 2022, Monthly Notices of the Royal Astronomical Society
Nisbet E. G., Sleep N. H., 2001, Nature, 409, 1083
Overpeck J. T., Cole J. E., 2006, Annual Review of Environment and Resources, 31, 1
Peterson G., Allen C. R., Holling C. S., 1998, Ecosystems, 1, 6
Pierrehumbert R., Gaidos E., 2011, The Astrophysical Journal Letters, 734, L13
Quanz S. P., et al., 2021, Experimental Astronomy, pp 1–25
Ramirez R. M., Kaltenegger L., 2016, The Astrophysical Journal, 823, 6
Rutherford S., D'Hondt S., Prell W., 1999, Nature, 400, 749
Schwieterman E. W., et al., 2018, Astrobiology, 18, 663
Seager S., 2013, Science, 340, 577
Smolin L., 2007, Universe or Multiverse, pp 323–366
Snellen I. A. G., et al., 2021, Experimental Astronomy
Vincent W. F., Mueller D., Hove P. V., Howard-Williams C., 2004, in Origins. Springer, pp 483–501
Volk T., 2012, Gaia's Body: Toward a Physiology of Earth. Springer Science & Business Media
Ward P., 2009, The Medea Hypothesis. Princeton University Press
Watson A. J., 2004, Scientists Debate Gaia, pp 201–208
Watson A. J., Lovelock J. E., 1983, Tellus B: Chemical and Physical Meteorology, 35, 284
Williams H. T., Lenton T. M., 2007, Oikos, 116, 1087
Wood A. J., Ackland G. J., Dyke J. G., Williams H. T., Lenton T. M., 2008, Reviews of Geophysics, 46
Worden L., 2010, Ecological Economics, 69, 762
Zakem E. J., Polz M. F., Follows M. J., 2020, Nature Communications, 11, 1

This paper has been typeset from a TeX/LaTeX file prepared by the author.
49A0T4oBgHgl3EQfNv86/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
69E3T4oBgHgl3EQfRQm6/content/2301.04420v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c620400a762f119d0f48726bd0aca59861fd63722c3cac663550c0573ad63d7
+size 468324
69E3T4oBgHgl3EQfRQm6/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2097f4b60d0f4daf26bcac824b5e6b43b076405a8d868d57b81fd4f9b23ee48e
+size 7405613
69E3T4oBgHgl3EQfRQm6/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b310749d46c49138d10a54c1b1327c6191ce575f4e97049071057eebd1af1c0d
+size 225716
69FJT4oBgHgl3EQfmCy-/content/2301.11586v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:010eaed0507243df3e52eca497fef1f831eb7be5a511f86c02658c23f2a3f26e
+size 1192015
69FJT4oBgHgl3EQfmCy-/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f474601b6d9bb52ebb40a0da4d7fca520d683c04026ec8493c8a8b906f07356d
+size 6029357
69FJT4oBgHgl3EQfmCy-/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8513a8a60f24187ae3769d8c46e19081f8a2af7dc57b4682ac638da25d383ac
+size 220606
79AzT4oBgHgl3EQfE_pK/content/2301.01002v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a2387df83f0a742a8a10e4bdda6f3e57f89392fff76a7fa7a21ae30046679de
+size 13681998
79AzT4oBgHgl3EQfE_pK/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0363ab6a35bad0924b92a4397ed4d048f0a42bc5b2db491e1f6b92a19205778f
+size 3342381
8NE4T4oBgHgl3EQf2w06/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d5fd3ac3244c7fa44b3dce1ee4bbacda9941f7870158dfdacc4d5602a76e5eb
+size 2359341
8dAzT4oBgHgl3EQfSPuZ/content/2301.01230v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad0173e0a79177c7578c40b30be9ec5e9e686ae16bf4bd9dd284c6c4765184b3
+size 4745516
8dAzT4oBgHgl3EQfSPuZ/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f700cd98a0e2de3760cd6b2e67171a2a3ba28abc37c9e35e52103bc067f01d71
+size 4194349
AdFAT4oBgHgl3EQfrh5X/content/tmp_files/2301.08653v1.pdf.txt ADDED
@@ -0,0 +1,1303 @@
1 |
+
An Analysis of the Automatic Bug Fixing
|
2 |
+
Performance of ChatGPT
|
3 |
+
Dominik Sobania
|
4 |
+
Johannes Gutenberg University Mainz
|
5 |
+
Email: [email protected]
|
6 |
+
Carol Hanna
|
7 |
+
University College London
|
8 |
+
Email: [email protected]
|
9 |
+
Martin Briesch
|
10 |
+
Johannes Gutenberg University Mainz
|
11 |
+
Email: [email protected]
|
12 |
+
Justyna Petke
|
13 |
+
University College London
|
14 |
+
Email: [email protected]
|
15 |
+
Abstract—To support software developers in finding and fixing
|
16 |
+
software bugs, several automated program repair techniques have
|
17 |
+
been introduced. Given a test suite, standard methods usually
|
18 |
+
either synthesize a repair, or navigate a search space of software
|
19 |
+
edits to find test-suite passing variants. Recent program repair
|
20 |
+
methods are based on deep learning approaches. One of these
|
21 |
+
novel methods, which is not primarily intended for automated
|
22 |
+
program repair, but is still suitable for it, is ChatGPT. The
|
23 |
+
bug fixing performance of ChatGPT, however, is so far unclear.
|
24 |
+
Therefore, in this paper we evaluate ChatGPT on the standard
|
25 |
+
bug fixing benchmark set, QuixBugs, and compare the perfor-
|
26 |
+
mance with the results of several other approaches reported in
|
27 |
+
the literature. We find that ChatGPT’s bug fixing performance is
|
28 |
+
competitive to the common deep learning approaches CoCoNut
|
29 |
+
and Codex and notably better than the results reported for the
|
30 |
+
standard program repair approaches. In contrast to previous
|
31 |
+
approaches, ChatGPT offers a dialogue system through which
|
32 |
+
further information, e.g., the expected output for a certain input
|
33 |
+
or an observed error message, can be entered. By providing such
|
34 |
+
hints to ChatGPT, its success rate can be further increased, fixing
|
35 |
+
31 out of 40 bugs, outperforming state-of-the-art.
|
36 |
+
Index Terms—Automated program repair, automatic bug fix-
|
37 |
+
ing, ChatGPT, Codex, language models.
|
38 |
+
I. INTRODUCTION
|
39 |
+
Complex software usually contains undiscovered bugs in its
|
40 |
+
source code. The later these are found, the more far-reaching
|
41 |
+
consequences these can have. Uncorrected bugs in software
|
42 |
+
can lead to failures of essential systems, which can result in
|
43 |
+
high economic costs [1].
|
44 |
+
In order to support programmers in finding and fixing
|
45 |
+
software errors, automated program repair (APR) systems have
|
46 |
+
been introduced that automatically suggest software patches
|
47 |
+
to correct the detected errors [2], [3]. For instance, Haralds-
|
48 |
+
son et al. [4] suggest an approach based on genetic improve-
|
49 |
+
ment (GI) [5] that tracks emerging bugs during a workday and
|
50 |
+
searches for potential fixes for them overnight. The following
|
51 |
+
morning the programmers get a list of suggestions which
|
52 |
+
should help fix the detected bugs.
|
53 |
+
Standard methods for automated program repair can be
|
54 |
+
classified into two categories: the generate-and-validate ap-
|
55 |
+
proaches mutate software guided by a search strategy, while
|
56 |
+
semantics-driven (or synthesis-based) approaches use a con-
|
57 |
+
straint solver to synthesize repairs [3]. The generate-and-
|
58 |
+
validate ones have first seen industrial uptake [4]. One of
|
59 |
+
the key disadvantages of standard approaches to APR is their
|
60 |
+
running cost. The generate-and-validate ones usually rely on
|
61 |
+
test suites to verify program correctness, while synthesis-based
|
62 |
+
ones on calls to a constraint solver. Both validation strategies
|
63 |
+
are costly, so typical APR tools take hours to run before a
|
64 |
+
viable patch is presented to the developer.
|
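To make the generate-and-validate idea concrete, here is a minimal sketch of such a repair loop (our own illustration, not code from any of the cited tools; mutate and tests are placeholder arguments):

import random

def generate_and_validate(program, mutate, tests, budget=1000):
    # Illustrative skeleton of generate-and-validate APR: repeatedly mutate the
    # program and return the first variant that passes the entire test suite.
    for _ in range(budget):
        candidate = mutate(program, random)
        if all(test(candidate) for test in tests):
            return candidate  # a plausible patch; it may still overfit the tests
    return None

Every iteration re-runs the full test suite, which is exactly the validation cost described above.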
65 |
+
Most recently, program repair tools based on deep learn-
|
66 |
+
ing (DL) approaches have been introduced [6]. These learn bug
|
67 |
+
fixing patterns from existing databases and treat the automated
|
68 |
+
program repair problem as a neural machine translation task,
|
69 |
+
producing a ranking of, sometimes hundreds of, patches.
|
70 |
+
Unlike standard approaches, such generated patches are not
|
71 |
+
usually evaluated against a test suite, or other automated
|
72 |
+
verification strategy, so may not even compile. Nevertheless,
|
73 |
+
DL-based program repair has shown competitive results to
|
74 |
+
standard approaches [6].
|
75 |
+
In recent years, several large-scale language models based
|
76 |
+
on the Transformer architecture [7] have been introduced, such
|
77 |
+
as CodeBERT [8], PyMT5 [9], and Codex [10], which can also
|
78 |
+
process and extend source code and achieve comparable results
|
79 |
+
to standard approaches on various coding tasks [11]. A large-
|
80 |
+
scale language model based on the Transformer architecture
|
81 |
+
that has recently received great attention is ChatGPT.1 With
|
82 |
+
ChatGPT not only text input can be extended, but it is even
|
83 |
+
possible to have a conversation with the language model and
|
84 |
+
the previous chat history is taken into account for answer
|
85 |
+
generation. In addition to very general or subject-specific
|
86 |
+
topics, ChatGPT can also be used to discuss source code, e.g.,
|
87 |
+
to ask for a suggestion for a fix of incorrect code. However,
|
88 |
+
the quality of these suggestions is still unclear.
|
89 |
+
Therefore, in this work we evaluate and analyse the au-
|
90 |
+
tomatic bug fixing performance of ChatGPT. Moreover, we
|
91 |
+
provide a comparison with results reported in the literature
|
92 |
+
obtained using state-of-the-art APR approaches and Codex.
|
93 |
+
We chose the QuixBugs [12] benchmark set for our study, as
|
94 |
+
it contains small, yet challenging programs for current APR
|
95 |
+
1https://openai.com/blog/chatgpt/ (accessed January 18, 2023).
|
96 |
+
arXiv:2301.08653v1 [cs.SE] 20 Jan 2023
|
97 |
+
|
98 |
+
approaches. We consider all Python problems from QuixBugs,
|
99 |
+
i.e., 40 overall.
|
100 |
+
We first ask ChatGPT for bug fixes for the selected bench-
|
101 |
+
marks and manually check whether the suggested solution is
|
102 |
+
correct or not. We repeat the query four times, to account for
|
103 |
+
the heuristic nature of ChatGPT. Next, we compare its perfor-
|
104 |
+
mance with that of Codex and dedicated APR approaches. For
|
105 |
+
the standard APR approaches, we take the results from a recent
|
106 |
+
paper [13] that examines the performance of several methods
|
107 |
+
on the QuixBugs benchmark set. For dedicated APR methods
|
108 |
+
based on deep learning, we take results from CoCoNut [14].2
|
109 |
+
For the large-scale language model Codex, we take the results
|
110 |
+
from [15]. Furthermore, we study and categorize ChatGPT’s
|
111 |
+
answers to gain a deeper understanding of its behavior. Given
|
112 |
+
that ChatGPT provides a unique opportunity for a conversation
|
113 |
+
with the model, we provide a small hint to the model (e.g., a
|
114 |
+
failing test input with an error it produces) to see if it improves
|
115 |
+
ChatGPT’s fix rate.
|
116 |
+
We find that ChatGPT’s program repair performance is
|
117 |
+
competitive to the results achieved with CoCoNut and Codex
|
118 |
+
(19 vs. 19 vs. 21 instances solved, respectively). Compared
|
119 |
+
to the standard program repair approaches, ChatGPT achieves
|
120 |
+
notably better results. With ChatGPT, we could fix bugs in
|
121 |
+
19 out of 40 problems while with the standard approaches
|
122 |
+
only 7 can be fixed, even though we give ChatGPT only the
|
123 |
+
incorrect code snippet without any additional information and
|
124 |
+
without using the chat option in a conversational way. If the
|
125 |
+
chat function is actively used, we can fix even more instances.
|
126 |
+
This shows the power of providing manual hints to a program
|
127 |
+
repair system. All our experimental data is available online.3
|
128 |
+
II. CHATGPT FOR AUTOMATED PROGRAM REPAIR
|
129 |
+
In this section we present our methodology for assessing
|
130 |
+
ChatGPT’s program repair performance.
|
131 |
+
A. Benchmark
|
132 |
+
To evaluate the automatic bug fixing performance of Chat-
|
133 |
+
GPT, we use the QuixBugs [12] benchmark set. Unlike
|
134 |
+
many other benchmark suites for automated program repair,
|
135 |
+
QuixBugs contains relatively small problems (small number of
|
136 |
+
code lines). These are thus suitable for use in a dialogue sys-
|
137 |
+
tem. For each of the 40 benchmark problems from QuixBugs,
|
138 |
+
we take the erroneous Python code, remove all contained
|
139 |
+
comments4, and ask ChatGPT if the code contains a bug and
|
140 |
+
how it can be fixed. For each benchmark problem, we make
|
141 |
+
several independent requests to ChatGPT and manually check
|
142 |
+
whether the given answer is correct or not. We standardize our
|
143 |
+
procedure by using the same format for each query. We ask:
|
144 |
+
“Does this program have a bug? How to fix it?” followed by
|
145 |
+
an empty line and the buggy code without comments. Figure 1
|
146 |
+
shows an example request to ChatGPT for the BITCOUNT
|
147 |
+
problem. Lines 1-2 contain the question to ChatGPT where
|
148 |
+
2Although more recent approaches exist, we found this work is the most
|
149 |
+
recent providing sufficient patch ranking detail.
|
150 |
+
3https://gitlab.rlp.net/dsobania/chatgpt-apr.
|
151 |
+
4This was necessary, as sometimes the comments contain the solution.
|
1 Does this program have a bug? How to
2 fix it?
3
4 def bitcount(n):
5     count = 0
6     while n:
7         n ^= n - 1
8         count += 1
9     return count
175 |
+
Fig. 1: Request to ChatGPT for the BITCOUNT problem.
|
176 |
+
we ask how the bug can be fixed and starting from line 4
|
177 |
+
we present the erroneous code snippet. For this example, we
|
178 |
+
would expect from ChatGPT an answer that addresses the
|
179 |
+
bug in line 7, where n ^= n - 1 should be replaced with
|
180 |
+
n &= n - 1, either with a response containing the complete
|
181 |
+
code snippet with the fixed bug (correctly addressed) or by
|
182 |
+
giving an exact and correct description how to change the
|
183 |
+
affected code lines.
|
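To make the expected repair concrete, the following sketch (ours, not part of the study) contrasts the two operators: n &= n - 1 clears the lowest set bit per iteration (Kernighan's trick), whereas the buggy n ^= n - 1 sets all bits below the lowest set bit and never terminates for n > 0.

def bitcount_fixed(n):
    count = 0
    while n:
        n &= n - 1  # clears the lowest set bit, so the loop runs once per 1-bit
        count += 1
    return count

# Quick check against Python's binary representation:
assert all(bitcount_fixed(n) == bin(n).count("1") for n in range(1024))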
184 |
+
B. Comparison Study
|
185 |
+
We ran four independent requests to ChatGPT for each
|
186 |
+
problem from the QuixBugs dataset. In order to compare the
|
187 |
+
results of ChatGPT with the standard APR methods, we take
|
188 |
+
the results from a comprehensive study from the literature
|
189 |
+
[13] that reports the performance of ten different methods
|
190 |
+
(Arja [16], Cardumen [17], Dynamoth [18], JGenProg [19],
|
191 |
+
JKali [19], JMutRepair [19], Nopol [20], NPEfix [21], RSRe-
|
192 |
+
pair [16], and Tibra [19]) on the problems from QuixBugs. For
|
193 |
+
dedicated APR approaches based on deep learning we chose
|
194 |
+
recent results reported by Lutellier et al. [14].5 In Table I
|
195 |
+
we report a fix only if the correct patch was ranked first by
|
196 |
+
Lutellier et al.’s proposed approach, CoCoNut. For the large-
|
197 |
+
scale language model Codex, we take the results from a recent
|
198 |
+
paper [15]. We ran this experiment on ChatGPT versions from
|
199 |
+
December 15, 2022 and January 9, 2023.
|
200 |
+
C. Dialogue Study
|
201 |
+
Given that ChatGPT provides a unique opportunity of a
|
202 |
+
dialogue with the model, we also conduct a study where we
|
203 |
+
provide ChatGPT with a hint, based on ChatGPT’s response. If
|
204 |
+
ChatGPT does not provide a correct answer to the first request
|
205 |
+
(described in the previous paragraph), we tell ChatGPT in a
|
206 |
+
standardized way that the function is not working correctly and
|
207 |
+
additionally provide an input example that shows that the func-
|
208 |
+
tion is not working properly. If ChatGPT incorrectly claimed
|
209 |
+
the program was correct, we replied: “The function does not
|
210 |
+
work. E.g., for the input <input> it should return <output>.”
|
211 |
+
or “The function does not work. E.g. for the input <input>
|
212 |
+
I get the following error message: <output>”, depending
|
213 |
+
on whether the failing test case from the QuixBugs dataset
|
214 |
+
returned an incorrect answer or threw an error. In the case of
|
215 |
+
5CoCoNut solves overall only 2 instances fewer than the best reported thus far
|
216 |
+
on the QuixBugs Python dataset [15], though details on patch ranking for
|
217 |
+
each program were missing from the latter work.
|
218 |
+
|
219 |
+
more complex inputs we made the following response: “The
|
220 |
+
function does not work. E.g., given the following call: <code
|
221 |
+
snippet> The following should be the output: <output>.”6 We
|
222 |
+
only provide one such hint and report results. This experiment
|
223 |
+
was run on the ChatGPT version from January 9, 2023.
|
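As an illustration of these templates (a hypothetical helper of our own; the study issued the hints manually), the follow-up message for a failing QuixBugs test case could be assembled as:

def make_hint(test_input, expected=None, error=None):
    # Hypothetical helper mirroring the standardized replies quoted above.
    if error is not None:
        return (f"The function does not work. E.g. for the input {test_input} "
                f"I get the following error message: {error}")
    return (f"The function does not work. E.g., for the input {test_input} "
            f"it should return {expected}.")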
224 |
+
III. RESULTS AND DISCUSSION
|
225 |
+
In this section, we present the results of the comparison
|
226 |
+
of ChatGPT, Codex, CoCoNut, and the standard APR ap-
|
227 |
+
proaches. We classify ChatGPT’s answers and report on short
|
228 |
+
discussions with the model. Furthermore, we describe what
|
229 |
+
we noticed while working with ChatGPT.
|
230 |
+
A. Automatic Bug Fixing Performance
|
231 |
+
Table I shows the achieved results of ChatGPT, Codex,
|
232 |
+
CoCoNut, and the dedicated APR approaches on the bench-
|
233 |
+
mark problems from QuixBugs. For the ChatGPT results, a
|
234 |
+
checkmark () indicates that a correct answer was given in
|
235 |
+
at least one of the four runs for a benchmark problem. A
|
236 |
+
cross () indicates that no correct answer was given in any
|
237 |
+
of the runs. In parentheses we additionally report the number
|
238 |
+
of runs that led to a successful solution. For the results from
|
239 |
+
the literature, a checkmark indicates that a correct bug fix is
|
240 |
+
reported. A cross means that no successful bug fix is reported.
|
241 |
+
We see that the results achieved by ChatGPT are similar
|
242 |
+
to Codex in performance and outperform the standard APR
|
243 |
+
approaches. Overall, we find bug fixes for 19 benchmark
|
244 |
+
problems with ChatGPT, 21 are reported for Codex, 19 for
|
245 |
+
CoCoNut, and only 7 for the standard approaches.
|
246 |
+
The large gap in performance between the language model
|
247 |
+
based approaches and the standard APR approaches can be
|
248 |
+
explained by the fact that the latter usually just use a small test
|
249 |
+
suite to define the problem, which can be easily overfitted. The
|
250 |
+
authors of [13] also report this problem. If only the test suite
|
251 |
+
is considered for evaluation, the standard approaches would
|
252 |
+
solve a total of 16 benchmark problems. However, as in real-
|
253 |
+
world applications only programs that work also on unseen
|
254 |
+
inputs are usable, we have only adopted the 7 generalizing
|
255 |
+
problems from [13] as correct.
|
256 |
+
If we take a closer look at the results for ChatGPT, we
|
257 |
+
see that benchmark problems are often only solved in one or
|
258 |
+
two runs. Only for the problems BUCKETSORT and FLATTEN
|
259 |
+
ChatGPT finds a bug fix in all four runs. So ChatGPT seems
|
260 |
+
to have a relatively high variance when fixing bugs. For an
|
261 |
+
end-user, however, this means that it can be helpful to execute
|
262 |
+
requests multiple times.
|
263 |
+
Furthermore, it is not surprising that ChatGPT solves about
|
264 |
+
the same number of problems as Codex, as ChatGPT and
|
265 |
+
Codex are from the same family of language models.7 How-
|
266 |
+
ever, we still see potential for improvement for ChatGPT, as
|
267 |
+
the given responses are often close to the correct solution
|
268 |
+
(for a detailed classification of ChatGPT’s responses see
|
269 |
+
Section III-B).
|
270 |
+
6The third case only appeared once. All queries are available online.
|
271 |
+
7https://beta.openai.com/docs/model-index-for-researchers (accessed Jan-
|
272 |
+
uary 18, 2023).
|
273 |
+
Nevertheless, we are very strict in our evaluation and
|
274 |
+
consider only patches as correct if the bug introduced by
|
275 |
+
QuixBugs is actually identified and corrected. E.g., for some
|
276 |
+
problems, ChatGPT suggests a complete re-implementation
|
277 |
+
which is then bug-free. However, these are probably no real
|
278 |
+
bug fixes, since the introduced bug is not localized. We assume
|
279 |
+
that ChatGPT simply reproduced what it has learned here.
|
280 |
+
Furthermore, we do not count a bug as fixed if additional
|
281 |
+
changes suggested by ChatGPT introduce new errors that
|
282 |
+
prevent the program from running properly. Moreover, by
|
283 |
+
sending just a single request in this evaluation, we are not
|
284 |
+
using the full potential of the dialogue system. Consequently,
|
285 |
+
we take a closer look at how ChatGPT behaves when we
|
286 |
+
interact more with the system and give it more information
|
287 |
+
about the bug in Section III-C.
|
288 |
+
B. A Classification of ChatGPT’s Answers
|
289 |
+
While working with ChatGPT, we noticed different types
|
290 |
+
of responses that ChatGPT gave to our requests, especially
|
291 |
+
when a bug could not be found. Therefore, we identified the
|
292 |
+
different types of answers from ChatGPT for the benchmark
|
293 |
+
problems from QuixBugs and analyzed their frequency. We
|
294 |
+
identified the following classes of ChatGPT answers:
|
295 |
+
• More information required: Asks for more information
|
296 |
+
on the program behavior to identify the bug.
|
297 |
+
• No bug found: Does not find a bug and states the program
|
298 |
+
is working correctly.
|
299 |
+
• Correct fix provided: Provides the correct fix for the
|
300 |
+
correct bug.
|
301 |
+
• Tries to fix something else: Does not find the intended
|
302 |
+
bug and tries to fix or advise on something else that is
|
303 |
+
not really a bug or adjusts for edge cases.
|
304 |
+
• Provides fix but introduces new bug: Provides the
|
305 |
+
correct fix for the target bug but introduces a new bug
|
306 |
+
somewhere else.
|
307 |
+
• Alternative implementation: Does not fix the bug but
|
308 |
+
gives a working alternative implementation.
|
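As a small illustration of the tallying behind Figure 2 (our own sketch; the labels below are made-up examples, not the study's data):

from collections import Counter

labels = ["more information required", "no bug found",
          "more information required", "correct fix provided"]
print(Counter(labels).most_common())  # frequency of each answer class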
309 |
+
Figure 2 shows the number of occurrences of identified
|
310 |
+
classes of ChatGPT answers given for the problems from
|
311 |
+
QuixBugs.
|
312 |
+
We see that for most of our requests, ChatGPT asks for more
|
313 |
+
information about the problem and the bug. With the second
|
314 |
+
most number of answers given, we observe ChatGPT claiming
|
315 |
+
that the given code snippet does not seem to have a bug. In
|
316 |
+
both cases it might be useful to fully utilize the possibilities
|
317 |
+
of the dialogue system ChatGPT offers, as further information
|
318 |
+
might lead to a correct bug fix.
|
319 |
+
Less often than the request for more information, we
|
320 |
+
observe that ChatGPT fixes the bug but at the same time
|
321 |
+
introduces new errors, or we see that ChatGPT not really
|
322 |
+
addresses the bug correctly but suggests a completely new
|
323 |
+
working re-implementation for the problem.
|
324 |
+
C. A Discussion with ChatGPT
|
325 |
+
In order to be able to compare ChatGPT with other systems
|
326 |
+
in a standardized form, we have so far studied how ChatGPT
|
327 |
+
|
TABLE I: Results achieved by ChatGPT, Codex, CoCoNut, and the standard APR approaches on the problems from the
QuixBugs benchmark set. For ChatGPT, we also report the number of successful runs in brackets. [The per-problem
check/cross marks for the Codex, CoCoNut, and Standard APR columns were lost in text extraction; only the ChatGPT
results and the column totals are recoverable.]

Benchmark problem            ChatGPT     Codex [15]   CoCoNut [14]   Standard APR [13]
bitcount                     ✗ (0 / 4)
breadth-first-search         ✓ (2 / 4)
bucketsort                   ✓ (4 / 4)
depth-first-search           ✗ (0 / 4)
detect-cycle                 ✗ (0 / 4)
find-first-in-sorted         ✓ (2 / 4)
find-in-sorted               ✓ (3 / 4)
flatten                      ✓ (4 / 4)
gcd                          ✗ (0 / 4)
get-factors                  ✓ (1 / 4)
hanoi                        ✗ (0 / 4)
is-valid-parenthesization    ✓ (2 / 4)
kheapsort                    ✗ (0 / 4)
knapsack                     ✓ (1 / 4)
kth                          ✗ (0 / 4)
lcs-length                   ✗ (0 / 4)
levenshtein                  ✗ (0 / 4)
lis                          ✗ (0 / 4)
longest-common-subsequence   ✗ (0 / 4)
max-sublist-sum              ✗ (0 / 4)
mergesort                    ✓ (1 / 4)
minimum-spanning-tree        ✗ (0 / 4)
next-palindrome              ✓ (1 / 4)
next-permutation             ✗ (0 / 4)
pascal                       ✓ (1 / 4)
possible-change              ✓ (1 / 4)
powerset                     ✗ (0 / 4)
quicksort                    ✓ (1 / 4)
reverse-linked-list          ✓ (2 / 4)
rpn-eval                     ✗ (0 / 4)
shortest-path-length         ✓ (1 / 4)
shortest-path-lengths        ✗ (0 / 4)
shortest-paths               ✓ (1 / 4)
shunting-yard                ✓ (2 / 4)
sieve                        ✗ (0 / 4)
sqrt                         ✓ (1 / 4)
subsequences                 ✓ (1 / 4)
to-base                      ✗ (0 / 4)
topological-ordering         ✗ (0 / 4)
wrap                         ✗ (0 / 4)
Σ (Solved)                   19          21           19             7
[Figure 2: horizontal bar chart; x-axis: # of occurrences (0 to 50); bars: more information required, no bug found, correct fix provided, tries to fix something else, provides fix but introduces new bug, alternative implementation, other.]
555 |
+
Fig. 2: Number of occurrences of identified classes of ChatGPT answers given for the problems from QuixBugs.
|
1  Request (1):
2  Does this program have a bug? How to fix it?
3
4  def gcd(a, b):
5      if b == 0:
6          return a
7      else:
8          return gcd(a % b, b)
9
10 Response (1):
11 This is the Euclidean algorithm for finding the greatest common divisor (GCD) of two
12 integers, a and b. It appears to be implemented correctly. If you are experiencing
13 issues with this program, it is likely that the problem lies elsewhere in your code.
14 Without more information about the bug you are experiencing, it is difficult to say
15 how to fix it.
16
17 Request (2):
18 No, the function is not correct. E.g., for the input a = 4 and b = 5 I get the following
19 error message: "RecursionError: maximum recursion depth exceeded in comparison".
20
21 Response (2):
22 The issue is that the function calls itself recursively, and in some cases, it never
23 reaches the base case (b == 0), causing it to recurse indefinitely. The base case of
24 the gcd algorithm is when the remainder is zero, but in this implementation, it's when
25 b is zero. For example, when a=4 and b=5, gcd(4, 5) is called, which then calls
26 gcd(4 % 5, 5) which is gcd(4, 5). This causes an infinite loop.
27
28 You can fix this by swapping the parameters in the recursive call.
29
30 def gcd(a, b):
31     if a == 0:
32         return b
33     else:
34         return gcd(b % a, a)
35
36 This way, if a is zero, the function returns b (which is the gcd of a and b), otherwise
37 it calls itself with the new values of b % a and a.
795 |
+
Fig. 3: A discussion with ChatGPT about the GCD problem from QuixBugs. We inserted request and response markers in bold
|
796 |
+
font to make the chat history more readable.
|
797 |
+
behaves when making exactly one request (see Section III-A).
|
798 |
+
However, a potentially powerful advantage of ChatGPT is
|
799 |
+
that we can interact with the system in a dialogue to specify
|
800 |
+
a request in more detail. This is useful, as both, a textual
|
801 |
+
description as well as a definition given by a test suite, are
|
802 |
+
possibly ambiguous and could be misleading.
|
803 |
+
|
1  Request (1):
2  Does this program have a bug? How to fix it?
3
4  def bitcount(n):
5      count = 0
6      while n:
7          n ^= n - 1
8          count += 1
9      return count
10
11 Response (1):
12 I'm unable to tell if the program has a bug without more information on the expected
13 behavior and the input that is causing the issue. [...]
14
15 Request (2):
16 The program should count the number of 1-bits in the binary encoding of n.
17
18 Response (2):
19 The function bitcount(n) appears to be counting the number of 1-bits [...]
20
21 To confirm this, you can test the function with some test cases to check if
22 it produces the expected output:
23
24 test_cases = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
25 expected_output = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4]
26
27 Request (3):
28 The function does not work. For input 1 it seems to run into an infinite loop.
29
30 Response (3):
31 You are correct that the function will run into an infinite loop when the input is 1.
32 [...]
33
34 def bitcount(n):
35     if n == 0:
36         return 0
37     count = 0
38     while n:
39         n ^= n - 1
40         count += 1
41     return count
42
43 def bitcount(n):
44     count = 0
45     while n > 0:
46         n ^= n - 1
47         count += 1
48     return count
49
50 def bitcount(n):
51     return bin(n).count('1')
52
53 def bitcount(n):
54     count = 0
55     while n:
56         if n & 1:
57             count += 1
58         n >>= 1
59     return count
1049 |
+
Fig. 4: A discussion with ChatGPT about the BITCOUNT problem from QuixBugs. We inserted request and response markers
|
1050 |
+
in bold font to make the chat history more readable. In addition, we shortened ChatGPT’s responses in some places to keep
|
1051 |
+
the example as small as possible. When we left out large text passages, we marked this in the text.
|
1052 |
+
|
1053 |
+
Consequently, we investigate for two benchmark problems
|
1054 |
+
how ChatGPT behaves in a conversation and if it is possible
|
1055 |
+
to find a working solution by discussing the problem with the
|
1056 |
+
system. We choose GCD and BITCOUNT as benchmark prob-
|
1057 |
+
lems because in our previous experiments the contained bug
|
1058 |
+
could not be fixed correctly for both problems. Furthermore,
|
1059 |
+
the problems consist of a relatively small number of code lines
|
1060 |
+
which allows us to discuss these problems in detail.
|
1061 |
+
Figure 3 shows an example discussion with ChatGPT about
|
1062 |
+
the GCD problem (lines 1–8). In the first response (lines
|
1063 |
+
10–15), ChatGPT does not present any solution. It asks for
|
1064 |
+
more information about the bug (we observed this behavior
|
1065 |
+
for many other problems, see Section III-B). Since the given
|
1066 |
+
function causes recursion issues for many possible inputs,
|
1067 |
+
we give ChatGPT an exact input example and the resulting
|
1068 |
+
error message from Python (lines 17–19). By mentioning the
|
1069 |
+
recursion issue, the final response goes in the right direction
|
1070 |
+
and we get a correctly working patched version (lines 30–34).
|
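The patched version can be checked quickly against Python's standard library (our own verification sketch, not part of the dialogue):

from math import gcd as reference_gcd

def gcd(a, b):
    # ChatGPT's corrected version from Fig. 3: the recursive call swaps the arguments.
    if a == 0:
        return b
    return gcd(b % a, a)

assert all(gcd(a, b) == reference_gcd(a, b)
           for a in range(30) for b in range(30))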
1071 |
+
In Figure 4 we see an example discussion with ChatGPT
|
1072 |
+
about the BITCOUNT problem (lines 1–9). Again, ChatGPT
|
1073 |
+
asks for more information about the problem and for an input
|
1074 |
+
that causes an error (lines 11–13). As follow-up request, we
|
1075 |
+
give ChatGPT a description of what the function should do
|
1076 |
+
(based on a code comment from QuixBugs) and ignore the
|
1077 |
+
request for an example input to see how ChatGPT reacts (lines
|
1078 |
+
15 and 16). We can see in the following answer (lines 18–25)
|
1079 |
+
that there is clearly a relation between ChatGPT’s first and
|
1080 |
+
second answer because now we get an explanation of how
|
1081 |
+
we can test the function with some test inputs. We respond
|
1082 |
+
with a problem description for a test input and describe that
|
1083 |
+
there is probably an issue with an infinite loop (lines 27 and
|
1084 |
+
28). ChatGPT responds with four code snippets where the first
|
1085 |
+
two (lines 34–48) do not solve the problem with the infinite
|
1086 |
+
loop and the last two (lines 50–59) are complete but working
|
1087 |
+
re-implementations which, however, not directly address the
|
1088 |
+
contained bug. It seems that ChatGPT simply returns functions
|
1089 |
+
here that somehow fit the content of the problem discussion,
|
1090 |
+
even though the test cases mentioned by ChatGPT show
|
1091 |
+
that the first two functions cannot work correctly. Also the
|
1092 |
+
bug is not simply fixed by replacing n ˆ= n - 1 with
|
1093 |
+
n &= n - 1 in the given function, but ChatGPT, as al-
|
1094 |
+
ready mentioned, returns two complete re-implementations.
|
1095 |
+
However, both observations are not particularly surprising for
|
1096 |
+
a language model based approach. Nevertheless, the given
|
1097 |
+
answers would be useful for a programmer as they help to
|
1098 |
+
solve the problem.
|
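For reference (our own check, not from the paper), the two working re-implementations ChatGPT returned in Fig. 4 (lines 50–59) are indeed equivalent:

def bitcount_bin(n):
    return bin(n).count("1")

def bitcount_shift(n):
    count = 0
    while n:
        if n & 1:
            count += 1
        n >>= 1
    return count

assert all(bitcount_bin(n) == bitcount_shift(n) for n in range(1024))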
1099 |
+
D. Systematic Follow-up Requests for ChatGPT
|
1100 |
+
Next, we conducted a study where we systematically discuss
|
1101 |
+
with ChatGPT. For those programs for which the contained
|
1102 |
+
bug was not correctly addressed by ChatGPT (see Table I),
|
1103 |
+
we provide ChatGPT with a follow-up request giving a hint,
|
1104 |
+
as specified in Section II-C. We report our results in Table II.
|
1105 |
+
We use the same notation as before with the addition that a
|
1106 |
+
checkmark with an asterisk (*) defines that a solution was
|
1107 |
+
found without a follow-up request being necessary in this run.
|
1108 |
+
TABLE II: Results achieved by ChatGPT with additional information given in a follow-up request for the unsolved
benchmark problems (see Table I). [Most check/cross marks were lost in text extraction; the asterisked entries and
the total are recoverable. Per the text, 9 problems were solved via the follow-up hint and 3 (marked *) needed no
follow-up in this run.]

Benchmark problem            ChatGPT
bitcount
depth-first-search           ✓*
detect-cycle                 ✓*
gcd
hanoi
kheapsort
kth
lcs-length
levenshtein
lis
longest-common-subsequence
max-sublist-sum
minimum-spanning-tree
next-permutation
powerset
rpn-eval
shortest-path-lengths
sieve                        ✓*
to-base
topological-ordering
wrap
Σ (Solved)                   9 (12)
1157 |
+
For 9 benchmark problems we see that a more detailed
|
1158 |
+
description of the bug is helpful for ChatGPT. For 3 bench-
|
1159 |
+
mark problems no follow-up request was necessary in this run,
|
1160 |
+
since the bug was correctly addressed in the response given
|
1161 |
+
on our first request. Overall, adding a hint to ChatGPT vastly
|
1162 |
+
improves its performance, with 31 out of 40 problems solved.
|
1163 |
+
ChatGPT thus offers an exciting new way of approaching
|
1164 |
+
automated program repair.
|
1165 |
+
IV. THREATS TO VALIDITY
|
1166 |
+
It is worth noting that ChatGPT is currently under active
|
1167 |
+
development. During our study there was a major update to
|
1168 |
+
it, which might have influenced our results, although we
|
1169 |
+
observed repairability rates before and after the update to be
|
1170 |
+
similar. However, future releases might yield different results.
|
1171 |
+
Furthermore, ChatGPT allows for conversation with its users.
|
1172 |
+
Asking a different question than the ones presented in this
|
1173 |
+
study could potentially have a different impact on results.
|
1174 |
+
To mitigate this threat to validity, we conducted a pre-study,
|
1175 |
+
varying the questions asked. We noted no significant influence
|
1176 |
+
on the results. Moreover, the results might vary depending
|
1177 |
+
on the programming language, size of the benchmarks, and
|
1178 |
+
|
1179 |
+
the number of queries issued. To mitigate these threats, we
|
1180 |
+
chose a standard benchmark set and targeted Python – the
|
1181 |
+
most popular programming language.8 The classification of
|
1182 |
+
the results was done manually and therefore represents the
|
1183 |
+
subjective assessment of the authors. To enable a verification
|
1184 |
+
of our results, we made our conversations with ChatGPT
|
1185 |
+
available online.
|
1186 |
+
V. CONCLUSIONS AND FUTURE WORK
|
1187 |
+
To support programmers in finding and fixing software
|
1188 |
+
bugs, several automated program repair (APR) methods have
|
1189 |
+
been proposed. ChatGPT, a recently presented deep learning
|
1190 |
+
(DL) based dialogue system, can also make suggestions for
|
1191 |
+
improving erroneous source code. However, so far the quality
|
1192 |
+
of these suggestions has been unclear. Therefore, we compared
|
1193 |
+
in this work the automatic bug fixing performance of ChatGPT
|
1194 |
+
with that of Codex and several dedicated APR approaches.
|
1195 |
+
We find that ChatGPT has similar performance to Codex
|
1196 |
+
and dedicated DL-based APR on a standard benchmark set. It
|
1197 |
+
vastly outperforms standard APR methods (19 vs. 7 out of 40
|
1198 |
+
bugs fixed). Using ChatGPT’s dialogue option and giving the
|
1199 |
+
system more information about the bug in a follow-up request
|
1200 |
+
boosts the performance even further, giving an overall success
|
1201 |
+
rate of 77.5%. This shows that human input can be of much
|
1202 |
+
help to an automated APR system, with ChatGPT providing
|
1203 |
+
means to do so.
|
1204 |
+
Despite its great performance, the question arises whether
|
1205 |
+
the mental cost required to verify ChatGPT answers outweighs
|
1206 |
+
the advantages that ChatGPT brings. Perhaps incorporation
|
1207 |
+
of automated approaches to provide ChatGPT with hints as
|
1208 |
+
well as automated verification of its responses, e.g., through
|
1209 |
+
automated testing, would yield ChatGPT to be a viable tool
|
1210 |
+
that would help software developers in their daily tasks.
|
1211 |
+
We hope our results and observations will be helpful for
|
1212 |
+
future work with ChatGPT.
|
1213 |
+
ACKNOWLEDGMENTS
|
1214 |
+
This work was partially supported by UKRI EPSRC grant
|
1215 |
+
no. EP/P023991/1.
|
1216 |
+
REFERENCES
|
1217 |
+
[1] W. E. Wong, X. Li, and P. A. Laplante, “Be more familiar with our
|
1218 |
+
enemies and pave the way forward: A review of the roles bugs played
|
1219 |
+
in software failures,” Journal of Systems and Software, vol. 133, pp.
|
1220 |
+
68–94, 2017.
|
1221 |
+
[2] C. Le Goues, T. Nguyen, S. Forrest, and W. Weimer, “GenProg: A
|
1222 |
+
generic method for automatic software repair,” Ieee transactions on
|
1223 |
+
software engineering, vol. 38, no. 1, pp. 54–72, 2011.
|
1224 |
+
[3] L. Gazzola, D. Micucci, and L. Mariani, “Automatic software repair:
|
1225 |
+
a survey,” in Proceedings of the 40th International Conference
|
1226 |
+
on Software Engineering, ICSE 2018, Gothenburg, Sweden, May
|
1227 |
+
27 - June 03, 2018, M. Chaudron, I. Crnkovic, M. Chechik,
|
1228 |
+
and M. Harman, Eds.
|
1229 |
+
ACM, 2018, p. 1219. [Online]. Available:
|
1230 |
+
https://doi.org/10.1145/3180155.3182526
|
1231 |
+
[4] S. O. Haraldsson, J. R. Woodward, A. E. Brownlee, and K. Siggeirs-
|
1232 |
+
dottir, “Fixing bugs in your sleep: How genetic improvement became
|
1233 |
+
an overnight success,” in Proceedings of the Genetic and Evolutionary
|
1234 |
+
Computation Conference Companion, 2017, pp. 1513–1520.
|
1235 |
+
8https://www.tiobe.com/tiobe-index/ (accessed January 18, 2023).
|
1236 |
+
[5] J. Petke, S. O. Haraldsson, M. Harman, W. B. Langdon, D. R. White, and
|
1237 |
+
J. R. Woodward, “Genetic improvement of software: a comprehensive
|
1238 |
+
survey,” IEEE Transactions on Evolutionary Computation, vol. 22, no. 3,
|
1239 |
+
pp. 415–432, 2017.
|
1240 |
+
[6] Q. Zhang, C. Fang, Y. Ma, W. Sun, and Z. Chen, “A survey of
|
1241 |
+
learning-based automated program repair,” 2023. [Online]. Available:
|
1242 |
+
https://arxiv.org/abs/2301.03270
|
1243 |
+
[7] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez,
|
1244 |
+
Ł. Kaiser, and I. Polosukhin, “Attention is all you need,” in Advances
|
1245 |
+
in neural information processing systems, 2017, pp. 5998–6008.
|
1246 |
+
[8] Z. Feng, D. Guo, D. Tang, N. Duan, X. Feng, M. Gong, L. Shou,
|
1247 |
+
B. Qin, T. Liu, D. Jiang et al., “CodeBERT: A pre-trained model
|
1248 |
+
for programming and natural languages,” in Proceedings of the 2020
|
1249 |
+
Conference on Empirical Methods in Natural Language Processing:
|
1250 |
+
Findings, 2020, pp. 1536–1547.
|
1251 |
+
[9] C. Clement, D. Drain, J. Timcheck, A. Svyatkovskiy, and N. Sundaresan,
|
1252 |
+
“PyMT5: Multi-mode translation of natural language and Python code
|
1253 |
+
with transformers,” in Proceedings of the 2020 Conference on Empirical
|
1254 |
+
Methods in Natural Language Processing (EMNLP), 2020, pp. 9052–
|
1255 |
+
9065.
|
1256 |
+
[10] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. d. O. Pinto, J. Kaplan,
|
1257 |
+
H. Edwards, Y. Burda, N. Joseph, G. Brockman et al., “Evaluating large
|
1258 |
+
language models trained on code,” arXiv preprint arXiv:2107.03374,
|
1259 |
+
2021.
|
1260 |
+
[11] D. Sobania, M. Briesch, and F. Rothlauf, “Choose your programming
|
1261 |
+
copilot: a comparison of the program synthesis performance of GitHub
|
1262 |
+
Copilot and genetic programming,” in Proceedings of the Genetic and
|
1263 |
+
Evolutionary Computation Conference, 2022, pp. 1019–1027.
|
1264 |
+
[12] D. Lin, J. Koppel, A. Chen, and A. Solar-Lezama, “QuixBugs: A multi-
|
1265 |
+
lingual program repair benchmark set based on the Quixey Challenge,”
|
1266 |
+
in Proceedings Companion of the 2017 ACM SIGPLAN international
|
1267 |
+
conference on systems, programming, languages, and applications:
|
1268 |
+
software for humanity, 2017, pp. 55–56.
|
1269 |
+
[13] H. Ye, M. Martinez, T. Durieux, and M. Monperrus, “A comprehensive
|
1270 |
+
study of automatic program repair on the QuixBugs benchmark,” Journal
|
1271 |
+
of Systems and Software, vol. 171, p. 110825, 2021.
|
1272 |
+
[14] T. Lutellier, H. V. Pham, L. Pang, Y. Li, M. Wei, and L. Tan, “CoCoNuT:
|
1273 |
+
combining context-aware neural translation models using ensemble for
|
1274 |
+
program repair,” in ISSTA ’20: 29th ACM SIGSOFT International
|
1275 |
+
Symposium on Software Testing and Analysis, Virtual Event, USA, July
|
1276 |
+
18-22, 2020, S. Khurshid and C. S. Pasareanu, Eds.
|
1277 |
+
ACM, 2020, pp.
|
1278 |
+
101–114. [Online]. Available: https://doi.org/10.1145/3395363.3397369
|
1279 |
+
[15] J. A. Prenner, H. Babii, and R. Robbes, “Can OpenAI’s codex fix bugs?
|
1280 |
+
an evaluation on QuixBugs,” in Proceedings of the Third International
|
1281 |
+
Workshop on Automated Program Repair, 2022, pp. 69–75.
|
1282 |
+
[16] Y. Yuan and W. Banzhaf, “ARJA: Automated repair of java programs via
|
1283 |
+
multi-objective genetic programming,” IEEE Transactions on software
|
1284 |
+
engineering, vol. 46, no. 10, pp. 1040–1067, 2018.
|
1285 |
+
[17] M. Martinez and M. Monperrus, “Ultra-large repair search space with
|
1286 |
+
automatically mined templates: The cardumen mode of astor,” in Inter-
|
1287 |
+
national Symposium on Search Based Software Engineering.
|
1288 |
+
Springer,
|
1289 |
+
2018, pp. 65–86.
|
1290 |
+
[18] T. Durieux and M. Monperrus, “Dynamoth: dynamic code synthesis
|
1291 |
+
for automatic program repair,” in Proceedings of the 11th International
|
1292 |
+
Workshop on Automation of Software Test, 2016, pp. 85–91.
|
1293 |
+
[19] M. Martinez and M. Monperrus, “Astor: Exploring the design space
|
1294 |
+
of generate-and-validate program repair beyond GenProg,” Journal of
|
1295 |
+
Systems and Software, vol. 151, pp. 65–80, 2019.
|
1296 |
+
[20] J. Xuan, M. Martinez, F. Demarco, M. Clement, S. L. Marcote,
|
1297 |
+
T. Durieux, D. Le Berre, and M. Monperrus, “Nopol: Automatic repair
|
1298 |
+
of conditional statement bugs in Java programs,” IEEE Transactions on
|
1299 |
+
Software Engineering, vol. 43, no. 1, pp. 34–55, 2016.
|
1300 |
+
[21] B. Cornu, T. Durieux, L. Seinturier, and M. Monperrus, “NPEfix:
|
1301 |
+
Automatic runtime repair of null pointer exceptions in Java,” arXiv
|
1302 |
+
preprint arXiv:1512.07423, 2015.
|
1303 |
+
|
AdFAT4oBgHgl3EQfrh5X/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
B9E4T4oBgHgl3EQf5Q72/content/tmp_files/2301.05323v1.pdf.txt
ADDED
@@ -0,0 +1,1535 @@
Salient Object Detection for Images Taken by People With Vision Impairments

Jarek Reynolds*, Chandra Kanth Nagesh*, and Danna Gurari
* denotes equal contribution
University of Colorado Boulder

Abstract
Salient object detection is the task of producing a binary mask for an image that deciphers which pixels belong to the foreground object versus the background. We introduce a new salient object detection dataset using images taken by people who are visually impaired and were seeking to better understand their surroundings, which we call VizWiz-SalientObject. Compared to seven existing datasets, VizWiz-SalientObject is the largest (i.e., 32,000 human-annotated images) and contains unique characteristics, including a higher prevalence of text in the salient objects (i.e., in 68% of images) and salient objects that occupy a larger ratio of the images (i.e., on average, ∼50% coverage). We benchmarked seven modern salient object detection methods on our dataset and found they struggle most with images featuring salient objects that are large, have less complex boundaries, and lack text, as well as with lower quality images. We invite the broader community to work on our new dataset challenge by publicly sharing the dataset at https://vizwiz.org/tasks-and-datasets/salient-object.
1. Introduction

Locating the most prominent foreground object in an image is a core computer vision problem, often referred to as salient object detection (as well as salient object segmentation and foreground object detection/segmentation) [8,12,32,40]. This work is motivated by the desire to have salient object detection models work well for images taken by people who are blind or have low vision¹ (i.e., people with vision impairments). Such a feature could offer several benefits to this community. For example, it could contribute to privacy preservation for photographers who rely on visual assistance technologies to learn about objects in their daily lives, using mobile phone applications such as Microsoft's Seeing AI, Google Lookout, and TapTapSee.²

¹For people with low vision, solutions do not exist to correct their vision (e.g., by wearing glasses, surgery).
²Many companies record submitted data as evidence that potentially could be needed for legal reasons.
Figure 1. Example images demonstrating unique features of our new VizWiz-SalientObject dataset when compared to other datasets. The salient objects commonly contain text and occupy a larger portion of the image (i.e., high coverage).
All content except the foreground content of interest could be obfuscated, which is important since private information is often inadvertently captured in the background of images taken by these photographers [24]. Additionally, localization of the foreground object would empower low vision users to rapidly magnify content of interest and also enable quick inspection of smaller details [21,39].

Many salient object detection datasets have been created to enable progress in algorithm development [7,8,22,42]. A limitation of existing datasets is that they are typically built using high-quality images collected from photo-sharing websites on the Internet. As we will show in Section 3.2, such images commonly lack many characteristics that can be observed in real-world settings, especially for visual media taken by visually impaired photographers, who are trying to learn about the content they photograph [24], often photograph distinct types of content such as objects showing text [25], and cannot verify visual quality [13].

To fill this gap, we introduce a new salient object detection dataset based on images captured in an authentic use case where visually impaired photographers shared their images to solicit assistance in learning about the visual content. We created this dataset by crowdsourcing the collection of salient object annotations for nearly 40,000 images taken from the VizWiz-Captions dataset [25]. Examples of resulting annotated images are shown in Figure 1. After applying quality control filtration steps, our final dataset consists of 32,000 annotated images. We call our dataset VizWiz-SalientObject (or VizWiz-SO). We conduct a detailed analysis revealing how this new dataset relates to existing datasets. When comparing our salient objects to the visual evidence needed to answer questions the photographers asked about their images (i.e., taken from the VizWiz-VQA-Grounding dataset [11]), we observe that over half the time the necessary visual evidence is the salient object. When comparing our dataset to seven existing datasets, we observe VizWiz-SalientObject is the largest (i.e., 32,000 human-annotated images) and is unique in its higher prevalence of text in the salient objects (i.e., in 68% of images) as well as salient objects occupying a larger ratio of the images (i.e., on average, ∼50%).

We also benchmark modern salient object detection algorithms on our new dataset to uncover open challenges for the research community. Experiments with seven algorithms reveal that they struggle most for images with salient objects that are large, have less complex boundaries, and lack text, as well as for lower quality images. To facilitate progress on these challenging problems, upon publication, we will publicly share the dataset and an evaluation server with leaderboard at the following link: https://vizwiz.org/tasks-and-datasets/salient-object.

In summary, our new dataset supports the development of more generalized algorithms that not only address the interests of people with vision impairments but also can benefit related applications that encounter similar real-world challenges observed in our dataset. Relevant applications include robotics, lifelogging, and privacy protection.
2. Related Work

Salient Object Detection Datasets. Over the past couple of decades, many datasets were introduced to facilitate improving the design of algorithms that address salient object detection problems. Several survey papers provide comprehensive characterizations of the tens of datasets designed for this task [7,8,22,42]. A common observation is that datasets were artificially constructed around high-quality images which often feature salient objects in the center of the images with a high contrast against the background. This is a mismatch from many real-world settings, especially for visual media taken by visually impaired photographers, who often photograph distinct types of content, such as objects showing text [25], with the aim to learn about that content. We introduce the first salient object detection dataset based on images taken by visually impaired people in an authentic use case where they were trying to learn about their visual surroundings. Compared to seven modern datasets, our dataset is larger, has a higher prevalence of salient objects containing textual information, and shows objects that occupy larger portions of the images.

Salient Object Detection Algorithms. Researchers have designed novel algorithms to automatically perform salient object detection for over 20 years, with the status quo since 2015 being that state-of-the-art methods employ neural networks trained on large-scale annotated datasets. Several survey papers provide comprehensive characterizations of the hundreds of algorithms for this task [7,8,22,42]. While convolutional neural network (CNN) based models became the mainstream method [1,33,43] in 2015, transformer-based models [30,44] have become the mainstream approach over the past few years. To assess how well modern methods perform on our new dataset, we benchmark seven modern methods. We observe that existing methods fall below human performance and struggle most for salient objects that lack text and occupy a larger ratio of the image.

Visual Assistance Technologies. Visually impaired people can share their visual media (images and videos) with various technologies [3,4,6,14,18,27,32,40] in order to receive assistance for daily tasks such as deciding what to eat, wear, and buy [10,24]. The widespread impact of such technologies for real users is exemplified by reports from some of these companies that the technologies have tens to hundreds of thousands of users who have submitted millions of assistance requests [5,9,14,17]. The most common reported goal for using such technologies is to learn about a (salient) object [9,10,23,28,47]. Given this common use case, salient object detection models could help with privacy preservation. Specifically, images (or video frames) could be edited before being shared with companies, by obfuscating the background, in order to reduce inadvertent disclosures of private content that often appears in the background of images taken by visually impaired photographers [24].
3. VizWiz-SalientObject Dataset

We now introduce our new salient object detection dataset, which we call VizWiz-SalientObject (VizWiz-SO).

3.1. Dataset Creation
Image Source. We focus on images taken by visually impaired people who shared them in an authentic use case where they were soliciting visual assistance. Specifically, we leverage the 39,181 labeled images from the VizWiz-Captions dataset, each of which is paired with five crowdsourced captions [25]. Observing that images from these photographers can have severe quality issues resulting in no detectable salient object (e.g., extreme blur or inadequate illumination), we did not use the images which were captioned as follows by at least four of the five crowdworkers: “Quality issues are too severe to recognize visual content.” We also did not use the small images (i.e., both the height and width were less than 300 pixels) because of the challenges of collecting precise annotations for such images. This left us with 37,120 images for our annotation task.
Task Design. Our task interface for segmenting salient objects begins with a comprehensive instruction set at the top detailing both how to navigate the interface and how to complete challenging annotation scenarios. Next, it shows an image alongside two preliminary questions for verifying there is a single, unambiguous foreground object. The first question asks “Is the image showing a screenshot?” If the answer is “yes”, we conclude the image lacks a salient object. Next, we ask the more general, direct question of “Is there a single unambiguous foreground object?” An annotator is only prompted to segment the foreground object for images deemed by these preliminary questions to show a single, unambiguous foreground object.

To demarcate the boundary of the salient object, the interface collects a series of points that are connected into polygon(s). When segmenting the salient object, the annotator is required to remove any holes (e.g., the hole of a donut) as well as capture all object parts when occlusions break a salient object into more than one polygon (e.g., a hand occluding a pencil breaks it into two parts). The annotator also has an option to select a button indicating that the salient object occupies the full image. We provide more details about the task interface as well as a screenshot of it in the Supplementary Materials.
Annotation Collection. We leveraged the benefits of an around-the-clock distributed workforce by crowdsourcing annotations via Amazon's crowdsourcing marketplace, Amazon Mechanical Turk (AMT).

Although AMT can support our large-scale annotation needs, it brings concerns about annotation quality due to the anonymous nature of the crowdsourced workforce. Consequently, we implemented several measures to ensure the collection of high-quality annotations, as summarized below. First, we restricted who could be a candidate for our task. We only accepted workers who had at least a 98% acceptance rate while having completed at least 500 Human Intelligence Tasks (HITs) on AMT. Moreover, to encourage understanding of our initial and ongoing task instructions, we opted for crowdworkers only from the United States since that provided us confidence that they have English-language proficiency. In addition, we also required crowdworkers to pass a qualification assessment covering five challenging annotation scenarios documented in our instructions. The qualification images feature foreground objects consisting of complex boundaries, holes within the object, and occlusions obfuscating portions of the foreground object. Consequently, the task required crowdworkers to demonstrate an understanding of how to generate multiple polygons, annotate holes, handle occlusions, and draw complex boundaries.

We employed 40 AMT crowdworkers who completed our qualification task to annotate all images. For each of the 37,120 images, we collected two annotations from the crowdworkers.³ During annotation collection, we monitored ongoing quality by tracking each worker's performance with respect to their frequency of indicating the presence of full-screen annotations or no prominent foreground object, as well as the level of detail they provided in their segmentations (e.g., a high prevalence of triangles). Cumulatively, the crowdworkers took 1,290 annotation hours over 11 days to complete annotating the 37,120 images.

³For a subset of images, we collected four annotations to support further analysis of human annotation performance, which we describe in the Supplementary Materials.
Annotation Post-Processing. We next analyzed the redundant annotations per image to determine how to use each annotated image in the final dataset. First, we removed 3,662 images for which workers agreed there was no single, unambiguous salient object, which occurred when both annotators either answered “Yes” to “Is the image a screenshot?” or “No” to “Is there a single most prominent foreground object?” Next, we manually inspected 7,443 images for which workers disagreed on the answers to either of the two preliminary questions and determined whether there is indeed a single, unambiguous object. Finally, with all images deemed to have a single, unambiguous salient object, we determined which annotation to assign as ground truth. To assist in this process, we computed the intersection over union (IoU) score between the two segmentations for all images with two or more segmentations. With IoUs ≥ 0.90, we deemed both annotations high quality and randomly selected one as ground truth. For the remaining 2,951 images with IoUs < 0.90, we manually reviewed the annotations to decide whether one was correct or whether the image should be discarded due to foreground object ambiguity.
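As a concrete illustration of this agreement check, below is a minimal sketch of rasterizing redundant polygon annotations into binary masks and comparing them with IoU. The helper names and the use of Pillow are our own assumptions (not the authors' released tooling); holes are handled with the simple fill-then-carve approach implied by the task design.

import numpy as np
from PIL import Image, ImageDraw

def polygons_to_mask(regions, holes, size):
    # Rasterize the outer polygon(s) as foreground, then carve out any
    # annotated holes (e.g., the hole of a donut).
    canvas = Image.new("1", size, 0)
    draw = ImageDraw.Draw(canvas)
    for polygon in regions:            # each is [(x1, y1), (x2, y2), ...]
        draw.polygon(polygon, fill=1)
    for hole in holes:
        draw.polygon(hole, fill=0)
    return np.array(canvas, dtype=bool)

def iou(mask_a, mask_b):
    union = np.logical_or(mask_a, mask_b).sum()
    return np.logical_and(mask_a, mask_b).sum() / union if union else 1.0

# Two redundant annotations count as agreeing when IoU >= 0.90;
# otherwise the image is flagged for manual review, as described above.
mask_1 = polygons_to_mask([[(10, 10), (90, 12), (88, 90), (12, 88)]], [], (100, 100))
mask_2 = polygons_to_mask([[(11, 11), (89, 11), (89, 89), (11, 89)]], [], (100, 100))
needs_manual_review = iou(mask_1, mask_2) < 0.90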
3.2. Dataset Analysis

We now characterize the VizWiz-SalientObject (VizWiz-SO) dataset and how it relates to existing datasets.
3.2.1 Salient Objects vs Answer Groundings for VQA
We first explore how the target content the photographers were asking about relates to an image's salient object. To do so, we compare the annotations of the visual evidence needed to answer questions about the images, i.e., the answer groundings provided in the VizWiz-VQA-Grounding dataset [11], to the annotations of the salient objects in our dataset. We first identified all annotated images that were in common across the two datasets, yielding a total of 6,540 images. For each image, we then measured the similarity between the answer grounding and salient object segmentations using the IoU metric. We visualize our results using a histogram where we categorize each image into one of ten interval bins starting with IoU=[0.0, 0.1), incrementing in intervals of 0.1, and ending with IoU=[0.9, 1.0]. Results are shown in Figure 2.

Figure 2. The histogram summarizes for 6,540 images the frequency of observing different levels of similarity between two segmentations per image, which show the salient object and the visual evidence needed to answer the photographer's question respectively. These findings reveal that visually impaired photographers often want to learn about the salient objects in their images.
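A minimal sketch of this binning, assuming one precomputed IoU score per image (e.g., from the iou() helper in the earlier sketch); the function name is our own.

import numpy as np

def similarity_histogram(scores):
    # Ten bins: [0.0, 0.1), [0.1, 0.2), ..., [0.9, 1.0]; np.histogram
    # closes the final bin so a perfect IoU of 1.0 is still counted.
    counts, _ = np.histogram(scores, bins=np.linspace(0.0, 1.0, 11))
    return counts / len(scores)  # fraction of images per bin, as in Figure 2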
We observe that about half of the images have a high similarity between the salient object and VQA answer grounding; e.g., 46% had an IoU ≥ 0.9. This reveals that visually impaired photographers often are trying to learn about the salient object in their images when trying to get answers to their visual questions.

We also observe that roughly one quarter of the images have a very low similarity between the salient object and VQA answer grounding; i.e., 25.7% of images had an IoU < 0.1. We manually reviewed these 1,680 images with IoUs less than 0.1 to understand the reasons for this finding. We discovered that 95% (i.e., 1,599) of these images have a salient object featuring a full-screen or large region while the VQA answer grounding captures a small aspect of the salient object. Examples include expiration dates on food packages or the current page number of an open book. The remaining 5% (i.e., 81) of these images featured a VQA answer grounding unrelated to the salient object.

More generally, we observe that the IoU scores follow a U-shaped distribution with only a small portion of images having middling scores; e.g., 7.9% (i.e., 511) of images had an IoU ≥ 0.3 and < 0.7. Among these images, we found the salient object contained the VQA answer grounding region 100% of the time. There are two primary trends that led to these less common IoU scores. The first trend is that larger VQA answer grounding regions occur with smaller salient objects. Examples include brands of cereal, types of soda, and denominations of currency. The second trend was for salient objects featuring holes. That is because the VizWiz-VQA-Grounding dataset did not account for holes in their annotation task. The absence of annotated holes in only one of the two segmentations led to lower IoU scores.

Altogether, these findings highlight that a valuable step for tackling many of this population's VQA goals is to initially locate the salient object. That is because the answer will likely only be grounded in the salient object or the background rather than their intersection.
3.2.2 VizWiz-SO vs Existing Datasets
We next compare our dataset to seven datasets:

• DUTS [41]: the most commonly used dataset to train state-of-the-art algorithms (e.g., [1,30,33,38,43,44]) due to its large size paired with diverse saliency challenges.
• DUT-OMRON [46]: consists of images showing multiple salient objects, often with complex backgrounds. This is a useful reference for eventually extending our dataset to photographs taken by visually impaired photographers that show multiple salient objects. We share our collected metadata indicating when this occurs to facilitate this line of future research.
• ECSSD [45]: consists of images featuring complex scenes that present textures and structures expected to be common in real-world salient object detection scenarios.
• PASCAL-S [29]: derived from PASCAL VOC's [16] validation set, it is designed to facilitate salient object segmentation generalization on realistic images.
• HRSOD [48]: explicitly designed for salient object detection on high-resolution images; this is relevant for our real-world application since images taken by people with vision impairments often are relatively high resolution.
• UHRSD [44]: currently the largest ultra-high resolution salient object detection dataset, which is relevant to our work since images taken by people with vision impairments can be ultra high resolution.
• DAVIS-S [48]: derived from DAVIS [36], a densely annotated video segmentation dataset. This is relevant for our real-world application to analyze implications for video frames, since visually impaired photographers often stream live video with their cameras when using visual assistance technologies [4,18].

Of note, images in six of these datasets originate from photo-sharing websites on the Internet such as Flickr [29, 41, 44–46, 48], and so likely are high quality since they were deemed of sufficient quality to upload to the Internet.⁴

⁴The origins of the images for the final dataset are not reported [48].
Dataset         Images   Text   MR    Holes
DAVIS-S [48]    92       13%    22%   82%
PASCAL-S [29]   850      24%    31%   50%
HR [48]         2,010    15%    25%   62%
ECSSD [45]      1,000    15%    9%    29%
DUT-O [46]      5,168    11%    17%   28%
UH [44]         5,920    19%    35%   75%
DUTS [41]       15,572   13%    19%   41%
Ours            32,000   68%    1%    4%

Table 1. Characterization of our VizWiz-SO dataset and seven existing salient object detection datasets with respect to how many images are included (“Images”), the percentage of images that have text present in the salient objects (“Text”), the percentage of images that have salient objects consisting of more than one region (“MR”), and the percentage of images that have salient objects containing any holes (“Holes”). As shown, our dataset is distinct in that it contains more images, more salient objects with text present, more salient objects consisting of one region, and fewer salient objects containing holes. (HR=HRSOD; UH=UHRSD)
Figure 3. Summary statistics for ours and seven other datasets with respect to four measures (center of mass along the Y-axis, center of mass along the X-axis, boundary complexity, and coverage ratio). Each box reveals statistics about all salient objects in a particular dataset, with the central mark capturing the median value, box edges the 25th and 75th percentile values, whiskers the most extreme data points not considered outliers, and individually plotted points the outliers. Our dataset is unique in that salient objects tend to have less complex boundaries, occupy larger portions of an image, and exhibit a greater diversity of sizes relative to the image.
For each salient object in every dataset, we characterize it in six ways. Three measures focus on detecting the presence versus absence of particular properties for the salient object. These are whether the salient object contains text⁵, consists of multiple regions⁶, or contains any hole(s). The remaining three measures characterize the salient region itself. First, we identify the position of an object within an image by measuring its center of mass relative to the image coordinates, resulting in x and y coordinate values in the range between 0 and 1. Next, we characterize the object's boundary complexity by computing its isoperimetric quotient (i.e., 4πA/P², relating the object's area A to its perimeter length P). Values range from 0 to 1, with larger values indicating simpler boundaries that are less jagged/dented (e.g., a circle). Finally, to gauge the relative size of a salient object in the image, we compute its coverage ratio, meaning the fraction of all image pixels that are occupied by the salient object's pixels.
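A minimal sketch of the three region measures follows, computed from a binary mask with NumPy and OpenCV. The function name is our own, and the boundary complexity is written in the standard isoperimetric form 4πA/P², which matches the 0-to-1 range and the circle-as-maximum behavior described above.

import cv2
import numpy as np

def region_measures(mask):
    # `mask` is a binary (H, W) array with nonzero values on the salient object.
    h, w = mask.shape
    ys, xs = np.nonzero(mask)

    # Center of mass, normalized to [0, 1] image coordinates.
    center_x, center_y = xs.mean() / w, ys.mean() / h

    # Coverage ratio: fraction of all image pixels occupied by the object.
    area = float(np.count_nonzero(mask))
    coverage = area / (h * w)

    # Boundary complexity as the isoperimetric quotient 4*pi*A / P^2:
    # 1.0 for a circle, smaller for jagged or dented boundaries.
    contours, _ = cv2.findContours(mask.astype(np.uint8),
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    perimeter = sum(cv2.arcLength(c, True) for c in contours)
    complexity = 4.0 * np.pi * area / (perimeter ** 2) if perimeter else 0.0

    return center_x, center_y, coverage, complexity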
We show summative statistics of our findings per dataset in Table 1 and Figure 3. In particular, in Table 1, we report how many images are in each dataset paired with what percentage of those images have salient objects with text, multiple regions, and holes. In Figure 3, we visualize statistics summarizing the values for each dataset's salient objects with respect to center of mass, boundary complexity, and coverage ratio using boxplots.

⁵We obfuscate all image content but the salient object and then check whether Microsoft Azure's OCR API returns text.
⁶Multiple regions means there are multiple separate polygons. This can occur either because multiple salient objects were annotated or because of occlusions that lead to more than one region for a single salient object.
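The text-presence check in footnote 5 can be sketched as follows. We substitute the open-source pytesseract engine as a stand-in for Microsoft Azure's OCR API, whose client calls we do not reproduce here, so treat this as an illustration of the mask-then-OCR idea rather than the authors' exact pipeline.

import numpy as np
import pytesseract
from PIL import Image

def salient_object_has_text(image, mask):
    # Black out everything except the salient object so the OCR engine
    # can only read text that lies on the object itself.
    pixels = np.array(image).copy()
    pixels[~np.asarray(mask, dtype=bool)] = 0
    text = pytesseract.image_to_string(Image.fromarray(pixels))
    return len(text.strip()) > 0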
While our findings highlight that our VizWiz-SO dataset has many distinct characteristics, one commonality it has with most existing salient object detection datasets is that the salient objects typically occupy centered positions within an image. Specifically, in Figure 3, we observe this trend for all datasets except HRSOD. We found this somewhat surprising since visually impaired photographers cannot visually inspect their images to verify they are conforming to the common photographer's bias of centering contents of interest they are trying to photograph. Yet, given our findings from Section 3.2.1 that photographers often are interested in learning about an image's salient object, our findings suggest these photographers have skills in centering contents of interest in pictures they take.

A unique aspect of our VizWiz-SO dataset is that it features more salient objects with textual data. Specifically, 68% of salient objects in VizWiz-SO contain text while the dataset with the next highest prevalence of text, PASCAL-S [29], only has it for 24% of the images (Table 1). A gap of this magnitude (i.e., 44 percentage points) suggests that our new dataset offers a considerable domain shift in the salient object detection problem space. We suspect part of this shift stems from the types of salient objects included, with many more daily objects such as products (e.g., food packages) included in our VizWiz-SO dataset.

Another unique aspect of VizWiz-SO is that far fewer images feature salient objects that consist of multiple regions; i.e., only 1% of images (Table 1). We suspect this distinction stems from our unique approach of adopting a rigorous annotation preprocessing step, where we require crowdworkers to verify images have one unambiguous salient object before allowing them to annotate images for use in our final dataset. Any remaining objects in our dataset with multiple regions are therefore highly likely a result of occlusions breaking a single salient object into multiple polygons, which evidently is incredibly rare.

VizWiz-SO is also unique due to the rarity with which salient objects contain holes; i.e., holes are only observed for 4% of images (Table 1). From visual inspection, we suspect this finding reflects a domain shift in the types of content found in the datasets. For example, examples from other datasets of objects with holes include people riding bikes, people dancing, and animals in intricate poses. In contrast, in VizWiz-SO, objects with holes include retail packaging made to hang from hooks, pairs of scissors, and coffee mugs. We posit the lower prevalence of holes in VizWiz-SO stems from the fact that images originate from an authentic use case where photographers primarily photograph household and retail items, which naturally feature fewer holes.

A further distinction of our VizWiz-SO dataset is that the salient objects tend to have less complex boundaries (Figure 3). We suspect this is again because of a domain shift in the types of objects in our dataset, with many more human-made items, such as food packaging boxes and cans, that by design are typically more structured in shape.

A final distinction of salient objects in our VizWiz-SO dataset is how much of the image they occupy (Figure 3). First, they tend to occupy a much larger amount of the image than observed in other datasets. Specifically, they on average occupy roughly half of all image pixels, with a mean coverage ratio of 0.5 and a median of 0.46. In contrast, the dataset with the next highest coverage ratio statistics is PASCAL-S [29], and over 75% of its images contain salient objects that occupy less than half of the image pixels. We attribute this distinction to the authentic use case of our dataset, where visually impaired photographers attempting to learn about the salient objects they are photographing seem to be taking zoomed-in or close-to-camera images of the content of interest. Another unique aspect of our salient objects is that they exhibit a larger range of sizes, as shown by the gaps between the 25th and 75th percentile values of each box. For example, PASCAL-S features the next largest interquartile range with a 23% gap (i.e., 19% to 42%). In contrast, the gap for VizWiz-SO is more than twice as large at 56% (i.e., 22% to 78%). Consequently, a unique challenge of our dataset for algorithms is that they no longer can assume a strong bias regarding a salient object's relative size.
4. Algorithm Benchmarking

We benchmark modern salient object detection algorithms to show how they perform on our new dataset. We conducted all experiments on an NVIDIA A100 GPU.

4.1. Experimental Design
Dataset Splits. We use the existing splits available for the VizWiz-Captions dataset [25], which translates to approximately a 60/20/20 training, validation, and test split for our VizWiz-SO dataset. In particular, from the 32,000 annotated images, the number of images in each split respectively is 19,116, 6,105, and 6,779.

Evaluation Metrics. We evaluate each model with respect to five popular metrics for salient object detection models: Mean Absolute Error (MAE), Structure Measure (Sm), Mean F-Measure (Fm), Enhanced Alignment Measure (Em), and Intersection over Union (IoU).
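For reference, here is a minimal sketch of the two simplest metrics, MAE and the F-measure with the β² = 0.3 weighting that is conventional in this literature [2]; the structure- and alignment-based measures (Sm [19], Em [20]) have published reference implementations and are not reproduced here.

import numpy as np

def mae(pred, gt):
    # Mean absolute error between a predicted saliency map and the binary
    # ground truth, both scaled to [0, 1]; lower is better.
    return np.abs(pred.astype(float) - gt.astype(float)).mean()

def f_measure(pred, gt, threshold=0.5, beta_sq=0.3):
    # Binarize the prediction, then combine precision and recall.
    pred_mask, gt_mask = pred >= threshold, gt.astype(bool)
    tp = np.logical_and(pred_mask, gt_mask).sum()
    precision = tp / max(pred_mask.sum(), 1)
    recall = tp / max(gt_mask.sum(), 1)
    denom = beta_sq * precision + recall
    return (1 + beta_sq) * precision * recall / denom if denom else 0.0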
Algorithms. We benchmark the following seven methods from the past three years to assess the difficulty of our new dataset for modern salient object detection models:

• Boundary Aware Segmentation Network (BASNet) [38]: an appealing model for real-time applications like our target use case because it can achieve 70fps at inference time while achieving competitive performance (i.e., it was a top performer in 2019).
• Fusion, Feedback and Focus Network (F3Net) [43]: the state-of-the-art model on five datasets in 2020.
• U2 Network (U2Net) [1]: an appealing model for real-world applications like our target use case because it has a very light footprint (4.7MB), and so is more suitable for resource-constrained devices such as smartphones. It achieved competitive performance in 2020.
• Visual Saliency Transformer (VST) [30]: achieved state-of-the-art performance in 2021, and is based purely on a transformer architecture.
• Pyramidal Feature Shrinking Network (PFSNet) [33]: achieved state-of-the-art performance on five datasets in 2021; it consists of a decoder that hierarchically aggregates adjacent feature nodes to avoid the problem of leaping feature fusion.
• Pyramid Grafting Network (PGNet) [44]: introduced in 2022, it is a one-stage framework based on a transformer and CNN backbone that achieves state-of-the-art performance on five benchmark datasets [41,44,46,48].
• Dichotomous Image Segmentation (DIS) [37]: also introduced in 2022 as the state-of-the-art method for the DIS5K [37] dataset; it is designed for detecting salient objects in high resolution images, which makes it relevant for our use case where many images coming from people with vision impairments are relatively high resolution.

We further characterize each model by identifying the backbone architecture, the datasets used for training, the image size used for training, and the model footprint. These characteristics are reported in Table 2.

Method       Backbone    Training set  Input size    Size (MB)  MAE↓   Sm↑    Fm↑    Em↑    IoU↑
HP           -           -             -             -          0.02   0.92   0.96   0.97   0.94
BASNet [38]  R-34        D             256²          333        0.28   0.59   0.77   0.64   0.62
F3Net [43]   R-50        D             352²          98         0.28   0.55   0.74   0.65   0.53
U2Net [1]    -           D             320²          4.7        0.26   0.61   0.80   0.65   0.63
VST [30]     T2T-ViT     D             224²          171        0.17   0.65   0.83   0.76   0.73
PFSNet [33]  R-50        D             352²          120        0.32   0.48   0.70   0.60   0.48
PGNet [44]   R-18+Swin   D+HR          224², 1024²   280        0.21   0.62   0.79   0.74   0.67
DIS [37]     U2Net       DIS5K         1024²         169        0.36   0.46   0.61   0.55   0.49
VST-FT       VST         D+VW          224²          171        0.19   0.64   0.74   0.77   0.70
VST-S        ViT         VW            224²          171        0.21   0.63   0.72   0.70   0.69

Table 2. Analysis of existing algorithms that we benchmark on our VizWiz-SO dataset, including both off-the-shelf models (which are cited) as well as those fine-tuned (-FT) and trained from scratch (-S). We first report differentiating attributes of the algorithm architectures and then present the model performance on VizWiz-SO with respect to five widely-used metrics. (HP=Human Performance; R=ResNet [26]; ViT=Vision Transformer [15]; Swin=Shifted window transformer [31]; D=DUTS-TR [41]; VW=VizWiz-SO; HR=HRSOD [48])

All models predict saliency maps that assign a brightness to each pixel within the same spatial resolution as the input image, e.g., values in [0, 1] or alternatively in [0, 255]. The predictions generated by salient object detection models are then converted into binary masks.
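A minimal sketch of that conversion is below. A fixed 0.5 cut is one common choice; the exact threshold used per model is an assumption here, as papers in this space also use adaptive thresholds such as twice the mean saliency.

import numpy as np

def binarize_saliency(saliency, threshold=0.5):
    # Normalize a saliency map from [0, 255] (or already [0, 1]) to [0, 1],
    # then cut it into a binary foreground mask.
    saliency = saliency.astype(float)
    if saliency.max() > 1.0:
        saliency = saliency / 255.0
    return saliency >= threshold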
Humans. We also evaluate human performance to establish an upper bound for what we should strive for from automated methods. Since we collect two human annotations per image in our dataset, we calculate human performance by comparing the two annotations in cases where the IoU is greater than 0.90.
4.2. Performance for Off-The-Shelf Models

We first evaluate each of the algorithms as is in their original design. Results are shown in Table 2.

We observe that VST [30] is the top-performing model. Yet, it still falls short of human performance. For example, the gap in performance is 0.15 in terms of MAE, 0.21 in terms of IoU, 0.26 for Sm, and 0.20 for Em. Consequently, this dataset offers a new challenging benchmark for the community.

A further observation is that the models perform poorly on the VizWiz-SO dataset in comparison to their performance on the original datasets for which they were benchmarked. For example, the MAE and Sm performance observed by PGNet [44] on DUTS-TE is 0.028 and 0.912 respectively, versus 0.2123 and 0.6233 respectively for our dataset. We hypothesize that part of the reason for this poor performance is that models trained and evaluated on other datasets are not able to learn how to generalize to the real-world challenges that arise for images taken by visually impaired photographers.
4.3. Performance When Training on VizWiz-SO

We next explore whether training the top-performing algorithm, VST [30], on our new dataset will lead to improved performance. To do so, we analyze two additional models: (1) the pretrained VST [30] model fine-tuned on VizWiz-SO (VST-FT) and (2) the VST [30] architecture trained from scratch on VizWiz-SO (VST-S). We use the default hyperparameters reported in the VST [30] paper for model training. Results are shown in Table 2.

We observe that both models, i.e., created by training from scratch and fine-tuning on our VizWiz-SO dataset, achieve worse results than the baseline of not training the algorithm on our dataset. This suggests that the training data used by algorithms is not the only culprit for what makes our new dataset challenging. Rather, our findings suggest that new algorithmic frameworks are also needed to achieve strong generalization performance on our new dataset.
4.4. Fine-grained Analysis

We next conduct a fine-grained analysis to better isolate what makes our dataset challenging for modern algorithms. To do so, we divide our VizWiz-SO test set according to the following four factors, with the first three based on metadata collected in Section 3.2 to characterize our dataset:

• Text Presence: two groups based on whether text is present in the salient object.
• Coverage Ratio (Coverage): three groups based on the 33rd and 66th percentile values in our dataset; all images with a coverage ratio less than 0.32 have small coverage, between 0.32 and 0.62 medium coverage, and greater than 0.62 large coverage (see the sketch following Table 3).
• Boundary Complexity (Complexity): two groups formed by splitting around the mean boundary complexity score (i.e., 0.66), with high boundary complexity when the score is less than the mean and low boundary complexity otherwise.
• Image Quality: leveraging metadata from prior work [25], which indicates how many of the five crowdworkers marked an image as of insufficient quality to recognize the content, we split the images into two groups, with good quality being when none of the crowdworkers indicated insufficient quality and poor otherwise.

Factor         Group   BASNet [38]  F3Net [43]  U2Net [1]  VST [30]  PFSNet [33]  PGNet [44]  DIS [37]  VST-FT  VST-S
Text Presence  True    0.23         0.22        0.22       0.13      0.25         0.16        0.32      0.16    0.17
Text Presence  False   0.35         0.38        0.32       0.24      0.42         0.29        0.40      0.24    0.26
Coverage       Small   0.06         0.16        0.07       0.11      0.16         0.12        0.10      0.09    0.11
Coverage       Medium  0.15         0.20        0.15       0.09      0.24         0.15        0.25      0.09    0.10
Coverage       Large   0.60         0.47        0.54       0.30      0.54         0.35        0.70      0.38    0.39
Complexity     High    0.15         0.21        0.15       0.12      0.24         0.16        0.21      0.11    0.12
Complexity     Low     0.38         0.34        0.35       0.21      0.38         0.25        0.48      0.26    0.27
Image Quality  Good    0.22         0.23        0.21       0.14      0.26         0.17        0.30      0.16    0.17
Image Quality  Poor    0.44         0.43        0.41       0.27      0.47         0.34        0.50      0.30    0.31

Table 3. Fine-grained analysis of existing algorithms (reported as MAE, where lower is better) with respect to the presence of text on the salient object (“Text Presence”), the relative size of the salient object in the image (“Coverage”), the relative complexity of the salient object's boundary (“Complexity”), and image quality (“Image Quality”). As shown, the algorithms perform worse when salient objects lack text, occupy a large portion of the image, or have less complex boundaries, as well as when the image quality is poor.
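A minimal sketch of the coverage binning described in the list above, using its 0.32 and 0.62 cut points; the function name and the use of NumPy are our own assumptions rather than the authors' analysis code.

import numpy as np

def coverage_group(mask):
    # Coverage ratio: fraction of all image pixels that belong to the
    # salient object, binned at the dataset's 33rd/66th percentile cuts.
    mask = np.asarray(mask, dtype=bool)
    ratio = mask.sum() / mask.size
    if ratio < 0.32:
        return "small"
    return "medium" if ratio <= 0.62 else "large"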
Due to space constraints, we only report results in the main paper with respect to the Mean Absolute Error [35]. Results for all benchmarked models are shown in Table 3.

In terms of text presence, we see that the models perform better when there is text present as opposed to when there is none. For example, the performance drops by 0.11 for the best performing model, VST. We suspect visual patterns that arise with text may serve as a valuable cue to models in locating salient objects.

Next, we see that as the coverage ratio of the salient objects increases, the models tend to perform worse. For instance, the best performing model, VST, has a performance dropoff of 0.19 when predicting images with large coverage ratios as opposed to small coverage ratios. We see an even greater performance dropoff from other models, such as 0.60 for DIS. We suspect this performance gap arises in part from the fact that existing datasets largely lack such large salient objects, which could have affected both what algorithms were designed to handle and what they could learn from the data they observed.

Further observed trends are that performance drops for salient objects with lower boundary complexity and for poorer quality images. These are two additional factors that reflect domain shifts between our dataset and prior datasets that could have affected the design of algorithms as well as what they could learn from their training data.
5. Conclusions

We introduce the VizWiz-SalientObject dataset to encourage the community to design more generalized salient object detection models that can handle a larger range of challenges motivated by our authentic use case, challenges that also can occur in many real-world applications. We offer our experimental findings from benchmarking modern salient object detection algorithms as a valuable starting point for identifying future research directions. To summarize, new models are needed to better handle salient objects that are large, have less complex boundaries, and lack text, as well as to work well in the presence of lower quality images.

We now close with a discussion of some ethical implications of our work. While we are motivated to better assist a population that is traditionally marginalized in society, we acknowledge our work can lead to potentially adverse social effects. Our concern is primarily centered on bad-actor behaviors intended to exploit the privacy, autonomy, and livelihoods of a population demographic inherently susceptible to such behavior. Bad actors could use our work to deceive visually impaired individuals in harmful ways, such as through fraud, scams, and other deceptive practices, for example by intercepting their visual media and replacing automatically detected salient objects with misinformation.
Acknowledgments. This project was supported in part by a National Science Foundation SaTC award (#2148080) and Amazon Mechanical Turk. We thank Leah Findlater and Yang Wang for contributing to this research idea.
References

[1] U2-Net: Going deeper with nested U-structure for salient object detection. Pattern Recognition, 106:107404, 2020.
[2] Radhakrishna Achanta, Sheila Hemami, Francisco Estrada, and Sabine Süsstrunk. Frequency-tuned salient region detection. In CVPR, pages 1597–1604, 2009.
[3] AIpoly. AIpoly homepage. https://www.aipoly.com/, 2020. (Accessed on 01/08/2020).
[4] Aira. Aira homepage. https://aira.io/, 2020. (Accessed on 01/08/2020).
[5] BeSpecular. BeSpecular press kit.
[6] BeSpecular. BeSpecular. https://www.bespecular.com/, 2020. (Accessed on 01/08/2020).
[7] Ali Borji, Ming-Ming Cheng, Qibin Hou, Huaizu Jiang, and Jia Li. Salient object detection: A survey. Computational Visual Media, pages 1–34, 2019.
[8] Ali Borji, Ming-Ming Cheng, Huaizu Jiang, and Jia Li. Salient object detection: A benchmark. IEEE Transactions on Image Processing, 24(12):5706–5722, 2015.
[9] Erin Brady and Jeffrey P. Bigham. Crowdsourcing accessibility: Human-powered access technologies. 2015.
[10] Erin L. Brady, Yu Zhong, Meredith Ringel Morris, and Jeffrey P. Bigham. Investigating the appropriateness of social network question asking as a resource for blind users. In Proceedings of the 2013 Conference on Computer Supported Cooperative Work, pages 1225–1236. ACM, 2013.
[11] Chongyan Chen, Samreen Anjum, and Danna Gurari. Grounding answers for visual questions asked by visually impaired people. arXiv preprint arXiv:2202.01993, 2022.
[12] Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr, and Shi-Min Hu. Global contrast based salient region detection. IEEE Transactions on Pattern Analysis and Machine Intelligence, 37(3):569–582, 2014.
[13] Tai-Yin Chiu, Yinan Zhao, and Danna Gurari. Assessing image quality issues for real-world problems. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3646–3656, 2020.
[14] Ned Desmond. Microsoft's Seeing AI founder Saqib Shaikh is speaking at Sight Tech Global.
[15] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale, 2020.
[16] Mark Everingham, Luc Van Gool, Christopher K. Williams, John Winn, and Andrew Zisserman. The PASCAL visual object classes (VOC) challenge. International Journal of Computer Vision, 88(2):303–338, June 2010.
[17] Be My Eyes. Be My Eyes: Our story.
[18] Be My Eyes. Bringing sight to blind and low-vision people. https://www.bemyeyes.com/, 2020. (Accessed on 01/08/2020).
[19] Deng-Ping Fan, Ming-Ming Cheng, Yun Liu, Tao Li, and Ali Borji. Structure-measure: A new way to evaluate foreground maps. In ICCV, pages 4548–4557, 2017.
[20] Deng-Ping Fan, Cheng Gong, Yang Cao, Bo Ren, Ming-Ming Cheng, and Ali Borji. Enhanced-alignment measure for binary foreground map evaluation. In IJCAI, pages 698–704, 2018.
[21] American Foundation for the Blind. Low vision optical devices.
[22] Ashish Kumar Gupta, Ayan Seal, Mukesh Prasad, and Pritee Khanna. Salient object detection techniques in computer vision – a survey. Entropy, 22(10), 2020.
[23] Danna Gurari, Kun He, Bo Xiong, Jianming Zhang, Mehrnoosh Sameki, Suyog Dutt Jain, Stan Sclaroff, Margrit Betke, and Kristen Grauman. Predicting foreground object ambiguity and efficiently crowdsourcing the segmentation(s). International Journal of Computer Vision, 126(7):714–730, 2018.
[24] Danna Gurari, Qing Li, Chi Lin, Yinan Zhao, Anhong Guo, Abigale Stangl, and Jeffrey P. Bigham. VizWiz-Priv: A dataset for recognizing the presence and purpose of private visual information in images taken by blind people. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 939–948, 2019.
[25] Danna Gurari, Yinan Zhao, Meng Zhang, and Nilavra Bhattacharya. Captioning images taken by people who are blind. In European Conference on Computer Vision, pages 417–434. Springer, 2020.
[26] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015.
[27] iDentifi. iDentifi. http://getidentifi.com/, 2020. (Accessed on 01/08/2020).
[28] Hernisa Kacorri, Kris M. Kitani, Jeffrey P. Bigham, and Chieko Asakawa. People with visual impairment training personal object recognizers: Feasibility and challenges. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems, pages 5839–5849, 2017.
[29] Yin Li, Xiaodi Hou, Christof Koch, James M. Rehg, and Alan L. Yuille. The secrets of salient object segmentation. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 280–287, 2014.
[30] Nian Liu, Ni Zhang, Kaiyuan Wan, Ling Shao, and Junwei Han. Visual saliency transformer, 2021.
[31] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows, 2021.
[32] LookTel. LookTel Recognizer.
[33] Mingcan Ma, Changqun Xia, and Jia Li. Pyramidal feature shrinking for salient object detection. Proceedings of the AAAI Conference on Artificial Intelligence, 35(3):2311–2318, May 2021.
[34] MDN. fill-rule. SVG: Scalable Vector Graphics, MDN Web Docs.
[35] Federico Perazzi, Philipp Krähenbühl, Yael Pritch, and Alexander Hornung. Saliency filters: Contrast based filtering for salient region detection. In CVPR, pages 733–740, 2012.
[36] Federico Perazzi, Jordi Pont-Tuset, Brian McWilliams, Luc Van Gool, Markus Gross, and Alexander Sorkine-Hornung. A benchmark dataset and evaluation methodology for video object segmentation. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
[37] Xuebin Qin, Hang Dai, Xiaobin Hu, Deng-Ping Fan, Ling Shao, and Luc Van Gool. Highly accurate dichotomous image segmentation, 2022.
[38] Xuebin Qin, Deng-Ping Fan, Chenyang Huang, Cyril Diagne, Zichen Zhang, Adrià Cabeza Sant'Anna, Albert Suàrez, Martin Jagersand, and Ling Shao. Boundary-aware segmentation network for mobile and web applications, 2021.
[39] Abigale J. Stangl, Esha Kothari, Suyog D. Jain, Tom Yeh, Kristen Grauman, and Danna Gurari. BrowseWithMe: An online clothes shopping assistant for people with visual impairments. In Proceedings of the 20th International ACM SIGACCESS Conference on Computers and Accessibility, pages 107–118, 2018.
[40] TapTapSee. TapTapSee.
[41] Lijun Wang, Huchuan Lu, Yifan Wang, Mengyang Feng, Dong Wang, Baocai Yin, and Xiang Ruan. Learning to detect salient objects with image-level supervision. In CVPR, 2017.
[42] Wenguan Wang, Shuyang Zhao, Jianbing Shen, Steven C. H. Hoi, and Ali Borji. Salient object detection with pyramid attention and salient edges. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2019.
[43] Jun Wei, Shuhui Wang, and Qingming Huang. F3Net: Fusion, feedback and focus for salient object detection, 2019.
[44] Chenxi Xie, Changqun Xia, Mingcan Ma, Zhirui Zhao, Xiaowu Chen, and Jia Li. Pyramid grafting network for one-stage high resolution saliency detection, 2022.
[45] Qiong Yan, Li Xu, Jianping Shi, and Jiaya Jia. Hierarchical saliency detection. In 2013 IEEE Conference on Computer Vision and Pattern Recognition, pages 1155–1162, 2013.
[46] Chuan Yang, Lihe Zhang, Huchuan Lu, Xiang Ruan, and Ming-Hsuan Yang. Saliency detection via graph-based manifold ranking. In Computer Vision and Pattern Recognition (CVPR), 2013 IEEE Conference on, pages 3166–3173. IEEE, 2013.
[47] Xiaoyu Zeng, Yanan Wang, Tai-Yin Chiu, Nilavra Bhattacharya, and Danna Gurari.
|
1201 |
+
Vision skills needed to an-
|
1202 |
+
swer visual questions. Proceedings of the ACM on Human-
|
1203 |
+
Computer Interaction, 4(CSCW2):1–31, 2020. 2
|
1204 |
+
[48] Yi Zeng, Pingping Zhang, Jianming Zhang, Zhe Lin, and
|
1205 |
+
Huchuan Lu. Towards high-resolution salient object detec-
|
1206 |
+
tion. In Proceedings of the IEEE/CVF International Confer-
|
1207 |
+
ence on Computer Vision, pages 7234–7243, 2019. 4, 5, 7,
|
1208 |
+
13
|
1209 |
+
Appendix

This document supplements the main paper with additional information concerning:

1. Dataset Creation (supplements Section 3.1)
   • Annotation Task Interface
   • Worker Qualification Task
   • Analysis of Workers' Annotation Differences
2. Dataset Analysis: VizWiz-SO vs Existing Datasets (supplements Section 3.2.2)
3. Experimental Design (supplements Section 4.1)

A. Dataset Creation
A.1. Annotation Task Interface

The task interface displays five images within a tabbed container on the left and preliminary questions with task instructions on the right. A screenshot of the task interface (without instructions) is shown in Figure 4.

To account for occlusions and holes while keeping the task simple for annotators, we permitted annotators to generate multiple polygons. For occlusions, annotators could use as many polygons as necessary for demarcating foreground objects partitioned into multiple regions. For holes, we apply an even-odd fill rule to images featuring foreground objects with holes. With an even-odd fill rule, every area inside an even number of enclosed areas becomes hollow, and every region inside an odd number of enclosed areas becomes filled [34]. By treating the image's four corners as the first enclosed area, the outermost boundary of the foreground object becomes the second enclosed area. Holes within foreground objects then represent the third layer of enclosed areas, allowing annotators to demarcate foreground objects featuring holes. In practice, annotators first trace the outermost boundary of the foreground object and close the path by clicking the first point a second time; they then trace any holes within the foreground object, so those holes end up in odd-numbered layers.
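To make the even-odd rule concrete, the following is a minimal sketch (not the authors' implementation) that rasterizes a set of annotator-traced polygons into a single binary mask. It applies the standard parity rule directly to the traced polygons, without the extra image-corner layer described above; the function name and the polygon representation (a list of (x, y) vertex tuples) are our own assumptions.

```python
import numpy as np
from PIL import Image, ImageDraw

def rasterize_even_odd(polygons, height, width):
    """Combine traced polygons into one binary mask with the
    even-odd rule: a pixel is foreground iff it lies inside an
    odd number of polygons, so a hole traced inside an object
    boundary is carved out automatically."""
    mask = np.zeros((height, width), dtype=bool)
    for poly in polygons:  # poly: list of (x, y) vertices
        layer = Image.new("1", (width, height), 0)
        ImageDraw.Draw(layer).polygon(poly, outline=1, fill=1)
        mask ^= np.array(layer, dtype=bool)  # XOR == parity rule
    return mask

# Toy example: a square object with a square hole.
outer = [(10, 10), (90, 10), (90, 90), (10, 90)]
hole = [(40, 40), (60, 40), (60, 60), (40, 60)]
m = rasterize_even_odd([outer, hole], 100, 100)
assert m[20, 20] and not m[50, 50]  # inside object; inside hole
```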
A.2. Worker Qualification Task

We administered a qualification task for workers to support our collection of high-quality ground truth annotations. The qualification task required annotating five images, each of which features a distinct challenging annotation scenario. All five images are shown in Figure 5. The first two images show a table and a bench, offering examples with complex boundaries and holes. The next two images feature a person holding a coffee mug, to support educating a crowdworker about our expectations for annotating objects with complex geometries that have many curves and occlusions that require annotating multiple polygons. The final image is a spatula; it verified a crowdworker's ability to correctly identify and annotate multiple holes that can arise within the salient object.

Figure 4. A screenshot of our annotation task interface. [The screenshot shows five image tabs (Image 1 through Image 5), a notice that work may be rejected for not following instructions, and three steps: Step 1, "Is the image showing a screenshot?" (Yes/No); Step 2, "Is there a single prominent foreground object?" (Yes/No); Step 3, "Demarcate the prominent foreground object", with controls such as Prev/Next Image, Select Full Image, Undo Last Point, and Clear All Polygons.]

Figure 5. The five images used for the worker qualification task. Each was selected to demonstrate a challenging annotation scenario such as complex boundaries, holes, and occlusions.
After crowdworkers annotated each qualification image, the backend code of our website checked whether their annotation was sufficiently similar to the ground truth (GT) annotation, i.e., had an IoU similarity of at least 0.90. Crowdworkers could only proceed to the following image after they obtained an IoU ≥ 0.90 on the current image. Crowdworkers obtaining an IoU ≥ 0.90 on all five qualification assessment images on a per-image basis gave us substantial confidence that they would be able to successfully handle complex and challenging outlier cases within the original VizWiz dataset.⁷

⁷Some crowdworkers did not pass the qualification assessment due to time constraints. In these cases, crowdworkers would contact us with the images they annotated. If we were confident in their annotation abilities, we manually added these crowdworkers to the qualified worker pool.
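The per-image gate is a plain mask-IoU check. A minimal sketch follows; the function names and the binary-mask representation are our assumptions, while the 0.90 threshold is the one stated above.

```python
import numpy as np

def mask_iou(pred: np.ndarray, gt: np.ndarray) -> float:
    """IoU between two binary masks of the same shape."""
    pred, gt = pred.astype(bool), gt.astype(bool)
    union = np.logical_or(pred, gt).sum()
    if union == 0:  # both masks empty: treat as a perfect match
        return 1.0
    return float(np.logical_and(pred, gt).sum() / union)

def passes_qualification(pred, gt, threshold=0.90):
    """Gate applied after each qualification image."""
    return mask_iou(pred, gt) >= threshold
```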
A.3. Analysis of Workers' Annotation Differences

We collected a larger number of redundant annotations per image for a random subset of images to better explore when and why annotation differences are observed from different workers. Specifically, for this analysis, we collected four annotations as opposed to two for a subset of 1,237 images. Examples of the redundant annotations collected per image are shown in Figure 6.

The first example (i.e., row 1 of Figure 6) highlights that annotation differences can stem from challenging annotation scenarios where objects contain holes (e.g., in the mug handle) or are occluded (e.g., by the straw). For instance, the hole was not annotated in the third annotation. Additionally, only the fourth annotation captured the occlusion that arises from the straw.
The second example (i.e., row 2 of Figure 6) highlights that annotation differences can stem from ambiguity regarding what is the salient object. As shown, the first two annotations flag the image as lacking a foreground object, the third annotation identifies the child holding the cup as the salient object, and the fourth annotation identifies the child's cup as the salient object.

Figure 6. Example annotations from our random subset where we collected four annotations as opposed to two. We find worker differences primarily occur in challenging annotation scenarios such as holes, occlusions, complex boundaries, and object saliency.
The third example (i.e., row 3 of Figure 6) highlights that annotation differences can also arise for objects that simultaneously have complex boundaries and holes. In annotation one, the worker did not fully annotate the salient object, cutting out part of the object from the annotation. Only the third and fourth annotations accurately annotate all holes that are present in the salient object's boundary while also having tight boundaries in the annotation.

In summary, we found occlusions, holes, and saliency ambiguity to be the primary factors contributing to annotation differences. In the case of occlusions, worker differences can arise when deciding whether to include objects that are a composite part of the salient object. In the case of holes, annotation differences can arise regarding which holes to annotate. Last, we found that it can be ambiguous which object is the most salient.
Figure 7. Example ground truth annotations from the HRSOD dataset, which exemplify that salient objects are usually not centered in the image. This is a common trend in the dataset.
B. Dataset Analysis

B.1. VizWiz-SO vs Existing Datasets

We present finer-grained details about typical image resolutions for the different salient object detection datasets to expand upon discussions in the main paper about how VizWiz-SO relates to other datasets. Specifically, we report the median image width (Med. W), median image height (Med. H), and whether the dataset supports high resolution images (High Res.), defined by whether the median image height and width are greater than 1080 and 1920 respectively. Results are reported in Table 4. We observe that our new dataset, overall, provides higher resolution images than most datasets.

We also expand on a surprising finding reported in our main paper: the HRSOD dataset is the only one for which salient objects do not occupy the typical center positions. To do so, we visualize the ground truth masks of some non-centered objects in Figure 7. In row one, we see that objects are horizontally distributed to left and right positions of the images. Similarly, we observe in row two that the salient objects are vertically distributed to the top and bottom positions of the images.

Dataset        | Med. W | Med. H | High Res.
DAVIS-S [48]   | 1080   | 1920   | ✓
PASCAL-S [29]  | 375    | 500    | –
HR [48]        | 2704   | 3264   | ✓
ECSSD [45]     | 300    | 400    | –
DUT-O [46]     | 300    | 400    | –
UH [44]        | 3612   | 5000   | ✓
DUTS [41]      | 300    | 400    | –
Ours           | 1296   | 968    | –

Table 4. Characterization of our VizWiz-SO dataset and seven existing salient object detection datasets with respect to metrics showcasing the image resolution. This includes median image width ("Med. W"), median image height ("Med. H"), and a flag indicating if high resolution ("High Res."). (HR=HRSOD; UH=UHRSD)

C. Algorithmic Benchmarking

C.1. Experimental Design

We compute the five metrics used in the benchmarking section using the following definitions:

Mean Absolute Error [35] represents the average absolute difference between the predicted saliency map and its
ground truth per pixel. It can be given as:

    MAE = (1 / (H · W)) · Σ_{r=1}^{H} Σ_{c=1}^{W} |pred(r, c) − gt(r, c)|        (1)

where pred represents the predicted saliency map, gt represents the ground truth, (H, W) represents the height and width of the image, and (r, c) represents the pixel coordinates for the given image.
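As a reference point, a minimal NumPy sketch of Eq. (1); it assumes both maps are already scaled to [0, 1], and the function name is ours:

```python
import numpy as np

def mae(pred: np.ndarray, gt: np.ndarray) -> float:
    """Eq. (1): per-pixel mean absolute error between a predicted
    saliency map and its ground truth, both in [0, 1]."""
    pred = pred.astype(np.float64)
    gt = gt.astype(np.float64)
    return float(np.mean(np.abs(pred - gt)))
```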
Structure Measure [19] is used to measure the similarity between the predicted saliency map and the ground truth. Since we convert both the predictions and ground truths into the [0, 1] range, we apply the formula directly to the predictions and maps. It can be defined as follows:

    S_m = (1 − α) · S_r + α · S_o        (2)

where S_r is the region-aware similarity score, S_o is the object-aware similarity score, and α is the weight used to combine the two scores. We set α = 0.5, ensuring equal contribution from the region-aware and object-aware scores.
F-Measure [2] represents the precision and recall ratio for the given prediction. It can be represented as:

    F_m = ((1 + β²) · Precision · Recall) / (β² · Precision + Recall)        (3)

Here Precision = TP / (TP + FP) and Recall = TP / (TP + FN), computed per pixel over the entire prediction image. We set β² = 0.3 and report the average of all F-measures as F_m, similar to previous works.
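A sketch of Eq. (3) at a single binarization threshold (the reported score averages over thresholds); the names, the 0.5 threshold, and the small epsilon guard are our assumptions, while β² = 0.3 follows the text:

```python
import numpy as np

def f_measure(pred, gt, threshold=0.5, beta_sq=0.3, eps=1e-8):
    """Eq. (3) for one binarization threshold of the prediction."""
    p = np.asarray(pred) >= threshold
    g = np.asarray(gt).astype(bool)
    tp = np.logical_and(p, g).sum()
    fp = np.logical_and(p, ~g).sum()
    fn = np.logical_and(~p, g).sum()
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    return float((1 + beta_sq) * precision * recall
                 / (beta_sq * precision + recall + eps))
```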
Enhanced Alignment Measure [20] is used as the metric to measure the effectiveness of the saliency prediction against the ground truth. It captures pixel-level matching information and image-level statistics in a single metric by means of an enhanced alignment matrix φ. It is defined as follows:

    E_m = (1 / (H · W)) · Σ_{r=1}^{H} Σ_{c=1}^{W} φ_FM(r, c)        (4)

where φ_FM represents the enhanced alignment matrix for the foreground map, (H, W) represents the height and width of the image, and (r, c) represents the pixel coordinates for the given image.
Intersection over Union, also known as the Jaccard Index, is used to determine the similarity between sample sets; in this case it captures the overlap between the ground truth and the prediction map of the salient object. We convert the predictions into a binary map and compute the Jaccard Index over the two classes. It can be defined as follows:

    IoU = J(A, B) = |A ∩ B| / |A ∪ B|        (5)

where A and B are images of the same size, consisting of integer class values {0, 1}.
We further show how the models performed on VizWiz-SO with qualitative examples shown in Figure 8. These examples feature a variety of challenges we observed for the models, such as a large salient object, less complex boundaries, lack of text on the salient object, and lower quality images. For example, we observe how the models fail to perform adequately in identifying larger salient objects (rows 4 and 5). We also observe that the models perform better when salient objects contain text (rows 1 and 2) versus lack text (rows 5 and 6). Further, we see models perform worse for images that are of lower quality (rows 3, 4, and 5). Our fine-grained analysis in the main paper suggests each of these factors offers unique challenges for modern salient object detection models.
Figure 8. Examples of difficult images present in VizWiz-SO, with characteristics such as high coverage ratio, presence of text, less complex boundaries, and lower image quality. We show how the seven models perform on these cases as compared to the human annotation (GT=Ground Truth). We see that models such as PFSNet [33], DIS [37], and F3Net [43] do not always give us the correct salient objects, or sometimes no predictions at all. We also notice that VST [30] usually predicts salient objects with better accuracy compared to other models, but also suffers from not detecting the correct salient object.
B9E4T4oBgHgl3EQf5Q72/content/tmp_files/load_file.txt
ADDED
The diff for this file is too large to render.
See raw diff
EdFRT4oBgHgl3EQfBDfd/content/tmp_files/2301.13464v1.pdf.txt
ADDED
@@ -0,0 +1,3033 @@
TRAINING WITH MIXED-PRECISION FLOATING-POINT ASSIGNMENTS

Wonyeol Lee 1  Rahul Sharma 2  Alex Aiken 1

ABSTRACT

When training deep neural networks, keeping all tensors in high precision (e.g., 32-bit or even 16-bit floats) is often wasteful. However, keeping all tensors in low precision (e.g., 8-bit floats) can lead to unacceptable accuracy loss. Hence, it is important to use a precision assignment—a mapping from all tensors (arising in training) to precision levels (high or low)—that keeps most of the tensors in low precision and leads to sufficiently accurate models. We provide a technique that explores this memory-accuracy tradeoff by generating precision assignments that (i) use less memory and (ii) lead to more accurate models at the same time, compared to the precision assignments considered by prior work in low-precision floating-point training. Our method typically provides > 2× memory reduction over a baseline precision assignment while preserving training accuracy, and gives further reductions by trading off accuracy. Compared to other baselines which sometimes cause training to diverge, our method provides similar or better memory reduction while avoiding divergence.
1 Stanford University, USA. 2 Microsoft Research, India. Correspondence to: Wonyeol Lee <[email protected]>. Preprint. Under review.

1 INTRODUCTION

In deep neural network training, floating-point formats are usually used to represent tensors, and it is worthwhile to use the smallest bitwidth format that gives acceptable results. For example, it is common to replace tensors using 32-bit floats with tensors that use 16-bit floats (Micikevicius et al., 2018; Kalamkar et al., 2019). The benefits are easy to understand: computations using lower-precision floats not only use less memory but are also faster (due to improved vector parallelism, locality, and reduced data movement). The downside is that there is generally some loss of training accuracy, and in the worst case training may not even converge.

For such low-precision floating-point training, the most common approaches use two floating-point formats—one for lower-precision floats (e.g., 8-bit floats) and the other for higher-precision floats (e.g., 16-bit floats)—and assign one of the two formats to each tensor (including weights, activations, and their gradients). The precision assignments studied in previous work fall into one of two assignment schemes (which both have several variants): the uniform assignment uses low precision for almost all tensors (often excepting those in the first and/or last few layers) (Micikevicius et al., 2018), while the operator-based assignment limits low precision to the input tensors of certain operators (e.g., convolutions) (Sun et al., 2019). Prior work has shown that both precision assignment schemes (with well-chosen low-bitwidth floating-point formats) can match the accuracy of 32-bit-float training (Micikevicius et al., 2018; Kalamkar et al., 2019; Wang et al., 2018; Sun et al., 2019; Chmiel et al., 2021; Drumond et al., 2018; Cambier et al., 2020; Fox et al., 2021).

There is an important limitation in all prior approaches to low-precision floating-point training: they use very few precision assignments (most often just one) for a given set of models, but there are some other models and inputs where the chosen precision assignment (i) results in noticeably worse accuracy than 32-bit-float training, (ii) causes training to even diverge, or (iii) admits a more efficient assignment that achieves similar training accuracy (see Figures 1, 3, and 4).
In this paper, we present a new, automated method for choosing precision assignments that removes the limitations described above. To do so, we formally introduce the memory-accuracy tradeoff problem: given a dataset, a model, and two floating-point precision levels (i.e., bitwidths; high and low), find a mixed precision assignment (a mapping from all tensors arising in training to high/low precision) for the model that maximizes training accuracy subject to a given upper bound on the model aggregate (i.e., the total number of bits of all tensors appearing in training). The model aggregate is a proxy for the memory and time required for training, as it is roughly proportional to memory footprint and also well-correlated with training time (since training is often dominated by data movement) (Micikevicius et al., 2018).

We prove that the memory-accuracy tradeoff problem is theoretically difficult (namely NP-hard) partly due to the exponential number of possible mixed precision assignments (which we often refer to simply as precision assignments for brevity). The large number of possible assignments makes the problem difficult in practice as well: there is no known analytical method for predicting the training accuracy of a given precision assignment, and for any practical model there are far too many precision assignments to simply test them all.

Figure 1: Training trajectory of various models on CIFAR-100: (a) SqueezeNet, (b) ShuffleNet-v2, (c) MobileNet-v2. Colors denote precision assignments: all-32-bit π_fp32 (red), uniform π_unif (yellow), and operator-based π_op (blue) (see §3.1); the latter two use the 8-bit (and 16-bit) floats in (Sun et al., 2019) as low (and high) precision numbers. Markers denote the "width multiplier" of a model, which controls the capacity of the model (see §5.3): 1.0 (•), 0.5 (■), 0.25 (▲), and 0.1. Some lines of π_unif are missing as they converge to small values or diverge. Observe that neither π_unif nor π_op works best for all models: in some models, π_op has a similar accuracy to π_fp32; but in other (and all) models, the accuracy drops of π_op (and π_unif) from π_fp32 are noticeably large (i.e., >1%). [Each panel plots test accuracy (%) against training epoch.]
We propose a simple (heuristic) approach to the tradeoff problem that prioritizes tensors for low-precision formats based on the tensor's size (with an additional step described below). More specifically, our algorithm takes as input a single parameter giving a desired upper bound on the model aggregate. Starting with the largest tensor in the model, tensors are assigned low precision in size order (from largest to smallest) until the model aggregate falls below the given upper bound; all remaining tensors are assigned high precision (see the sketch below). Our main result is that this method discovers mixed precision assignments that use less memory while achieving higher training accuracy than previous approaches. While we cannot show that our method finds Pareto-optimal memory-accuracy tradeoffs, we do show that our results are closer to Pareto-optimal than prior methods.
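The demotion loop itself is only a few lines; the following Python sketch is our paraphrase of the description above (the dictionary-based bookkeeping and names are our assumptions, not the paper's code):

```python
def assign_precisions(sizes, lo_bits, hi_bits, max_aggregate_bits):
    """Demote tensors to low precision from largest to smallest
    until the model aggregate fits under the given bit budget;
    all remaining tensors stay in high precision."""
    fmt = {name: "hi" for name in sizes}  # sizes: name -> #elements
    aggregate = sum(n * hi_bits for n in sizes.values())
    for name in sorted(sizes, key=sizes.get, reverse=True):
        if aggregate <= max_aggregate_bits:
            break
        aggregate -= sizes[name] * (hi_bits - lo_bits)
        fmt[name] = "lo"
    return fmt
```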
Some precision assignments initially generated by our algorithm cause training to diverge due to an excessive number of overflows. To address this issue, we propose an overflow handling technique that promotes tensors causing too many overflows from low precision to high precision during training. In our experiments, these promotions consume only a small amount of additional memory (< 3% of the maximum model aggregate) and prevent training from diverging. The overflow handling technique is not specific to our algorithm and can be applied to other precision assignment methods as well.
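In sketch form, the promotion step might look as follows; the per-tensor overflow statistics and the promotion threshold are hypothetical placeholders (the paper does not fix them at this point in the text):

```python
def promote_overflowing(fmt, overflow_frac, threshold):
    """Promote any low-precision tensor whose fraction of
    overflowing elements in the current step exceeds the
    threshold; promoted tensors stay high-precision afterwards."""
    for name, frac in overflow_frac.items():
        if fmt[name] == "lo" and frac > threshold:
            fmt[name] = "hi"
    return fmt
```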
We evaluate a PyTorch implementation of our method using experiments on standard image classification tasks. We first demonstrate that the precision assignments computed by our method alleviate the limitations of existing methods: they indeed explore the tradeoff between memory and accuracy and exhibit a better tradeoff than the uniform and operator-based assignments. We then show that the two main components of our method (i.e., precision demotion of larger tensors and precision promotion of overflowing tensors) are both important to produce competitive precision assignments. We also provide some guidance on how users may apply our method to navigate the memory-accuracy tradeoff.
To summarize, this work makes four main contributions:

• We formally introduce the memory-accuracy tradeoff problem to explore better mixed precision assignments for low-precision floating-point training and prove the NP-hardness of the problem.
• We present a novel precision assignment technique, as a heuristic solution to the tradeoff problem, that proposes assignments based on a single parameter denoting a desired upper bound on the model aggregate.
• We present a novel technique that can handle an excessive number of overflows arising in training while using a small amount of additional memory. The technique can be applied to any (not just our) precision assignments.
• We demonstrate that the mixed precision assignments found by our method do explore the tradeoff between memory and training accuracy, and outperform existing precision assignment methods.
We remark that this work focuses on low-precision floating-point training, not fixed-point training (which uses fixed-point formats), since we want to target upcoming hardware (e.g., (Andersch et al., 2022)) with native support for low-precision floats (e.g., 8-bit floats) and their operations. Also, this work focuses on low-precision training (which trains a model from scratch), not inference (which assumes a pre-trained model). More discussion is in §2.
Our precision assignment method typically provides > 2× memory reduction over the operator-based assignment while maintaining similar training accuracy, and gives further reductions by trading off accuracy. Our method also provides similar memory reduction to the uniform assignment, while avoiding the divergence of training often caused by a uniform assignment.
The paper is organized as follows. After discussing related work (§2), we define the memory-accuracy tradeoff problem and study its hardness (§3). We then describe our algorithm for the problem (§4) and our evaluation (§5). We conclude with limitations and future work (§6).
2 RELATED WORK

Low-precision floating-point training has been extensively studied since the work of (Micikevicius et al., 2018). One active research direction is to select appropriate floating-point formats (or their variants) for low- and high-precision numbers in training. Various floating-point formats have been proposed, including FP16 (Micikevicius et al., 2018), BF16 (Kalamkar et al., 2019), FP8 (Wang et al., 2018), HFP8 (Sun et al., 2019), and FP6 (Chmiel et al., 2021), along with some variants such as HBFP (Drumond et al., 2018), S2FP8 (Cambier et al., 2020), and BM (Fox et al., 2021). Recently, the problem of automatically selecting such floating-point formats has been considered: e.g., (Yang et al., 2022). Another research direction is to develop algorithmic techniques that improve training accuracy under low precision: e.g., (Sa et al., 2018; Yang et al., 2019a; Zamirai et al., 2020; Björck et al., 2021). Our work is orthogonal and complementary to all these prior works: they consider various floating-point formats or training algorithms but use a fixed precision assignment, which is either the uniform or operator-based assignment (or their variants); our work explores various precision assignments once floating-point formats and training algorithms are fixed (e.g., based on the prior works). The tradeoff between memory and accuracy in training is also considered in (Yang et al., 2022), but that work differs from ours: they vary floating-point formats when a precision assignment is fixed, while we vary precision assignments when floating-point formats are fixed.
Low-precision fixed-point training uses fixed-point formats as a low-precision representation instead of a floating-point format. Some works use fixed-point formats for forward tensors and floating-point formats for backward tensors: e.g., (Courbariaux et al., 2015; Jacob et al., 2018; Choi et al., 2018; Yang et al., 2019b; Sun et al., 2020). Other works use only fixed-point formats for all tensors: e.g., (Gupta et al., 2015; Zhou et al., 2016; Wu et al., 2018; Das et al., 2018; Banner et al., 2018; Sakr & Shanbhag, 2019; Zhang et al., 2020; Rajagopal et al., 2020). Among all these works, some consider various mixed precision assignments with different bitwidth (fixed-point) formats (e.g., (Sakr & Shanbhag, 2019; Zhang et al., 2020)); but they are not applicable to our context (i.e., floating-point training) since they rely on some properties of fixed-point formats that do not hold for floating-point formats (e.g., all numbers in a given format are equally distributed). The approach taken in (Rajagopal et al., 2020) is orthogonal and complementary to ours: they use only the uniform precision assignment, but change the underlying low-precision formats during training; we consider various mixed precision assignments, but fix the underlying low-precision formats during training.
Low-precision inference, often called neural network quantization (in a narrow sense), aims at reducing the latency or memory of neural network inference (instead of training) by using low-precision numbers (Nagel et al., 2021). Existing approaches typically assume a pre-trained model and try to find low-precision formats for each part of the inference computation, either by retraining the model (called quantization-aware training) or without any retraining (called post-training quantization); see, e.g., (Gholami et al., 2022; Qin et al., 2022) for surveys. Some works on inference consider various mixed precision assignments, but they are not applicable to our context: they focus on making inference more efficient and usually assume a pre-trained model; we focus on making training more efficient and aim at learning a model from scratch.
Floating-point tuning is another related topic, which considers the following problem: given a program, assign appropriate formats (among given candidates) to the program's floating-point variables such that the program's output has an error smaller than a given threshold for all given inputs, while also maximizing performance (Rubio-González et al., 2013; 2016; Chiang et al., 2017; Guo & Rubio-González, 2018; Menon et al., 2018). This problem is different from the problem we consider: the former is concerned with the floating-point error after a single run of a program, while we are concerned with the training accuracy after a large number of runs of a program (i.e., a gradient computation) where each run affects the next run; further, the former considers general-purpose programs, while we consider deep learning programs and exploit their unique features.
3 PROBLEM

In this section, we first provide background on low-precision floating-point training (§3.1), based on which the memory-accuracy tradeoff problem is introduced (§3.2). We then prove the NP-hardness of the problem (§3.3). Our approach in §3–4 is more formal than most related works for two reasons: (i) we show the problem is NP-hard, which has not been considered in prior work; and (ii) to clearly describe the precision assignments to be considered.
3.1 Background: Low-Precision Floating-Point Training

Let T be the set of real-valued tensors and let [n] denote the set {1, . . . , n}. For a supervised learning task, we usually consider a model network M = (f_1, . . . , f_n) parameterized by θ = (θ_1, . . . , θ_n) ∈ T^n, and a loss network L = (f_{n+1}, . . . , f_m), where f_i : T² → T is a primitive operator on tensors (e.g., convolution, batch normalization, maxpool, and softmax). Given an input-output pair (x, y) ∈ T², the model M computes a predicted output y′ of x by iteratively applying f_i(·, θ_i) to x (i ∈ [n]), and L computes a loss from y′ by iteratively applying f_{i′}(·, y) to y′ (i′ ∈ [m]\[n]). A standard way to train M is to minimize the loss value using the gradient descent algorithm: iteratively update θ by following the gradient of the loss with respect to θ.

Figure 2: A diagram showing the tensors and operators used in a gradient computation; see Eq. (1) for details. For brevity, rounding functions rnd_π(·) are omitted.

Floating-point training. In practice, we perform a gradient computation usually with tensors represented in floating-point formats. Let π : TS → FP be a precision assignment giving the floating-point format of each tensor, where TS ≜ {v_i, dv_i, θ_j, dθ_j | i ∈ [m + 1], j ∈ [n]} is the set of tensors arising in a gradient computation (explained below), and FP ≜ {fp(e, m, b) | e, m ∈ ℕ, b ∈ ℤ} is the set of floating-point formats. Here fp(e, m, b) denotes a floating-point format that consists of a 1-bit sign, an e-bit exponent, and an m-bit mantissa, and has an (additional) exponent bias of b ∈ ℤ. A common choice of π is π_fp32(t) ≜ fp32 for all t ∈ TS, where fp32 ≜ fp(8, 23, 0) is the standard 32-bit floating-point format.
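To make fp(e, m, b) concrete, here is a small NumPy sketch of a rounding function into such a format. It is an illustration under our own simplifications (it saturates on overflow and flushes subnormal values to zero, which the paper's rnd need not do):

```python
import numpy as np

def rnd(x, e, m, b=0):
    """Round x to the nearest value representable in fp(e, m, b):
    1 sign bit, e exponent bits, m mantissa bits, exponent bias b.
    Simplified: saturates on overflow, flushes tiny values to 0."""
    x = np.asarray(x, dtype=np.float64)
    emax = 2 ** (e - 1) - 1 + b        # largest unbiased exponent
    emin = -(2 ** (e - 1)) + 2 + b     # smallest normal exponent
    sign, mag = np.sign(x), np.abs(x)
    out = np.zeros_like(mag)
    nz = mag > 0
    exp = np.floor(np.log2(mag, out=np.zeros_like(mag), where=nz))
    exp = np.clip(exp, emin, emax)
    ulp = 2.0 ** (exp - m)             # value spacing at this exponent
    q = np.round(mag / ulp) * ulp      # round mantissa to m bits
    max_val = (2 - 2.0 ** (-m)) * 2.0 ** emax
    out[nz] = np.minimum(q, max_val)[nz]   # saturate on overflow
    out[mag < 2.0 ** emin] = 0.0           # flush-to-zero sketch
    return sign * out

# Example: rnd(x, 8, 23, 0) mimics rounding into fp32.
```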
Given a precision assignment π, a gradient computation is typically performed by the backpropagation algorithm: with v̂_1 = rnd_{π(v_1)}(x) and dv̂_{m+1} = rnd_{π(dv_{m+1})}(1), compute

    v̂_{i+1} = rnd_{π(v_{i+1})}(f_i(v̂_i, û_i)),        θ̂_j = rnd_{π(θ_j)}(θ_j),
    dv̂_i = rnd_{π(dv_i)}(df_{i,1}(dv̂_{i+1}, v̂_i, û_i)),        dθ̂_j = rnd_{π(dθ_j)}(df_{j,2}(dv̂_{j+1}, v̂_j, θ̂_j)),        (1)

for i ∈ [m] and j ∈ [n]; see Figure 2 for a diagram. Here rnd : FP × T → T is a function rounding a given input to a given floating-point format, df_{i,1}, df_{i,2} : T³ → T are the backward operators of f_i with respect to its first and second arguments, respectively, and û_i = θ̂_i if i ∈ [n] and y otherwise. We call v_i and θ_j the forward tensors, and dv_i and dθ_j the backward tensors. We put a hat over each tensor to emphasize that its value is the output of a rounding function to a possibly low-precision format; remark that such a rounding function is not used within f_i, df_{i,1}, and df_{i,2}, since they typically use large-bitwidth floats (e.g., fp32) and no low-precision floats internally (Kalamkar et al., 2019; Cambier et al., 2020). After the computation, dθ̂_j stores the gradient of the loss value with respect to θ_j.
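A compact sketch of one gradient computation in the style of Eq. (1); the bundling of each operator with its backward operators and the rnd lookup table are our own scaffolding, not the paper's API:

```python
def gradient_step(x, y, theta, ops, rnd):
    """Backpropagation with per-tensor rounding as in Eq. (1).
    ops[i] = (f, df1, df2) for operator i (0-based); rnd[key]
    returns the rounding function assigned to that tensor."""
    n, m = len(theta), len(ops)
    v = [rnd["v", 0](x)]                      # v_1 (0-based index)
    th = [rnd["theta", j](theta[j]) for j in range(n)]
    for i, (f, _, _) in enumerate(ops):       # forward pass
        u = th[i] if i < n else y             # u_i: weight or label
        v.append(rnd["v", i + 1](f(v[i], u)))
    dv = rnd["dv", m](1.0)                    # dv_{m+1}
    dtheta = [None] * n
    for i in reversed(range(m)):              # backward pass
        f, df1, df2 = ops[i]
        u = th[i] if i < n else y
        if i < n:                             # dtheta_j of Eq. (1)
            dtheta[i] = rnd["dtheta", i](df2(dv, v[i], th[i]))
        dv = rnd["dv", i](df1(dv, v[i], u))   # dv_i of Eq. (1)
    return dtheta
```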
The overall picture of floating-point training is now described as follows. In each iteration of the gradient descent algorithm, we compute dθ̂_j via Eq. (1) using a given precision assignment π, training data (x, y), and current weights θ. We then update each θ_j by θ_j ← rnd_fp32(θ_j − η · dθ̂_j) given a learning rate η > 0, and proceed to the next iteration until the training ends. Here we use fp32 to represent θ_j by following the convention in low-precision floating-point training: a "master copy" of weights (i.e., θ_j) is stored separately from the weight values (i.e., θ̂_j) used in a gradient computation, and is usually represented by fp32 (Micikevicius et al., 2018; Kalamkar et al., 2019; Cambier et al., 2020). The memory overhead of this master copy is very small compared to the memory required to store other tensors (e.g., activation tensors v_i) (Micikevicius et al., 2018).
Low-precision floating-point training. In low-precision training, we use a precision assignment π where some tensors have a smaller bitwidth than fp32. Particularly well-studied are π that use two predetermined floating-point bitwidths (which are different) and optionally vary the rest of the format from tensor to tensor. We call C : TS × {lo, hi} → FP a precision-candidate assignment if C(t, lo) has the same bitwidth for all t ∈ TS, the same holds for hi, and the bitwidth for lo is smaller than that for hi. We also define Π(C) ≜ {π : TS → FP | ∀t ∈ TS. π(t) ∈ {C(t, lo), C(t, hi)}} to be the set of precision assignments that conform to C.
Among various precision assignments in Π(C), two have received the most attention: the uniform assignment π_{unif,C} (Micikevicius et al., 2018) and the operator-based assignment π_{op,C} (Sun et al., 2019). The former assigns low-precision formats to all tensors uniformly¹, and the latter to (most of) the input tensors of GEMM operators (in both forward and backward passes):

    π_{unif,C}(t) ≜ C(t, lo) for all t ∈ TS,

    π_{op,C}(t) ≜ C(t, lo)  if t ∈ {v_i, θ_i, dv_{i+1}} for some i and f_i is a GEMM operator (but not the first/last one);
                  C(t, hi)  otherwise,        (2)

where a GEMM operator refers to a general matrix multiplication operator, which arises in, e.g., fully-connected or convolutional layers. A particular variant π_{op′,C} of π_{op,C} has received much attention as well (Kalamkar et al., 2019; PyTorch, 2022), which assigns low-precision formats to (most of) the input and output tensors of GEMM operators: it is defined as π_{op,C} except that {v_i, θ_i, dv_{i+1}} in Eq. (2) is replaced by {v_i, θ_i, v_{i+1}, dv_i, dθ_i, dv_{i+1}}. For several choices of C, these assignments have been shown to produce training accuracy similar to that of π_fp32 on many datasets and models (see §1–2).

¹For simplicity we define π_{unif,C} without the common exceptions for tensors near v_1 and/or v_{m+1}.
|
477 |
+
Training with Mixed-Precision Floating-Point Assignments
|
478 |
+
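As a concrete reading of Eq. (2), here is a small self-contained sketch; the data structures are illustrative stand-ins, not the paper's implementation.

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class T:
    name: str   # e.g. "v3", "theta3", "dv4"
    op: int     # index i of the operator f_i this tensor is attached to
    role: str   # "v", "theta", or "dv_next" (i.e., one of {v_i, theta_i, dv_{i+1}})

def pi_op(t: T, gemm_ops: set, lo: str = "fp8", hi: str = "fp16") -> str:
    """Operator-based assignment of Eq. (2): low precision for the input
    tensors of GEMM operators, except the first and last GEMM."""
    interior = gemm_ops - {min(gemm_ops), max(gemm_ops)}
    if t.op in interior and t.role in {"v", "theta", "dv_next"}:
        return lo
    return hi

gemms = {2, 3, 4}                      # f_2, f_3, f_4 are GEMMs
print(pi_op(T("v3", 3, "v"), gemms))   # fp8: input of an interior GEMM
print(pi_op(T("v2", 2, "v"), gemms))   # fp16: the first GEMM is excluded
```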
3.2 Memory-Accuracy Tradeoff Problem

We now introduce the following problem, based on §3.1, to address the limitation of existing approaches to low-precision floating-point training discussed in §1:

Problem 3.1 (Memory-accuracy tradeoff). Given training data {(x_i, y_i)}, a model and loss network M and L, a precision-candidate assignment C, and a lower bound r ∈ [0, 1] on the low-precision ratio, find π ∈ Π(C) that maximizes acc(π) subject to ratio_lo(π) ≥ r.

Here acc(π) denotes the accuracy of the model M when trained with π on {(x_i, y_i)}, and ratio_lo(π) denotes the low-precision ratio of π, i.e., the portion of the tensors represented in low precision under π, among all tensors appearing in a gradient computation:²

$$\mathrm{ratio}_{\mathrm{lo}}(\pi) \triangleq \frac{\mathrm{size}(\{t \in TS \mid \pi(t) = C(t, \mathrm{lo})\})}{\mathrm{size}(TS)} \in [0, 1],$$

where size(T) ≜ Σ_{t∈T} size(t) denotes the total size (i.e., number of elements) of all tensors in T ⊆ TS. For instance, ratio_lo(πhi) = 0 and ratio_lo(πlo) = 1 for the all-high-precision assignment πhi and the all-low-precision assignment πlo. The problem asks for a precision assignment that maximizes training accuracy under a memory constraint, expressed as a fraction of the memory required to train the model using πhi.

²As explained in §1, the low-precision ratio is a proxy for the reduction in memory as well as training time (the model aggregate shrinks as the low-precision ratio grows). Note that it is not always possible to simply measure training time, as some floating-point bitwidths of interest (e.g., 8-bit) are not natively supported by current hardware.
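The ratio itself is a direct computation over tensor sizes; a sketch (dictionary-based, with illustrative names):

```python
def ratio_lo(pi: dict, C: dict, sizes: dict) -> float:
    """Low-precision ratio of assignment pi over the tensors in sizes.
    pi: tensor -> format; C: (tensor, "lo"/"hi") -> format; sizes: tensor -> numel."""
    lo = sum(n for t, n in sizes.items() if pi[t] == C[(t, "lo")])
    return lo / sum(sizes.values())

sizes = {"v1": 1000, "theta1": 500, "dv1": 1000}
C = {(t, lvl): f"fmt/{lvl}" for t in sizes for lvl in ("lo", "hi")}
pi = {"v1": C[("v1", "lo")], "theta1": C[("theta1", "hi")], "dv1": C[("dv1", "lo")]}
print(ratio_lo(pi, C, sizes))  # 0.8
```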
3.3 NP-Hardness of the Problem

We prove that the memory-accuracy tradeoff problem from §3.2 is NP-hard by showing that there is a polynomial-time reduction from the knapsack problem to this problem:

Theorem 3.2. Problem 3.1 is NP-hard.

Proof sketch. Recall the knapsack problem: given n items with weights wi ∈ ℕ and profits pi ∈ ℕ (i ∈ [n]), find a subset of the items that maximizes the total profit while its total weight does not exceed a given threshold W ∈ ℕ.

Given an instance (w, p, W) of the knapsack problem, we construct an instance of Problem 3.1 such that we get the following (informal) correspondence between the two: wi corresponds to the size of the parameter tensor θi; pi to the i-th component of the input data; W to the lower bound r on the low-precision ratio (in an inverse way); and selecting the i-th item corresponds to assigning a high-precision format to the tensor θi (and related tensors), which roughly decreases the low-precision ratio by wi while increasing the accuracy of the model (after training) by pi. Based on this informal correspondence, we formally prove that an optimal solution to the above instance of Problem 3.1 can be converted in linear time to an optimal solution to the given knapsack problem (w, p, W). That is, we have a linear-time reduction from the knapsack problem (which is NP-hard (Karp, 1972)) to Problem 3.1, which is therefore NP-hard. For a detailed proof, see Appendix A.

Intuitively, the proof relies on two aspects of Problem 3.1: the size of the search space (i.e., |Π(C)|) is exponential in the size of the problem (especially |TS|), and some values representable in a high-precision format underflow to 0 in a lower-precision format. Note that underflows are relevant in low-precision training: they frequently arise in practice, degrading the results of training (Micikevicius et al., 2018). The NP-hardness result indicates that no polynomial-time algorithm is likely to solve the problem exactly.
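To see the scale of the search space concretely, an exact solver must in general enumerate all of Π(C), i.e., 2^|TS| assignments. A brute-force sketch follows; acc is treated as a black-box oracle, which in reality would require a full training run per query.

```python
from itertools import product

def brute_force(tensors: list, sizes: dict, acc, r: float):
    """Exact search for Problem 3.1 over Pi(C): exponential in |TS|."""
    total, best = sum(sizes.values()), None
    for levels in product(("lo", "hi"), repeat=len(tensors)):
        pi = dict(zip(tensors, levels))
        lo = sum(sizes[t] for t in tensors if pi[t] == "lo")
        if lo / total < r:
            continue                       # violates the memory constraint
        a = acc(pi)                        # one training run per candidate
        if best is None or a > best[0]:
            best = (a, pi)
    return best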
4 ALGORITHM

In this section, we propose a novel heuristic algorithm for the memory-accuracy tradeoff problem (§4.1), and a new technique to handle overflows arising in training (§4.2).

4.1 Precision Demotion for Saving Memory

Consider an input to the memory-accuracy tradeoff problem (Problem 3.1): a model and loss network M = (f1, · · · , fn) and L = (fn+1, · · · , fm), a precision-candidate assignment C, and a lower bound r on the low-precision ratio. Given this input, our algorithm works in two steps.

Tensor grouping. We first group the tensors in TS such that each group consists of all the tensors between two “adjacent” GEMM operators (see below for details). This grouping reduces the search space over precision assignments, from all of Π(C) to a subset in which the same precision is assigned to all tensors in the same group. This specific grouping strategy is based on two observations: a majority of floating-point operations are carried out in GEMM operators, and it is standard (e.g., in PyTorch) to use the same precision for a forward tensor and its corresponding backward tensor.

Formally, we group tensors as follows. Let fk and fk′ (k < k′) be GEMM operators that are “adjacent”, i.e., there is no GEMM operator in {fk+1, · · · , fk′−1}. For each such (fk, fk′), we create a group {vi, dv_i, θj, dθj | i ∈ (k, k′] ∩ [m + 1], j ∈ (k, k′] ∩ [n]}. After that, we create two more groups for the remaining tensors: one for the tensors near v1 and the other for the tensors near vm+1. As a result, we obtain a set of disjoint groups of tensors {T1, T2, · · · } ⊆ 2^TS.

Precision demotion. Given the groups of tensors T1, T2, · · · , we construct a precision assignment π as follows: initialize π to the all-high-precision assignment, and update π by demoting the precision of all tensors in a group to low precision, one group at a time, until the low-precision ratio of π becomes at least r. We demote the precision of groups in decreasing order of their sizes (i.e., the total number of elements in their tensors); that is, the precision of a larger group is demoted earlier. Formally, let {T′1, T′2, · · · } be the reordering of {T1, T2, · · · } such that size(T′1) ≥ size(T′2) ≥ · · · . After initializing π by π(t) = C(t, hi) for all t, we iterate over i ∈ ℕ and update π to π(t) = C(t, lo) for all t ∈ T′i, until ratio_lo(π) ≥ r is first satisfied. The resulting π is the output of our algorithm.
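A compact sketch of this greedy demotion (group and tensor names are illustrative):

```python
def demote_by_size(groups: list, sizes: dict, r: float) -> dict:
    """Greedy demotion of §4.1: start all-high, flip whole groups to low
    precision in decreasing order of group size until ratio_lo >= r."""
    total = sum(sizes[t] for g in groups for t in g)
    pi = {t: "hi" for g in groups for t in g}
    lo = 0
    for g in sorted(groups, key=lambda g: sum(sizes[t] for t in g), reverse=True):
        if lo / total >= r:
            break
        pi.update({t: "lo" for t in g})
        lo += sum(sizes[t] for t in g)
    return pi

groups = [{"v1", "dv1"}, {"v2", "theta1", "dv2", "dtheta1"}]
sizes = {"v1": 10, "dv1": 10, "v2": 100, "theta1": 50, "dv2": 100, "dtheta1": 50}
print(demote_by_size(groups, sizes, r=0.5))  # only the large group goes low
```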
The intuition behind using group size as the priority order for precision demotion is that this order is actually optimal in a very simplified setting. Suppose that an input x to the model M stores a quantity of information I, and the forward computation of M is nothing but a process of extracting the information in the input into a small number of values, i.e., the tensor vn+1. Assume that passing through each group Oi = {fk+1, · · · , fk′} of operators (corresponding to the group Ti of tensors) reduces the amount of information by a factor αi ∈ (0, 1), and that using low precision on the group Ti further reduces the amount of information by a constant factor β ∈ (0, 1) for all i. Then the amount of information left in vn+1 becomes I × (α1α2 · · · ) × β^l, where l is the number of groups in low precision. In this simplified setting, maximizing the amount of information in vn+1 is equivalent to minimizing the number of groups in low precision, which is achieved precisely by demoting the largest groups first (when there is a constraint on the low-precision ratio). We show empirically (§5.4) that using decreasing size order in precision demotion indeed produces better precision assignments than using other orders.
4.2 Precision Promotion for Handling Overflows

While our algorithm in §4.1 enforces a constraint on memory usage, it places no explicit constraint on training accuracy, so, not surprisingly, for some models and datasets the resulting precision assignment causes training to diverge: accuracy decreases significantly and remains low after some point. We observe that when training begins to diverge (and a bit before that), many overflows occur in the rounding function of some tensors ˆvi, i.e., an input tensor to the function rnd_π(vi)(·) in Eq. (1) contains many elements whose magnitude is larger than the maximum representable number of the format π(vi) (Figure 6(a-b); §5.4). This rapid increase in overflows in individual tensors is a signal that training may diverge.

Precision promotion. Based on this observation, after each gradient computation we update the current precision assignment π by promoting to high precision (i.e., C(t, hi)) any forward tensor t whose overflow ratio is greater than a given threshold Θ ∈ (0, 1); this updated precision assignment is used in the next gradient computation. Here the overflow ratio of t ∈ TS denotes the number of overflows arising in the rounding function of ˆt in Eq. (1), divided by the number of elements in ˆt. We show empirically (§5.4) that training always converges with this technique and that the additional memory cost of promotion is small (in our experiments, < 3% of the maximum model aggregate). For the experiments, we use Θ = 0.01; in fact, we found that a wide range of values for Θ (0.1, 0.01, and 0.001) all work well. Note that this technique is not specific to our algorithm and can also be applied to other precision assignment methods.
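A minimal sketch of this update; the overflow counts are assumed to be collected by the rounding functions during the gradient computation.

```python
def promote_overflowing(pi: dict, C: dict, overflows: dict, numel: dict,
                        theta: float = 0.01) -> dict:
    """§4.2: promote any forward tensor whose overflow ratio exceeds theta."""
    for t, n_over in overflows.items():   # forward tensors only
        if n_over / numel[t] > theta:
            pi[t] = C[(t, "hi")]
    return pi
```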
We apply precision promotion only to forward tensors, for two reasons. First, dynamic loss scaling (Micikevicius et al., 2018; Sun et al., 2019; Nvidia, 2019; PyTorch, 2022) already handles overflows in backward tensors, but not in forward tensors: loss scaling multiplies the backward loss tensor dv_m+1 by a constant before performing the backward computation, to scale up all backward tensors; the dynamic version adjusts this constant during training so as to avoid overflows in backward tensors. Note that dynamic loss scaling does not affect forward tensors at all. Second, we cannot use a similar idea to handle overflows in forward tensors, because forward tensors are not linear in the input tensor v1, whereas backward tensors are linear in the backward loss tensor dv_m+1 (by the linearity of differentiation).

Precision promotion incurs little if any computational overhead: checking whether a single rounding operation overflows is cheap, and we apply rounding functions only to the output tensor of an arithmetic-intensive operator (e.g., convolution and batch normalization), amortizing the cost of the overflow checks over a large number of other operations.
5 EXPERIMENTS

In this section, we evaluate our precision assignment technique (developed in §4) on standard training tasks to answer three research questions:

• Does our technique explore the tradeoff between memory and accuracy, and achieve a better tradeoff than existing (fixed) precision assignments (§5.3)?
• Are the two main components of our technique, precision demotion of larger tensors and precision promotion of overflowing tensors, important for good performance (§5.4)?
• How can we choose the parameter r in our technique (i.e., a lower bound on the low-precision ratio) (§5.5)?

5.1 Implementation

We have implemented our precision assignment technique using PyTorch (Paszke et al., 2019). Given a model and loss network and a dataset, our implementation takes as parameters a precision-candidate assignment C and a lower bound r on the low-precision ratio; it then automatically assigns precisions to the tensors appearing in training according to our technique, and uses those assigned precisions in gradient computations. To make these procedures automatic, our implementation works as follows:
• For each PyTorch class for a primitive operator (e.g., torch.nn.Conv2d), our implementation provides a wrapped version (e.g., mpa.nn.Conv2d) which records auxiliary information for our technique (e.g., the floating-point format of input/output tensors) and applies the proper rounding functions in forward/backward computations based on this information. Models should then use the wrapped classes instead of the original ones.
• Our implementation first constructs a computation graph (of a given model and loss network) dynamically, by running a forward computation on a minibatch of input data. The computation graph and other information (e.g., each tensor’s size) are recorded in the wrapped classes.
• Using the auxiliary information just recorded, our implementation then constructs a precision assignment according to §4.1, uses it in gradient computations, and updates it after each gradient computation according to §4.2. We also record the precision assignment in the wrapped classes, to automatically apply the proper rounding functions in gradient computations.

As no current hardware natively supports the low-precision formats used in our experiments (e.g., 8-bit floats) and their operations, we simulate them with 32-bit floats and 32-bit operations followed by rounding functions, as described in Eq. (1). We implement the rounding functions based on the QPyTorch library (Zhang et al., 2019); a few extensions are required, though, e.g., to support exponent bias and to signal overflows for dynamic loss scaling. We automatically apply these rounding functions after each primitive operator, using PyTorch’s hook feature (e.g., nn.Module.register_*hook).
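The following is a minimal sketch of this hook-based simulation, rounding forward outputs only via QPyTorch's float_quantize; the full implementation also rounds backward tensors and adds the exponent-bias and overflow-signaling extensions, which are not modeled here.

```python
import torch.nn as nn
from qtorch.quant import float_quantize  # QPyTorch rounding kernel

def attach_rounding(module: nn.Module, exp: int, man: int) -> None:
    """Round the module's output to fp(exp, man, 0) via a forward hook;
    a hook that returns a value replaces the module's output."""
    module.register_forward_hook(
        lambda m, inp, out: float_quantize(out, exp=exp, man=man,
                                           rounding="nearest"))

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
for m in model.modules():
    if isinstance(m, (nn.Conv2d, nn.BatchNorm2d)):
        attach_rounding(m, exp=4, man=3)  # low-precision forward format of §5.2
```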
5.2 Experiment Setups

Datasets and models. As benchmarks for our experiments, we use the image classification task and three datasets for the task: CIFAR-10 and CIFAR-100 (Krizhevsky, 2009), and ImageNet (Russakovsky et al., 2015); we choose them because they have been a standard choice in recent work on low-precision training (Wang et al., 2018; Sakr & Shanbhag, 2019; Rajagopal et al., 2020; Chmiel et al., 2021). For this task and these datasets, we use four well-known models: SqueezeNet (Iandola et al., 2016), ShuffleNet-v2 (Ma et al., 2018), MobileNet-v2 (Sandler et al., 2018), and ResNet-18 (He et al., 2016); they are chosen because models with relatively few weights, such as these, are generally known to be more difficult to train in low precision than those with more weights (Sun et al., 2019). We considered other tasks (e.g., language modeling) and related models (e.g., RNN/transformer-based models) but did not include them in our experiments, because substantial additional implementation effort orthogonal to our main contributions would be required: these models use some PyTorch operators that do not support per-tensor precision assignments, so applying our technique to them requires significant modifications to PyTorch internals.
Precision-candidate and precision assignments. For the experiments, we use the precision-candidate assignment C studied in (Sun et al., 2019), which uses 16-bit (and 8-bit) floats for high (and low) precision; in particular, C(t, hi) = fp(6, 9, 0) for all (forward/backward) tensors t, and C(t, lo) = fp(4, 3, 4) for all forward tensors t and fp(5, 2, 0) otherwise. We choose this particular C because it uses sub-32-bit floating-point formats for both low and high precision, and because the precision assignment πop,C was shown to achieve accuracy comparable to 32-bit training (Sun et al., 2019). The three floating-point formats used in C allow subnormals but no infinities and NaNs; values beyond the representable range are rounded to the largest or smallest representable numbers. While no current hardware supports the latter two 8-bit formats, they will be supported natively on NVIDIA’s forthcoming H100 GPU (Andersch et al., 2022). Because our technique is parameterized by a precision-candidate assignment, it is easily applied to other assignments as well.

We evaluate our technique by varying its parameter r (i.e., a lower bound on the low-precision ratio) over the deciles r ∈ {0, 0.1, 0.2, · · · , 1}. We write πours,r to denote the precision assignment chosen by our technique (described in §4) for a given r; e.g., πours,0 is the all-high-precision assignment, and πours,1 is the all-low-precision assignment equipped with our precision promotion technique (§4.2). Following (Sun et al., 2019), all precision assignments (including πours,r) in our experiments use high precision (i.e., 16 bits) for all backward weight tensors (i.e., ˆdθj).

Other setups and compute time. All experiments were performed on NVIDIA V100 GPUs; the total compute time for all experiments was 1,008 GPU-days. We train all models in a standard way: we apply dynamic loss scaling (a standard technique in low-precision floating-point training; see §4.2 for details) except for 32-bit training, and use standard settings (e.g., learning rate); see Appendix B for details. Due to random variation in training, we perform four runs of training for each configuration and report the average and the range of measured quantities.
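In code, this C can be written down directly; a sketch, where the is_forward flag is an assumed attribute and formats are (exponent bits, mantissa bits, exponent bias) triples:

```python
def C(t, level: str) -> tuple:
    """Precision-candidate assignment of Sun et al. (2019)."""
    if level == "hi":
        return (6, 9, 0)                                  # 16-bit float, all tensors
    return (4, 3, 4) if t.is_forward else (5, 2, 0)       # 8-bit floats
```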
5.3 Comparison with Existing Precision Assignments

To compare our technique with existing precision assignments for floating-point training, we train each model with the following precision assignments: the all-32-bit πfp32, the uniform πunif (Micikevicius et al., 2018), the operator-based πop (Sun et al., 2019), its variant πop′ (Kalamkar et al., 2019; PyTorch, 2022), and ours πours,r (see §3.1 and §5.2 for their definitions). We choose πunif, πop, and πop′ as baselines because existing precision assignments for floating-point training fall into one of these three assignments (or their variants) (see §1–2).
Figure 3: Results of training ShuffleNet-v2 on ImageNet with πfp32, πunif (Micikevicius et al., 2018), πop (Sun et al., 2019), πop′ (Kalamkar et al., 2019), and πours,r. Left: Each line shows the average training trajectory for each precision assignment; πours,r is colored from navy to yellow (darker for smaller r). Right: Each point shows the memory-accuracy tradeoff of each precision assignment; a red-dashed line shows the accuracy of πfp32; and shaded areas show the variation among four training runs. In the right figure, top-right points are better than bottom-left ones. Observe that there are •s (πours,r) above and to the right of the πop and πop′ markers, respectively. The ⋆ marker is missing as its y-value is too small.

(a) CIFAR-10, SqueezeNet (b) CIFAR-100, SqueezeNet (c) CIFAR-100, SqueezeNet† (d) CIFAR-10, ShuffleNet-v2 (e) CIFAR-100, ShuffleNet-v2 (f) CIFAR-100, ShuffleNet-v2† (g) CIFAR-10, MobileNet-v2 (h) CIFAR-100, MobileNet-v2 (i) CIFAR-100, MobileNet-v2† (j) CIFAR-10, ResNet-18 (k) CIFAR-100, ResNet-18 (l) CIFAR-100, ResNet-18†

Figure 4: Memory-accuracy tradeoffs of πunif (Micikevicius et al., 2018), πop (Sun et al., 2019), πop′ (Kalamkar et al., 2019), and πours,r for four models and their smaller variants on CIFAR-10 and CIFAR-100. The variant models have width multiplier 0.25 and are marked by †. Top-right points are better than bottom-left ones. In all but three plots, there are •s above and to the right of the πop and πop′ markers, respectively; even in those three plots (g, h, k), the •s have almost the same tradeoffs as them. In half of all plots, ⋆ has much smaller y-values than the other points. The training trajectories for the above plots and the results of the other smaller models are in Appendix C.1.
We train the four models mentioned in §5.2 on CIFAR-10 and CIFAR-100, and ShuffleNet-v2 on ImageNet. We also train smaller variants of the four models (which are more difficult to train with low precision) on CIFAR-100. We obtain these variant models by following (Sun et al., 2019), i.e., by applying a well-known approach for model reduction that uses a parameter called the width multiplier (Howard et al., 2017): each variant model reduces the number of channels in most tensors by a width multiplier; we use three values {0.5, 0.25, 0.1} for the width multiplier. We train just one model on ImageNet due to the large amount of computation involved: for each model, 44 training runs (11 choices for r and 4 runs for each choice) are required for πours,r, and each run on ImageNet takes nearly half a day with 16 GPUs. We use ShuffleNet-v2 for ImageNet since the model shows interesting memory-accuracy tradeoffs when trained on the (smaller) CIFAR datasets.
ImageNet. Figure 3 presents the training results of ShuffleNet-v2 on ImageNet: its left graph plots the average training trajectory for each precision assignment, and its right graph shows how each precision assignment trades off between memory and accuracy, where memory is represented (inversely) by the low-precision ratio of the assignment and accuracy is the best test accuracy of the model during training. Each point in the right graph shows the average accuracy of four runs of training, while the shaded area shows the variation in accuracy among those four runs.

Figure 3 makes three points. First, as the parameter r increases, the average accuracy drop of πours,r from πfp32 increases (up to 5%). In contrast, πunif and πop′ have a much larger average accuracy drop (more than 30%), as some training runs diverge when πunif and πop′ are used. Second, the tradeoff given by πours,r is better (i.e., closer to Pareto-optimal) than that of πop: πours,r for r ∈ {0.3, 0.4} has both higher accuracy and a larger low-precision ratio (i.e., memory reduction) than πop. In particular, πours,0.4 has 1.6× the memory reduction of πop. Third, πours,r provides options that πop (which has an accuracy drop of >1%) cannot. If we want accuracy closer to πfp32, say within 0.5%, we can use πours,0.2 with 2.6% more memory than πop. If we can tolerate a larger accuracy loss, say ≈ 3%, we can use πours,0.7 with 2.9× the memory reduction of πop.
CIFAR-10/100. Figure 4 presents the memory-accuracy tradeoffs of the precision assignments for the four models on CIFAR-10 and CIFAR-100, and their smaller variants (with width multiplier 0.25) on CIFAR-100. The results for the other smaller variants are similar and included in Appendix C.1. The conclusions from Figure 3 hold for Figure 4: πours,r provides a range of options by varying r, and exhibits a better tradeoff than πunif, πop, and πop′ in almost all cases. In detail: First, in half of all 12 plots, πunif shows a tradeoff similar to πours,1. But in the remaining half, πunif has an accuracy drop much larger than all other precision assignments including πours,r, since using πunif often makes training diverge while using, e.g., πours,1 does not. Second, in all but two plots, πours,r shows a strictly better tradeoff than πop: πours,r has noticeably larger (> 2×) memory reduction than πop while maintaining similar accuracy. Even in those two plots, πours,r has a tradeoff very close to πop. Note that in three plots, πop has an accuracy drop of >1% while πours,r provides several options that have smaller accuracy drops and more memory savings at the same time. Third, πours,r shows a strictly better tradeoff than πop′ in all but two plots, and a similar one in those two. Note that πop′ has lower accuracy than πop in all but one plot. It also has an accuracy drop of >1% in half of all plots, and sometimes even makes training diverge (in one plot here and three other plots in Appendix C.1).
5.4 Ablation Study: Precision Demotion and Promotion

Precision demotion. To evaluate the decision to use precision demotion in decreasing-size order, we train the four models on CIFAR-100 with πours,r, πours[inc],r (which demotes tensor groups in increasing-size order), and πours[rand],r (which demotes tensor groups in random order). For πours[rand], three different random orders are used in each case. The results, presented in Figure 5 (and Appendix C.2), show that the order of precision demotion has a significant impact on the resulting memory-accuracy tradeoff, and that decreasing order provides the best results in nearly all cases. Increasing order consistently shows the worst results, suggesting that our intuition (given in §4.1) for choosing decreasing order has some basis in reality.

Precision promotion. To understand whether precision promotion of overflowing tensors is important to our technique, we train ShuffleNet-v2 on ImageNet using πours[no-promo],r, which does not promote tensors. The results, presented in Figure 6(a), show that several training trajectories diverge in early epochs and fail to recover afterwards. Figure 6(b) plots the top-5 tensor overflow ratios for the highlighted trajectory in Figure 6(a). The overflow ratios first spike at about the time divergence occurs, around epoch 11. A closer look shows that the spike in overflow ratio occurs shortly before divergence, and starts first in a few tensors and then propagates to others. These observations indicate that an excessive number of overflows in a few tensors is the cause of the training divergence.

Finally, Figure 6(c-d) shows that precision promotion is effective at preventing the divergence of training while sacrificing only a small amount of memory reduction. The figure shows ShuffleNet-v2 on ImageNet trained using our technique with and without precision promotion. Figure 6(c) shows that without precision promotion, large accuracy drops occur due to divergence, whereas with precision promotion, training converges. Figure 6(d) shows that the total size of the tensors promoted to high precision is small for all r values. See Appendix C.2 for similar results on CIFAR-10.
Figure 5: Memory-accuracy tradeoffs of πours,r, πours[inc],r, and πours[rand],r for three models on CIFAR-100: (a) SqueezeNet, (b) ShuffleNet-v2, (c) MobileNet-v2. Observe that the •s are above and to the right of the other points in nearly all cases. The results for ResNet-18 are in Appendix C.2.

Figure 6: Training ShuffleNet-v2 on ImageNet with πours,r and πours[no-promo],r. (a) Training trajectories of πours[no-promo],r for different r; colors denote r values (darker for smaller r). (b) Top-5 overflow ratios of tensors at each epoch, for the highlighted trajectory in (a); the largest ratio is blue and the fifth largest red. (c) Memory-accuracy tradeoffs of πours,r and πours[no-promo],r. (d) Low-precision ratio when training ends vs. when training starts, for πours,r and πours[no-promo],r. The results on CIFAR-10 are in Appendix C.2.
5.5 Choosing the value of r

The time and space savings of our method are most significant when a model is regularly retrained, which commonly occurs when new data is periodically incorporated into an existing model. Assuming that new data has a distribution similar to existing data, we can choose a single r (a parameter of our method) by conducting one set of experiments in which we train with πfp32 and with πours,r for different r, and then choosing the r value that maximizes the model-aggregate savings while still having an acceptable drop in accuracy.

To simulate this scenario, we create five datasets ImageNet-200-i (i ∈ [5]) as follows, so that each of them contains different but similar data: randomly select 1/5 of the classes in ImageNet (which has 1000 classes in total), and split the training data of each class evenly into five new datasets; a sketch of this construction is shown below.
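This is a minimal sketch of the dataset construction; the inputs (class list and per-class sample lists) are illustrative stand-ins for the actual ImageNet metadata.

```python
import random

def make_imagenet_200_splits(classes, samples_by_class, k=5, frac=0.2, seed=0):
    """Keep a random `frac` of the classes, then split each kept class's
    training samples evenly into k datasets (ImageNet-200-1..k)."""
    rng = random.Random(seed)
    kept = rng.sample(classes, int(len(classes) * frac))
    splits = [[] for _ in range(k)]
    for c in kept:
        xs = samples_by_class[c][:]
        rng.shuffle(xs)
        for i, x in enumerate(xs):
            splits[i % k].append((x, c))  # round-robin gives an even split
    return splits
```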
For each ImageNet-200-i, we train ShuffleNet-v2 with πfp32 and πours,r, and present the results in Figure 7. Based on the tradeoff results of πours,r, we can choose r = 0.4 if we desire an average accuracy drop of < 1% from πfp32, and r = 0.9 if an average accuracy drop of ≈ 3% is tolerable. We make two more observations: the tradeoff result of πours,r is similar across all five datasets even though each dataset is different, and for each r the variance in the accuracy of πours,r across different datasets and runs of training is similar to that of πfp32. Thus we expect that on a new but similar dataset, πours,r would have an accuracy drop similar to Figure 7, with acceptable variance.

Figure 7: Memory-accuracy tradeoffs of πours,r for ShuffleNet-v2 on ImageNet-200-i (i ∈ [5]).
6 LIMITATIONS AND FUTURE WORK

Our work has the same limitation present in prior work on low-precision floating-point training: as 8-bit floats and their operations are not handled natively in hardware, but rather simulated in software, we cannot directly measure the potential speedup of our method, though we do expect speedups to be proportional to the reduction in the model aggregate. We leave it as future work to perform such experiments on future hardware (e.g., NVIDIA’s H100) that natively supports more low-precision formats. Another direction for future work is to integrate our method into systems for automatically optimizing deep learning computations (e.g., (Jia et al., 2019; Unger et al., 2022)) to accelerate training.
REFERENCES

Andersch, M., Palmer, G., Krashinsky, R., Stam, N., Mehta, V., Brito, G., and Ramaswamy, S. NVIDIA Hopper Architecture In-Depth. https://developer.nvidia.com/blog/nvidia-hopper-architecture-in-depth/, 2022.
Banner, R., Hubara, I., Hoffer, E., and Soudry, D. Scalable methods for 8-bit training of neural networks. In NeurIPS, pp. 5151–5159, 2018.
Björck, J., Chen, X., De Sa, C., Gomes, C. P., and Weinberger, K. Low-Precision Reinforcement Learning: Running Soft Actor-Critic in Half Precision. In ICML, pp. 980–991, 2021.
Cambier, L., Bhiwandiwalla, A., Gong, T., Elibol, O. H., Nekuii, M., and Tang, H. Shifted and Squeezed 8-bit Floating Point format for Low-Precision Training of Deep Neural Networks. In ICLR, 2020.
Chiang, W., Baranowski, M., Briggs, I., Solovyev, A., Gopalakrishnan, G., and Rakamaric, Z. Rigorous floating-point mixed-precision tuning. In POPL, pp. 300–315, 2017.
Chmiel, B., Ben-Uri, L., Shkolnik, M., Hoffer, E., Banner, R., and Soudry, D. Neural gradients are near-lognormal: improved quantized and sparse training. In ICLR, 2021.
Choi, J., Wang, Z., Venkataramani, S., Chuang, P. I., Srinivasan, V., and Gopalakrishnan, K. PACT: Parameterized Clipping Activation for Quantized Neural Networks. arXiv:1805.06085, 2018.
Courbariaux, M., Bengio, Y., and David, J.-P. BinaryConnect: Training Deep Neural Networks with binary weights during propagations. In NeurIPS, pp. 3123–3131, 2015.
Das, D., Mellempudi, N., Mudigere, D., Kalamkar, D., Avancha, S., Banerjee, K., Sridharan, S., Vaidyanathan, K., Kaul, B., Georganas, E., Heinecke, A., Dubey, P., Corbal, J., Shustrov, N., Dubtsov, R., Fomenko, E., and Pirogov, V. Mixed Precision Training of Convolutional Neural Networks using Integer Operations. In ICLR, 2018.
Drumond, M., Lin, T., Jaggi, M., and Falsafi, B. Training DNNs with Hybrid Block Floating Point. In NeurIPS, pp. 451–461, 2018.
Fox, S., Rasoulinezhad, S., Faraone, J., Boland, D., and Leong, P. A Block Minifloat Representation for Training Deep Neural Networks. In ICLR, 2021.
Gholami, A., Kim, S., Dong, Z., Yao, Z., Mahoney, M. W., and Keutzer, K. A Survey of Quantization Methods for Efficient Neural Network Inference. In Low-Power Computer Vision: Improving the Efficiency of Artificial Intelligence, pp. 291–326. CRC Press, 2022. URL https://arxiv.org/abs/2103.13630.
Guo, H. and Rubio-González, C. Exploiting Community Structure for Floating-Point Precision Tuning. In ISSTA, pp. 333–343, 2018.
Gupta, S., Agrawal, A., Gopalakrishnan, K., and Narayanan, P. Deep Learning with Limited Numerical Precision. In ICML, pp. 1737–1746, 2015.
He, K., Zhang, X., Ren, S., and Sun, J. Deep Residual Learning for Image Recognition. In CVPR, pp. 770–778, 2016.
Howard, A. G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., and Adam, H. MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications. arXiv:1704.04861, 2017.
Iandola, F. N., Moskewicz, M. W., Ashraf, K., Han, S., Dally, W. J., and Keutzer, K. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <1MB model size. arXiv:1602.07360, 2016.
Jacob, B., Kligys, S., Chen, B., Zhu, M., Tang, M., Howard, A., Adam, H., and Kalenichenko, D. Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference. In CVPR, pp. 2704–2713, 2018.
Jia, Z., Zaharia, M., and Aiken, A. Beyond Data and Model Parallelism for Deep Neural Networks. In MLSys, pp. 1–13, 2019.
Kalamkar, D. D., Mudigere, D., Mellempudi, N., Das, D., Banerjee, K., Avancha, S., Vooturi, D. T., Jammalamadaka, N., Huang, J., Yuen, H., Yang, J., Park, J., Heinecke, A., Georganas, E., Srinivasan, S., Kundu, A., Smelyanskiy, M., Kaul, B., and Dubey, P. A Study of BFLOAT16 for Deep Learning Training. arXiv:1905.12322, 2019.
Karp, R. M. Reducibility Among Combinatorial Problems. In Complexity of Computer Computations, pp. 85–103, 1972.
Krizhevsky, A. Learning multiple layers of features from tiny images. Technical report, 2009. URL https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf.
Ma, N., Zhang, X., Zheng, H.-T., and Sun, J. ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. In ECCV, pp. 122–138, 2018.
Menon, H., Lam, M. O., Osei-Kuffuor, D., Schordan, M., Lloyd, S., Mohror, K., and Hittinger, J. ADAPT: Algorithmic Differentiation Applied to Floating-Point Precision Tuning. In SC, pp. 48:1–48:13, 2018.
Micikevicius, P., Narang, S., Alben, J., Diamos, G., Elsen, E., Garcia, D., Ginsburg, B., Houston, M., Kuchaiev, O., Venkatesh, G., and Wu, H. Mixed Precision Training. In ICLR, 2018.
Nagel, M., Fournarakis, M., Amjad, R. A., Bondarenko, Y., van Baalen, M., and Blankevoort, T. A White Paper on Neural Network Quantization. arXiv:2106.08295, 2021.
Nvidia. Documentation of apex.amp. https://nvidia.github.io/apex/amp.html, 2019.
Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., Kopf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J., and Chintala, S. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In NeurIPS, pp. 8024–8035, 2019.
PyTorch. Documentation of torch.amp. https://pytorch.org/docs/stable/amp.html#gradient-scaling, 2022.
Qin, H., Ding, Y., Fan, W., Leff, C., Bahri, M., and Shaw, E. Awesome Model Quantization. https://github.com/htqin/awesome-model-quantization, 2022.
Rajagopal, A., Vink, D. A., Venieris, S. I., and Bouganis, C.-S. Multi-Precision Policy Enforced Training (MuPPET): A Precision-Switching Strategy for Quantised Fixed-Point Training of CNNs. In ICML, pp. 7943–7952, 2020.
Rubio-González, C., Nguyen, C., Nguyen, H. D., Demmel, J., Kahan, W., Sen, K., Bailey, D. H., Iancu, C., and Hough, D. Precimonious: Tuning Assistant for Floating-Point Precision. In SC, pp. 27:1–27:12, 2013.
Rubio-González, C., Nguyen, C., Mehne, B., Sen, K., Demmel, J., Kahan, W., Iancu, C., Lavrijsen, W., Bailey, D. H., and Hough, D. Floating-Point Precision Tuning Using Blame Analysis. In ICSE, pp. 1074–1085, 2016.
Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., Berg, A. C., and Fei-Fei, L. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision, 115(3):211–252, 2015.
Sa, C. D., Leszczynski, M., Zhang, J., Marzoev, A., Aberger, C. R., Olukotun, K., and Ré, C. High-Accuracy Low-Precision Training. arXiv:1803.03383, 2018.
Sakr, C. and Shanbhag, N. Per-Tensor Fixed-Point Quantization of the Back-Propagation Algorithm. In ICLR, 2019.
Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., and Chen, L.-C. MobileNetV2: Inverted Residuals and Linear Bottlenecks. In CVPR, pp. 4510–4520, 2018.
Sun, X., Choi, J., Chen, C.-Y., Wang, N., Venkataramani, S., Srinivasan, V. V., Cui, X., Zhang, W., and Gopalakrishnan, K. Hybrid 8-bit Floating Point (HFP8) Training and Inference for Deep Neural Networks. In NeurIPS, pp. 4901–4910, 2019.
Sun, X., Wang, N., Chen, C.-Y., Ni, J., Agrawal, A., Cui, X., Venkataramani, S., El Maghraoui, K., Srinivasan, V. V., and Gopalakrishnan, K. Ultra-Low Precision 4-bit Training of Deep Neural Networks. In NeurIPS, pp. 1796–1807, 2020.
Unger, C., Jia, Z., Wu, W., Lin, S., Baines, M., Narvaez, C. E. Q., Ramakrishnaiah, V., Prajapati, N., McCormick, P., Mohd-Yusof, J., Luo, X., Mudigere, D., Park, J., Smelyanskiy, M., and Aiken, A. Unity: Accelerating DNN Training Through Joint Optimization of Algebraic Transformations and Parallelization. In OSDI, pp. 267–284, 2022.
Wang, N., Choi, J., Brand, D., Chen, C.-Y., and Gopalakrishnan, K. Training Deep Neural Networks with 8-bit Floating Point Numbers. In NeurIPS, pp. 7686–7695, 2018.
Wu, S., Li, G., Chen, F., and Shi, L. Training and Inference with Integers in Deep Neural Networks. In ICLR, 2018.
Yang, C., Wu, Z., Chee, J., Sa, C. D., and Udell, M. How Low Can We Go: Trading Memory for Error in Low-Precision Training. In ICLR, 2022.
Yang, G., Zhang, T., Kirichenko, P., Bai, J., Wilson, A. G., and De Sa, C. SWALP: Stochastic Weight Averaging in Low Precision Training. In ICML, pp. 7015–7024, 2019a.
Yang, J., Shen, X., Xing, J., Tian, X., Li, H., Deng, B., Huang, J., and Hua, X.-s. Quantization Networks. In CVPR, pp. 7308–7316, 2019b.
Zamirai, P., Zhang, J., Aberger, C. R., and Sa, C. D. Revisiting BFloat16 Training. arXiv:2010.06192, 2020.
Zhang, T., Lin, Z., Yang, G., and Sa, C. D. QPyTorch: A Low-Precision Arithmetic Simulation Framework. arXiv:1910.04540, 2019.
Zhang, X., Liu, S., Zhang, R., Liu, C., Huang, D., Zhou, S., Guo, J., Guo, Q., Du, Z., Zhi, T., and Chen, Y. Fixed-Point Back-Propagation Training. In CVPR, pp. 2327–2335, 2020.
Zhou, S., Ni, Z., Zhou, X., Wen, H., Wu, Y., and Zou, Y. DoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients. arXiv:1606.06160, 2016.
A
|
1590 |
+
PROBLEM: DEFERRED PROOF
|
1591 |
+
Theorem 3.2. Problem 3.1 is NP-hard.
|
1592 |
+
Proof. We prove the NP-hardness of Problem 3.1 (the
|
1593 |
+
memory-accuracy tradeoff problem) by reducing the
|
1594 |
+
knapsack problem (which is NP-hard) to the tradeoff
|
1595 |
+
problem.
|
1596 |
+
More precisely, we prove that the knapsack
|
1597 |
+
problem can be solved in polynomial time if we assume
|
1598 |
+
an oracle for the tradeoff problem.
|
1599 |
+
Recall the knapsack problem: given n items with weights
|
1600 |
+
wi ∈ N and profits pi ∈ N (i ∈ [n]), and given a threshold
|
1601 |
+
W ∈ N, decide which items to choose such that the total
|
1602 |
+
profit of the chosen items is maximized while their total
|
1603 |
+
weight does not exceed W. That is, find α ∈ {0, 1}n that
|
1604 |
+
maximizes �
|
1605 |
+
i∈[n] αipi subject to �
|
1606 |
+
i∈[n] αiwi ≤ W. This
|
1607 |
+
problem is well-known to be NP-hard (Karp, 1972).
Given an instance of the knapsack problem (w, p, W), we construct an instance of the tradeoff problem as follows:

• Notation. The construction below uses a constant k ∈ ℕ and floating-point formats fp_hi, fp_lo ∈ FP (one for high precision and the other for low precision). We specify below the conditions they should satisfy, and show that some k, fp_hi, and fp_lo indeed satisfy them. We write rnd_hi(·) and rnd_lo(·) as shorthand for rnd_{fp_hi}(·) and rnd_{fp_lo}(·).

• Training setup. We consider a very simple setting for training: the gradient descent algorithm with a learning rate η = 2^{−l} (l ∈ ℕ) is applied for just one epoch; all parameters are initialized to 0 and their master copies are represented in fp_hi; and the negative loss of a model on the training data (i.e., −L(f_θ(x), y), using the notation described below) is used as the accuracy of the model. Here l ∈ ℕ can be any natural number.

• Model and loss networks. A model network M and a loss network L are given in Figure 8, where M has n parameter tensors θ_i ∈ ℝ^{w_i} of size w_i (i ∈ [n]). For an input-output pair (x, y) ∈ ℝ^n × ℝ, M and L compute a predicted output f_θ(x) ∈ ℝ and a loss L(f_θ(x), y) ∈ ℝ as follows (assuming that no rounding functions are applied):

    f_θ(x) ≜ Σ_{i∈[n]} Σ_{j∈[w_i]} θ_{i,j} x_i,        L(f_θ(x), y) ≜ 2^{−k} |f_θ(x) − y|.

  Roughly speaking, M is (a variant of) a linear classifier and L is an ℓ1-loss (scaled by 2^{−k}).

• Training data. The training data consist of a single input-output pair (x, y) ∈ ℝ^n × ℝ that satisfies

    x_i = rnd_lo(√(p_i/w_i)),        y < −2^{−(k+l)} Σ_{i∈[n]} w_i x_i²

  for all i ∈ [n]. Here y can take any value as long as it satisfies the above inequality. Note that x_i can be different from √(p_i/w_i), since the latter value may not be representable in fp_lo.

• Precision-candidate assignment. A precision-candidate assignment C : TS × {hi, lo} → FP is given as:

    C(t, hi) ≜ fp_hi,        C(t, lo) ≜ fp_lo        for all t ∈ TS.

  That is, for all tensors, fp_hi is used as the high-precision format and fp_lo as the low-precision format. Here fp_hi and fp_lo should satisfy the following:

    e_hi ≥ e_lo,  m_hi ≥ m_lo,                                  (3)
    |rnd_lo(s) − s| < |s| · err        for all s ∈ S1,          (4)
    rnd_lo(s) = 0                      for all s ∈ S2,          (5)
    rnd_hi(s) = s                      for all s ∈ S2 ∪ S3,     (6)

  where e_hi and m_hi (and e_lo and m_lo) denote the number of exponent bits and mantissa bits of fp_hi (and fp_lo), and

    err ≜ 1/(6n · max_{i∈[n]} p_i),
    S1 ≜ {√(p_i/w_i) | i ∈ [n]},
    S2 ≜ {2^{−k}} ∪ {2^{−k} x_i | i ∈ [n]},
    S3 ≜ {2^{−(k+l)} x_i | i ∈ [n]}.

  Eq. (4) says that the relative error of representing each s ∈ S1 in fp_lo should be less than err. Eq. (5) says that each s ∈ S2 should underflow to 0 when represented in fp_lo. Eq. (6) says that each s ∈ S2 ∪ S3 should be representable in fp_hi.

• Low-precision ratio. A lower bound r ∈ [0, 1] on the low-precision ratio is given as:

    r ≜ max{0, 1 − (2W + 1)/size(TS)} ∈ [0, 1].

  So r decreases linearly as W increases.

We make three points about the above construction.

• First, each part of the knapsack problem (w, p, W) is used in the following parts of the construction: w_i is used mainly in the size of the parameter tensor θ_i; p_i in the input x_i; and W in the lower bound r.

• Second, there exist k ∈ ℕ and fp_hi, fp_lo ∈ FP that satisfy Eqs. (3)–(6). This can be shown as follows: first, by taking sufficiently many exponent and mantissa bits for fp_lo, we can satisfy Eq. (4); next, by taking a sufficiently large k, we can satisfy Eq. (5); finally, by taking sufficiently many exponent and mantissa bits for fp_hi, we can satisfy Eq. (3) and Eq. (6) (since x_i is representable in fp_lo and 2^{−(k+l)} is a power of two). A numeric illustration with standard IEEE formats follows this list.

• Third, some well-known models (e.g., ShuffleNet-v2) have a structure similar to M in that they apply the following operations as a subroutine: split a tensor into multiple tensors, apply some operators to each split tensor, and combine the resulting tensors into a single tensor.
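As the numeric illustration of the second point (using IEEE float16 and float32 as stand-ins for fp_lo and fp_hi; the formats in the proof are constructed abstractly and need not be these), a small power of two can satisfy the underflow condition (5) in the low-precision format while remaining exactly representable in the high-precision one:

```python
import numpy as np

# Stand-ins for the formats in the proof: fp_lo = float16, fp_hi = float32.
# With k = 25, 2**-k lies below float16's smallest subnormal (2**-24), so it
# underflows to 0 under rnd_lo (Eq. (5)) but is exact under rnd_hi (Eq. (6)).
k = 25
s = 2.0 ** -k
assert float(np.float16(s)) == 0.0   # rnd_lo(s) = 0
assert float(np.float32(s)) == s     # rnd_hi(s) = s
```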
[Figure 8 appears here. The diagram itself is not recoverable from the extraction; its node labels are split, conv, sum, and loss, over the input x, the parameter tensors θ_i, the intermediate values v, and the target y.]

Figure 8: The model network M and the loss network L used in the proof of Theorem 3.2.
We now prove that the knapsack problem (w, p, W) can be solved in polynomial time if an oracle for the above tradeoff problem is given. Suppose that π ∈ Π(C) is an optimal solution to the above tradeoff problem (given by the oracle). Define an item selection α ∈ {0, 1}^n for the knapsack problem as:

    α_i ≜ 1 if π(dθ_i) = π(dv_{n+i}) = π(dv_{2n+1}) = fp_hi, and α_i ≜ 0 otherwise,

for each i ∈ [n]. Note that α can be constructed from π in linear time. Thus, it suffices to show that α is an optimal solution to the knapsack problem (w, p, W), which is equivalent to the following two claims:

• Claim 1: We have Σ_{i∈[n]} α_i w_i ≤ W.

• Claim 2: For any α′ ∈ {0, 1}^n with Σ_{i∈[n]} α′_i w_i ≤ W, we have Σ_{i∈[n]} α′_i p_i ≤ Σ_{i∈[n]} α_i p_i.

We now prove each claim as follows.

Proof of Claim 1. If α = (0, …, 0), then the claim clearly holds. Suppose that α ≠ (0, …, 0). Then, we have

    1 − (1 + 2 Σ_{i∈[n]} α_i w_i) / size(TS) ≥ ratio_lo(π) ≥ r ≥ 1 − (1 + 2W) / size(TS).

Here the first inequality uses α ≠ (0, …, 0) and the definitions of α and M; the second inequality uses the fact that π is a valid solution to the above tradeoff problem; and the third inequality uses the definition of r. Hence, the claim holds.

Proof of Claim 2. Suppose that the claim does not hold. Then, there exists α′ ∈ {0, 1}^n such that

    Σ_{i∈[n]} α′_i w_i ≤ W,        Σ_{i∈[n]} α′_i p_i > Σ_{i∈[n]} α_i p_i.

Define a precision assignment π′ ∈ Π(C) as:

    π′(dv_{2n+1}) ≜ fp_hi,
    π′(dθ_i) ≜ π′(dv_{n+i}) ≜ fp_hi        for all i ∈ [n] with α′_i = 1,
    π′(t) ≜ fp_lo                          for all other t ∈ TS.

Then, we have ratio_lo(π′) ≥ r by Σ_{i∈[n]} α′_i w_i ≤ W and the definitions of π′, M, and r. Hence, it suffices to show acc(π) < acc(π′), because this would contradict the fact that π is an optimal solution.

To show acc(π) < acc(π′), we prove the following two lemmas: the first lemma gives a closed form of acc(π) and acc(π′), and the second lemma shows that Σ_{i∈[n]} β_i w_i x_i² is close to Σ_{i∈[n]} β_i p_i (the former summation appears in acc(π) and acc(π′)).

Lemma A.1. The following hold:

    acc(π)  = 2^{−k} y + 2^{−(2k+l)} Σ_{i∈[n]} α_i w_i x_i²,
    acc(π′) = 2^{−k} y + 2^{−(2k+l)} Σ_{i∈[n]} α′_i w_i x_i².

Proof. We prove the equation for acc(π) only, since the equation for acc(π′) can be proved similarly.

First, we show that for all i ∈ [n] and j ∈ [w_i],

    d̂θ_{i,j} = α_i · 2^{−k} x_i.        (7)

Pick any i ∈ [n] and j ∈ [w_i]. Note that by the definition of M, we have

    d̂θ_{i,j} = rnd_{π(dθ_i)}( rnd_{π(dv_{n+i})}(rnd_{π(dv_{2n+1})}(2^{−k})) · rnd_{π(v_i)}(rnd_{π(v_0)}(x_i)) )
             = rnd_{π(dθ_i)}( rnd_{π(dv_{n+i})}(rnd_{π(dv_{2n+1})}(2^{−k})) · x_i ),

where the second equality uses Eq. (3) and the fact that x_i is representable in fp_lo. We prove Eq. (7) by case analysis on α_i. Suppose α_i = 1. Then, by the definition of α_i, π(dθ_i) = π(dv_{n+i}) = π(dv_{2n+1}) = fp_hi. From this, we get the desired equation:

    d̂θ_{i,j} = rnd_hi( rnd_hi(rnd_hi(2^{−k})) · x_i ) = rnd_hi(2^{−k} · x_i) = 2^{−k} x_i,

where the last two equalities use Eq. (6). Suppose now α_i = 0. Then, by the definition of α_i, at least one of π(dθ_i), π(dv_{n+i}), and π(dv_{2n+1}) is fp_lo. If π(dv_{n+i}) = fp_lo or π(dv_{2n+1}) = fp_lo, we get the desired equation:

    d̂θ_{i,j} = rnd_{π(dθ_i)}( rnd_lo(2^{−k}) · x_i ) = rnd_{π(dθ_i)}(0 · x_i) = 0,

where the first equation uses Eq. (3) and Eq. (6), and the second equation uses Eq. (5). The remaining case is when π(dv_{n+i}) = π(dv_{2n+1}) = fp_hi and π(dθ_i) = fp_lo. We get the desired equation in this case as well:

    d̂θ_{i,j} = rnd_lo( rnd_hi(rnd_hi(2^{−k})) · x_i ) = rnd_lo(2^{−k} · x_i) = 0,

where the second equality uses Eq. (6), and the last equality uses Eq. (5). Hence, we have proved Eq. (7).

Next, let θ_i be the i-th parameter tensor before training starts, and θ′_i be the corresponding tensor after training ends (i ∈ [n]). Then, by the definition of the tradeoff problem constructed above, we have θ_{i,j} = 0 and

    θ′_{i,j} = θ_{i,j} − rnd_hi(2^{−l} · d̂θ_{i,j})
             = 0 − rnd_hi(2^{−l} · (α_i · 2^{−k} x_i))
             = α_i · (−2^{−(k+l)} x_i),

where the second equality uses Eq. (7) and the third equality uses Eq. (6). Using this equation, we finally obtain the conclusion of the lemma:

    acc(π) = −L(f_θ′(x), y)
           = −2^{−k} | y − Σ_{i∈[n]} Σ_{j∈[w_i]} θ′_{i,j} x_i |
           = −2^{−k} | y − Σ_{i∈[n]} Σ_{j∈[w_i]} α_i · (−2^{−(k+l)} x_i) · x_i |
           = −2^{−k} | y + Σ_{i∈[n]} α_i · 2^{−(k+l)} w_i x_i² |
           = 2^{−k} ( y + Σ_{i∈[n]} α_i · 2^{−(k+l)} w_i x_i² )
           = 2^{−k} y + 2^{−(2k+l)} Σ_{i∈[n]} α_i w_i x_i²,

where the first two equalities use the definition of accuracy, and the second-to-last equality uses the definition of y. This concludes the proof of the lemma. ■

Lemma A.2. For any β ∈ {0, 1}^n,

    | Σ_{i∈[n]} β_i w_i x_i² − Σ_{i∈[n]} β_i p_i | < 1/2.

Proof. We first show that for any i ∈ [n],

    |w_i x_i² − p_i| < 1/(2n).

Pick any i ∈ [n]. By Eq. (4) and the definition of x_i, we have

    | x_i − √(p_i/w_i) | < √(p_i/w_i) · 1/(6n · max_{j∈[n]} p_j) ≤ √(p_i/w_i) · 1/(6n p_i).

From this, we have

    √(p_i/w_i) (1 − 1/(6n p_i)) < x_i < √(p_i/w_i) (1 + 1/(6n p_i)),
    (p_i/w_i) (1 − 1/(6n p_i))² < x_i² < (p_i/w_i) (1 + 1/(6n p_i))².

From this, we obtain the desired result:

    |w_i x_i² − p_i| < p_i ( (1 + 1/(6n p_i))² − 1 )
                     = p_i ( 1/(3n p_i) + (1/(6n p_i))² )
                     < p_i ( 1/(3n p_i) + 1/(6n p_i) )
                     = p_i · 1/(2n p_i) = 1/(2n),

where the second inequality uses 6n p_i > 1 (as n, p_i ∈ ℕ).

Using this result, we can show the conclusion as follows:

    | Σ_{i∈[n]} β_i w_i x_i² − Σ_{i∈[n]} β_i p_i | = | Σ_{i∈[n]} β_i (w_i x_i² − p_i) |
                                                   ≤ Σ_{i∈[n]} |β_i| · |w_i x_i² − p_i|
                                                   < Σ_{i∈[n]} 1/(2n) = 1/2,

where the last inequality uses |β_i| ≤ 1. This completes the proof of the lemma. ■
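The key inequality in the proof above, p_i((1 + 1/(6n p_i))² − 1) < 1/(2n), is also easy to confirm numerically (a sanity check only, over a small range of natural numbers):

```python
# Check p * ((1 + 1/(6*n*p))**2 - 1) < 1/(2*n) for small natural n and p.
for n in range(1, 101):
    for p in range(1, 101):
        a = 1.0 / (6 * n * p)
        assert p * ((1 + a) ** 2 - 1) < 1.0 / (2 * n), (n, p)
print("bound holds for all tested (n, p)")
```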
Using the two lemmas, we prove acc(π) < acc(π′) as follows. First, by Lemma A.2 and Σ_{i∈[n]} α_i p_i < Σ_{i∈[n]} α′_i p_i, we have

    Σ_{i∈[n]} α_i w_i x_i² < Σ_{i∈[n]} α_i p_i + 1/2 ≤ Σ_{i∈[n]} α′_i p_i − 1/2 < Σ_{i∈[n]} α′_i w_i x_i²,

where the second inequality comes from α_i, α′_i ∈ {0, 1} and p_i ∈ ℕ. From this, and by Lemma A.1, we obtain acc(π) < acc(π′) as desired. This concludes the proof of Claim 2, thereby finishing the proof of the theorem.
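Computationally, the reduction's final step is the linear-time extraction of α from π. A minimal sketch follows (an illustration only; the tensor names d_theta_i and d_v_j and the string-valued formats are hypothetical stand-ins for the abstract objects in the proof):

```python
def alpha_from_pi(pi, n, fp_hi="fp_hi"):
    """Extract the knapsack selection alpha from a precision assignment pi
    (a dict from gradient-tensor names to formats), as defined in the proof:
    alpha_i = 1 iff dtheta_i, dv_{n+i}, and dv_{2n+1} are all assigned fp_hi."""
    return [
        int(pi[f"d_theta_{i}"] == fp_hi
            and pi[f"d_v_{n + i}"] == fp_hi
            and pi[f"d_v_{2 * n + 1}"] == fp_hi)
        for i in range(1, n + 1)
    ]

# Hypothetical assignment for n = 2: item 1 all high-precision, item 2 partly low.
pi = {"d_theta_1": "fp_hi", "d_v_3": "fp_hi", "d_v_5": "fp_hi",
      "d_theta_2": "fp_lo", "d_v_4": "fp_lo"}
print(alpha_from_pi(pi, n=2))  # [1, 0]
```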
Remark A.3. In the proof of Theorem 3.2, we proved the NP-hardness of Problem 3.1 by exploiting only a few limited aspects of the problem. For instance, we used the fact that some values representable in a high-precision format round to zero in a low-precision format; on the other hand, many other values representable in a high-precision format round to non-zero values in a low-precision format, and this indeed occurs in practical training (even more frequently than underflows). Also, we used a simple setting for training in which a gradient descent algorithm is applied for one epoch, the training data consist of one input-output pair, and the test data are the same as the training data; in practical training, by contrast, a gradient descent algorithm is applied for many epochs, the training data consist of many input-output pairs, and the test data differ from the training data.

Problem 3.1 is general enough to embrace all the aforementioned aspects of floating point and training, including those that are not exploited in the proof of Theorem 3.2. Since those aspects are likely to make the problem even more difficult, we conjecture that the problem may be even harder than NP-hard.
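The contrast drawn in the remark is easy to observe with standard IEEE formats (an illustration only; the proof itself does not fix particular formats): most magnitudes round to nearby non-zero low-precision values with a small relative error, and only magnitudes below the smallest subnormal underflow to zero.

```python
import numpy as np

x = 0.1                               # representable in float32 (up to rounding)
lo = float(np.float16(np.float32(x)))
print(lo, abs(lo - x) / x)            # non-zero rounding; relative error below 2**-11

tiny = 2.0 ** -25                     # below float16's smallest subnormal 2**-24
print(float(np.float16(tiny)))        # 0.0: an underflow, as exploited in the proof
```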
B    EXPERIMENTS: DEFERRED DETAILS

The datasets we use have the following licenses:

• CIFAR-10 and CIFAR-100: These datasets are under the MIT license.

• ImageNet: This dataset can be used "only for non-commercial research and educational purposes." For more details, see its webpage (Stanford Vision Lab, 2020).

The implementations of models we use have the following licenses:

• SqueezeNet for CIFAR-10 and CIFAR-100: We adapt an implementation of the model from a public GitHub repository (Pathak, 2020), whose license information is not available.

• ShuffleNet-v2, MobileNet-v2, and ResNet-18 for CIFAR-10 and CIFAR-100: We adapt an implementation of these models from a public GitHub repository (kuangliu, 2021), which is under the MIT license.

• ShuffleNet-v2 for ImageNet and ImageNet-200-i: We adapt an implementation of the model from the torchvision library (PyTorch, 2022b), which is under the BSD 3-Clause license.

The details of how we train models are as follows (a code sketch of the first setup follows this list):

• Four models on CIFAR-10 and CIFAR-100: We train the four models with a standard setup (kuangliu, 2021). In particular, we run the (non-Nesterov) SGD optimizer for 200 epochs with a minibatch size of 128 (over 1 GPU), a learning rate of 0.1, momentum of 0.9, weight decay of 5 × 10^{−4}, and the cosine annealing scheduler for the learning rate. For dynamic loss scaling, we use an initial scale of 2^16, a growth factor of 2, a back-off factor of 0.5, and a growth interval of 1 epoch, as suggested in PyTorch (PyTorch, 2022a).

• ShuffleNet-v2 on ImageNet: We train the model with the default setup given in PyTorch's GitHub repository (PyTorch, 2022c), except that we use a larger minibatch size and learning rate as in (Kalamkar et al., 2019; PyTorch, 2022d; Krizhevsky, 2014; Goyal et al., 2017) to reduce the wall-clock time of training. In particular, we run the (non-Nesterov) SGD optimizer for 90 epochs with a minibatch size of 1024 (over 16 GPUs), a learning rate of 0.4, momentum of 0.9, weight decay of 10^{−4}, and the cosine annealing scheduler for the learning rate. For dynamic loss scaling, we use an initial scale of 2^16, a growth factor of 2, a back-off factor of 0.5, and a growth interval of 0.5 epoch, as suggested in PyTorch (PyTorch, 2022a).

• ShuffleNet-v2 on ImageNet-200-i: We train the model with the same settings as for ImageNet, except that we use the default values for minibatch size and learning rate given in (PyTorch, 2022c), i.e., a minibatch size of 256 (over 4 GPUs) and a learning rate of 0.1.
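The following is a minimal sketch of the CIFAR recipe above (an outline under stated assumptions, not the actual training script; net and the synthetic loader are stand-ins for the real model and data pipeline). Note that PyTorch's GradScaler measures its growth interval in optimizer steps, so 1 epoch at minibatch size 128 over the 50,000 CIFAR training images corresponds to about 391 steps:

```python
import torch

net = torch.nn.Linear(3 * 32 * 32, 100)  # stand-in for SqueezeNet etc. on CIFAR-100
loader = [(torch.randn(128, 3 * 32 * 32), torch.randint(0, 100, (128,)))]  # synthetic

optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9,
                            weight_decay=5e-4, nesterov=False)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
# Dynamic loss scaling: initial scale 2**16, growth factor 2, back-off factor 0.5,
# growth interval of 1 epoch (50000 / 128, i.e. about 391 optimizer steps).
scaler = torch.cuda.amp.GradScaler(init_scale=2**16, growth_factor=2.0,
                                   backoff_factor=0.5, growth_interval=391)

for epoch in range(200):
    for x, y in loader:
        optimizer.zero_grad()
        with torch.autocast("cuda", enabled=torch.cuda.is_available()):
            loss = torch.nn.functional.cross_entropy(net(x), y)
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
    scheduler.step()
```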
C    EXPERIMENTS: DEFERRED RESULTS

C.1    Comparison with Existing Precision Assignments

Figure 9 presents results omitted from Figure 4: training results of smaller variant models (with width multiplier 0.5 or 0.1) on CIFAR-100 with πfp32, πunif, πop, πop′, and πours,r. The figure shows results similar to Figure 4: the results for the variant models with width multiplier 0.5 (and 0.1) are similar to those for the original models (and the variant models with width multiplier 0.25). Figures 10 and 11 show the average training trajectories for the configurations presented in Figures 4 and 9.

C.2    Ablation Study: Precision Demotion and Promotion

Figure 12 presents results omitted from Figure 5: training results of ResNet-18 on CIFAR-100 with πours,r, πours[inc],r, and πours[rand],r. The figure shows results similar to Figure 5, except that it shows smaller differences in the memory-accuracy tradeoff between the three precision assignments.

Figure 13 presents results omitted from Figure 6: training results of four models on CIFAR-10 with πours,r and πours[no-promo],r. The figure shows results similar to Figure 6, except that the training of ResNet-18 on CIFAR-10 does not diverge even with πours[no-promo],r for all r values.
[Figure 9 appears here, with eight panels: (a) CIFAR-100, SqueezeNet‡; (b) CIFAR-100, SqueezeNet¶; (c) CIFAR-100, ShuffleNet-v2‡; (d) CIFAR-100, ShuffleNet-v2¶; (e) CIFAR-100, MobileNet-v2‡; (f) CIFAR-100, MobileNet-v2¶; (g) CIFAR-100, ResNet-18‡; (h) CIFAR-100, ResNet-18¶. Each panel plots test accuracy (%) against the low-precision ratio for πfp32, πop, πop′, πunif, and πours; only axis labels and tick values survive the extraction.]

Figure 9: Continued from Figure 4. Memory-accuracy tradeoffs of πunif (Micikevicius et al., 2018), πop (Sun et al., 2019), πop′ (Kalamkar et al., 2019), and πours,r for smaller variants of four models on CIFAR-100. The variant models have width multiplier 0.5 (marked by ‡) or 0.1 (marked by ¶). Top-right points are better than bottom-left ones. In all but one plot, there are •s above and to the right of [marker] and [marker], respectively; even in the one plot (g), •s have almost the same tradeoffs as [marker] and [marker]. In three of the plots, ⋆ has much smaller y-values than the other points; ⋆ is missing in (h) as its y-value is too small.
[Figure 10 appears here, with twelve panels: (a) CIFAR-10, SqueezeNet; (b) CIFAR-100, SqueezeNet; (c) CIFAR-100, SqueezeNet†; (d) CIFAR-10, ShuffleNet-v2; (e) CIFAR-100, ShuffleNet-v2; (f) CIFAR-100, ShuffleNet-v2†; (g) CIFAR-10, MobileNet-v2; (h) CIFAR-100, MobileNet-v2; (i) CIFAR-100, MobileNet-v2†; (j) CIFAR-10, ResNet-18; (k) CIFAR-100, ResNet-18; (l) CIFAR-100, ResNet-18†. Each panel plots test accuracy (%) against the training epoch (0–200) for πfp32, πop, πop′, πunif, and πours; only axis labels and tick values survive the extraction.]

Figure 10: Training trajectories for the configurations shown in Figure 4. Each line shows the average training trajectory for each precision assignment. πours,r is colored from navy to yellow (darker for smaller r).
[Figure 11 appears here, with eight panels: (a) CIFAR-100, SqueezeNet‡; (b) CIFAR-100, SqueezeNet¶; (c) CIFAR-100, ShuffleNet-v2‡; (d) CIFAR-100, ShuffleNet-v2¶; (e) CIFAR-100, MobileNet-v2‡; (f) CIFAR-100, MobileNet-v2¶; (g) CIFAR-100, ResNet-18‡; (h) CIFAR-100, ResNet-18¶. Each panel plots test accuracy (%) against the training epoch (0–200) for πfp32, πop, πop′, πunif, and πours; only axis labels and tick values survive the extraction.]

Figure 11: Training trajectories for the configurations shown in Figure 9. Each line shows the average training trajectory for each precision assignment. πours,r is colored from navy to yellow (darker for smaller r).
[Figure 12 appears here, with one panel: (a) ResNet-18. The panel plots test accuracy (%) against the low-precision ratio for πours[inc], πours[rand], and πours; only axis labels and tick values survive the extraction.]

Figure 12: Continued from Figure 5. Memory-accuracy tradeoffs of πours,r, πours[inc],r, and πours[rand],r for ResNet-18 on CIFAR-100. Observe that •s are above and to the right of other points in nearly all cases.

[Figure 13 appears here, with four rows: (a) CIFAR-10, SqueezeNet; (b) CIFAR-10, ShuffleNet-v2; (c) CIFAR-10, MobileNet-v2; (d) CIFAR-10, ResNet-18. The four columns plot test accuracy (%) vs. epoch, top-1 to top-5 overflow ratios vs. epoch, test accuracy (%) vs. low-precision ratio, and the low-precision ratio at the end vs. the start of training; only axis labels and tick values survive the extraction.]

Figure 13: Continued from Figure 6. Training four models on CIFAR-10 with πours,r and πours[no-promo],r. Column 1: Training trajectories of πours[no-promo],r for different r; colors denote r values (darker for smaller r). Column 2: Top-5 overflow ratios of tensors at each epoch, for the highlighted trajectory in (a); the largest ratio is blue and the fifth largest red. Column 3: Memory-accuracy tradeoffs of πours,r and πours[no-promo],r. Column 4: Low-precision ratio when training ends vs. when training starts, for πours,r and πours[no-promo],r.
REFERENCES (FOR APPENDIX)

Goyal, P., Dollár, P., Girshick, R. B., Noordhuis, P., Wesolowski, L., Kyrola, A., Tulloch, A., Jia, Y., and He, K. Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour. arXiv:1706.02677, 2017.

Kalamkar, D. D., Mudigere, D., Mellempudi, N., Das, D., Banerjee, K., Avancha, S., Vooturi, D. T., Jammalamadaka, N., Huang, J., Yuen, H., Yang, J., Park, J., Heinecke, A., Georganas, E., Srinivasan, S., Kundu, A., Smelyanskiy, M., Kaul, B., and Dubey, P. A Study of BFLOAT16 for Deep Learning Training. arXiv:1905.12322, 2019.

Karp, R. M. Reducibility Among Combinatorial Problems. In Complexity of Computer Computations, pp. 85–103, 1972.

Krizhevsky, A. One weird trick for parallelizing convolutional neural networks. arXiv:1404.5997, 2014.

kuangliu. https://github.com/kuangliu/pytorch-cifar, 2021.

Pathak, G. https://github.com/gsp-27/pytorch_Squeezenet, 2020.

PyTorch. Documentation of torch.amp. https://pytorch.org/docs/stable/amp.html#gradient-scaling, 2022a.

PyTorch. https://github.com/pytorch/vision/tree/main/torchvision/models, 2022b.

PyTorch. https://github.com/pytorch/vision/tree/main/references/classification, 2022c.

PyTorch. https://github.com/pytorch/vision/tree/main/references/classification#resnext, 2022d.

Stanford Vision Lab. https://image-net.org/download.php, 2020.