jackkuo committed on
Commit 3b3baca
verified · 1 Parent(s): 1735e97

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +68 -0
  2. 0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss +3 -0
  3. 0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf +3 -0
  4. 0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss +3 -0
  5. 0tFIT4oBgHgl3EQf3Cv8/vector_store/index.pkl +3 -0
  6. 29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss +3 -0
  7. 29FRT4oBgHgl3EQfnzcq/content/tmp_files/2301.13606v1.pdf.txt +1307 -0
  8. 29FRT4oBgHgl3EQfnzcq/content/tmp_files/load_file.txt +0 -0
  9. 2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss +3 -0
  10. 39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf +3 -0
  11. 39AyT4oBgHgl3EQfcPct/vector_store/index.pkl +3 -0
  12. 39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss +3 -0
  13. 39FAT4oBgHgl3EQfEhxj/vector_store/index.pkl +3 -0
  14. 3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf +3 -0
  15. 3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss +3 -0
  16. 3dFLT4oBgHgl3EQfry9C/vector_store/index.pkl +3 -0
  17. 3tA0T4oBgHgl3EQfNP_j/content/tmp_files/2301.02145v1.pdf.txt +1565 -0
  18. 3tA0T4oBgHgl3EQfNP_j/content/tmp_files/load_file.txt +0 -0
  19. 3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf +3 -0
  20. 4dAzT4oBgHgl3EQf9f77/content/tmp_files/2301.01922v1.pdf.txt +1151 -0
  21. 4dAzT4oBgHgl3EQf9f77/content/tmp_files/load_file.txt +0 -0
  22. 5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf +3 -0
  23. 5NE0T4oBgHgl3EQfegCm/vector_store/index.pkl +3 -0
  24. 5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf +3 -0
  25. 5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss +3 -0
  26. 5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf +3 -0
  27. 5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss +3 -0
  28. 5dE4T4oBgHgl3EQf1Q2P/vector_store/index.pkl +3 -0
  29. 69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf +3 -0
  30. 69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss +3 -0
  31. 69E2T4oBgHgl3EQf7ggl/content/tmp_files/2301.04209v1.pdf.txt +1660 -0
  32. 69E2T4oBgHgl3EQf7ggl/content/tmp_files/load_file.txt +0 -0
  33. 6dE4T4oBgHgl3EQfBwu-/content/tmp_files/2301.04855v1.pdf.txt +1475 -0
  34. 6dE4T4oBgHgl3EQfBwu-/content/tmp_files/load_file.txt +0 -0
  35. 8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf +3 -0
  36. 8NE3T4oBgHgl3EQfqQrl/vector_store/index.pkl +3 -0
  37. 99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf +3 -0
  38. B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf +3 -0
  39. B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss +3 -0
  40. B9AyT4oBgHgl3EQf4PrK/vector_store/index.pkl +3 -0
  41. BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss +3 -0
  42. BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf +3 -0
  43. BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss +3 -0
  44. BdFQT4oBgHgl3EQf9zeq/vector_store/index.pkl +3 -0
  45. CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf +3 -0
  46. CdE4T4oBgHgl3EQfeQ2g/vector_store/index.pkl +3 -0
  47. CtFRT4oBgHgl3EQfwTji/content/tmp_files/2301.13638v1.pdf.txt +595 -0
  48. CtFRT4oBgHgl3EQfwTji/content/tmp_files/load_file.txt +276 -0
  49. DNE0T4oBgHgl3EQfygIs/content/tmp_files/2301.02659v1.pdf.txt +1969 -0
  50. DNE0T4oBgHgl3EQfygIs/content/tmp_files/load_file.txt +0 -0
.gitattributes CHANGED
@@ -9199,3 +9199,71 @@ ONFJT4oBgHgl3EQfHix_/content/2301.11452v1.pdf filter=lfs diff=lfs merge=lfs -text
 K9E1T4oBgHgl3EQfswUE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 zdAyT4oBgHgl3EQfO_bl/content/2301.00018v1.pdf filter=lfs diff=lfs merge=lfs -text
 ytFQT4oBgHgl3EQfBjWV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+LtE0T4oBgHgl3EQfiwHh/content/2301.02451v1.pdf filter=lfs diff=lfs merge=lfs -text
+zNAyT4oBgHgl3EQfn_iY/content/2301.00499v1.pdf filter=lfs diff=lfs merge=lfs -text
+i9FAT4oBgHgl3EQfaB2u/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+u9A0T4oBgHgl3EQfL_8x/content/2301.02125v1.pdf filter=lfs diff=lfs merge=lfs -text
+5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf filter=lfs diff=lfs merge=lfs -text
+GdA0T4oBgHgl3EQfBf_u/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+OtE0T4oBgHgl3EQf0wJ3/content/2301.02690v1.pdf filter=lfs diff=lfs merge=lfs -text
+UNE5T4oBgHgl3EQfbQ8x/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+adE4T4oBgHgl3EQfoA0n/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+mdFST4oBgHgl3EQfKTg2/content/2301.13736v1.pdf filter=lfs diff=lfs merge=lfs -text
+29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+K9E1T4oBgHgl3EQfswUE/content/2301.03368v1.pdf filter=lfs diff=lfs merge=lfs -text
+0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf filter=lfs diff=lfs merge=lfs -text
+sNFIT4oBgHgl3EQfyiuq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+MtE4T4oBgHgl3EQf8w72/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+oNE3T4oBgHgl3EQfLAnj/content/2301.04360v1.pdf filter=lfs diff=lfs merge=lfs -text
+5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf filter=lfs diff=lfs merge=lfs -text
+69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+GNAyT4oBgHgl3EQfrfkX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf filter=lfs diff=lfs merge=lfs -text
+GdAyT4oBgHgl3EQfrflY/content/2301.00561v1.pdf filter=lfs diff=lfs merge=lfs -text
+nNAzT4oBgHgl3EQfqP3M/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+zNAyT4oBgHgl3EQfn_iY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+stFAT4oBgHgl3EQfgh1S/content/2301.08588v1.pdf filter=lfs diff=lfs merge=lfs -text
+GdAyT4oBgHgl3EQfrflY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+u9A0T4oBgHgl3EQfL_8x/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf filter=lfs diff=lfs merge=lfs -text
+3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf filter=lfs diff=lfs merge=lfs -text
+BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf filter=lfs diff=lfs merge=lfs -text
+gdE0T4oBgHgl3EQf6QJf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+_NFQT4oBgHgl3EQf7jbZ/content/2301.13443v1.pdf filter=lfs diff=lfs merge=lfs -text
+5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf filter=lfs diff=lfs merge=lfs -text
+BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf filter=lfs diff=lfs merge=lfs -text
+TtE0T4oBgHgl3EQfVAA3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf filter=lfs diff=lfs merge=lfs -text
+qNE0T4oBgHgl3EQfqwHK/content/2301.02558v1.pdf filter=lfs diff=lfs merge=lfs -text
+ztAyT4oBgHgl3EQfn_j3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+OtE4T4oBgHgl3EQfkA1f/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+nNAzT4oBgHgl3EQfqP3M/content/2301.01627v1.pdf filter=lfs diff=lfs merge=lfs -text
+39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+XNE0T4oBgHgl3EQfmQGx/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+YtAyT4oBgHgl3EQf9fqe/content/2301.00876v1.pdf filter=lfs diff=lfs merge=lfs -text
+FdAyT4oBgHgl3EQfrPl7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf filter=lfs diff=lfs merge=lfs -text
+8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf filter=lfs diff=lfs merge=lfs -text
+qNE0T4oBgHgl3EQfqwHK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+OtE0T4oBgHgl3EQf0wJ3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+zdAyT4oBgHgl3EQfO_bl/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf filter=lfs diff=lfs merge=lfs -text
+MtE4T4oBgHgl3EQf8w72/content/2301.05351v1.pdf filter=lfs diff=lfs merge=lfs -text
+xNFQT4oBgHgl3EQfADUm/content/2301.13221v1.pdf filter=lfs diff=lfs merge=lfs -text
+2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+VtE3T4oBgHgl3EQf0gtr/content/2301.04738v1.pdf filter=lfs diff=lfs merge=lfs -text
+t9AyT4oBgHgl3EQfaPc2/content/2301.00237v1.pdf filter=lfs diff=lfs merge=lfs -text
+iNAzT4oBgHgl3EQfM_sr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+_dE1T4oBgHgl3EQfogTY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+gdE0T4oBgHgl3EQf6QJf/content/2301.02761v1.pdf filter=lfs diff=lfs merge=lfs -text
+VtE3T4oBgHgl3EQf0gtr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+etE0T4oBgHgl3EQf5wIr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ktAyT4oBgHgl3EQf_fpg/content/2301.00909v1.pdf filter=lfs diff=lfs merge=lfs -text
0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0efb3f35d8b73e82bdec5193a7eb1d4809cfb236e15aaba49a1089970017839
+size 6094893
0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:512fc59ec5567758d031b4109f8a8aff0117bea332c47893f62270694226624c
+size 2451766
0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4a8b69bc21ce4f85915bce44d737f26bb22e93cb537a2de591ebb19de141640
+size 10879021
0tFIT4oBgHgl3EQf3Cv8/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d247ba1b03845d8814ec23608eef3afddc75707834abf9685bf5432d2ae62688
+size 151912
29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0399ee2a7214c7bb50c7c3cd8d27f67abd774801860d0aad8f7a8c186bdd60f
+size 4194349
29FRT4oBgHgl3EQfnzcq/content/tmp_files/2301.13606v1.pdf.txt ADDED
@@ -0,0 +1,1307 @@
Multi-video Moment Ranking with Multimodal Clue
Danyang Hou1,2, Liang Pang1, Yanyan Lan4, Huawei Shen1,3, Xueqi Cheng2,3
1 Data Intelligence System Research Center, Institute of Computing Technology, CAS, Beijing, China
2 CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, CAS, Beijing, China
3 University of Chinese Academy of Sciences, Beijing, China
4 Institute for AI Industry Research, Tsinghua University, Beijing, China

Abstract

Video corpus moment retrieval (VCMR) is the task of retrieving a relevant video moment from a large corpus of untrimmed videos via a natural language query. State-of-the-art work for VCMR is based on the two-stage method. In this paper, we focus on improving two problems of the two-stage method: (1) moment prediction bias: the predicted moments for most queries come from the top retrieved videos, ignoring the possibility that the target moment is in the lower-ranked videos, which is caused by the inconsistency of Shared Normalization between training and inference; (2) latent key content: different modalities of a video carry different key information for moment localization. To this end, we propose a two-stage model, MultI-video raNking with mUlTimodal cluE (MINUTE). MINUTE uses Shared Normalization during both training and inference to rank candidate moments from multiple videos, which removes the moment prediction bias and makes target-moment prediction more effective. In addition, the Multimodal Clue Mining (MCM) component of MINUTE discovers the key content of different modalities in a video to localize moments more accurately. MINUTE outperforms the baselines on the TVR and DiDeMo datasets, achieving a new state of the art for VCMR. Our code will be available on GitHub.
1. Introduction

The rise of video-sharing applications has led to a dramatic increase in the number of videos on the Internet. Faced with such a huge video corpus, users need an accurate retrieval tool that serves fine-grained cross-modal information needs. The recently proposed video corpus moment retrieval (VCMR) task [9, 16] addresses this challenge: given a natural language query, it requires retrieving a video moment from a collection of untrimmed videos, where a moment is a temporal segment of a video. VCMR consists of two sub-tasks: video retrieval (VR) and single video moment retrieval (SVMR). The goal of VR is to retrieve videos that may contain the target moment via a natural language query, and SVMR aims to use the query to localize the target moment in the retrieved videos.

[Figure 1: plot of accuracy versus number of retrieved videos (1-10), with curves for video retrieval and moment prediction.] Figure 1. Moment prediction bias: video retrieval accuracy improves as the number of retrieved videos increases, indicating that the probability of predicting the correct moment also increases. However, once the number of retrieved videos exceeds 2, moment prediction accuracy hardly increases, which means that the predicted moments for most queries come from the top 2 videos.
According to their strategy for learning the two sub-tasks, existing methods can be divided into one-stage and two-stage methods. One-stage methods [16, 18, 31, 32] treat VCMR as a multi-task learning problem, using a shared backbone with two different heads to learn VR and SVMR. Two-stage methods [15] instead use a pipeline of two independent modules to learn the two sub-tasks. Specifically, they first train a video retriever on query-video pairs to learn VR, then use the Shared Normalization (Shared-Norm) [7] technique to train a localizer to learn SVMR, where the negatives for Shared-Norm are sampled from the training data by the trained retriever. At inference, the retriever first selects the K most relevant videos from the corpus, and the localizer then localizes candidate moments in those K videos. The final predicted moment depends on both the retrieval score and the localization score. The two-stage method is better suited to VCMR because (1) Shared-Norm increases the chance that the target moment is found in the correct video, and (2) the two modules can use models with different query-video interaction modes: for example, a late-fusion model as the retriever for fast video retrieval and an early-fusion model as the localizer for accurate moment localization. The state-of-the-art model for VCMR [15] is also a two-stage method.

[Figure 2: three TV-show frames with timestamped subtitles for the query "House shows a picture of the patient to his team and they have concluded that maybe the two are not related by blood."] Figure 2. Latent key content: the images with a red border are visual key content because they are relevant to "House shows a picture of the patient to his team" in the query. The highlighted subtitle is textual key content, for it relates to "they have concluded that maybe the two are not related by blood".

However, two problems limit the performance of the two-stage method. The first is moment prediction bias: as shown in Fig. 1, the final predicted moments for most queries come from the top-ranked videos among the K retrieved videos. This is counter-intuitive, because the more videos are retrieved, the more likely it is that they contain the correct moment; the bias neglects the possibility that the target moment is in the bottom-ranked videos. The cause is that although the two-stage method uses Shared-Norm during training to normalize the probability of the correct moment across the correct video and the negative videos, during inference it only normalizes the probabilities of the candidate moments within each single video. This inconsistency between training and inference makes the localization scores of candidate moments from different videos incomparable. Since the final predicted moment depends on both the video retrieval score and the moment localization score, incomparable localization scores make the final prediction depend mainly on the retrieval score, so the predicted moment tends to come from the highest-ranked videos. The second problem is latent key content: the localizer of the two-stage method neglects key content from different modalities during moment localization. A video is usually composed of multimodal information, such as images (vision) and subtitles (text). As shown in Fig. 2, visual and textual information have different emphases; if we can identify the important visual and textual content as clues, moment localization improves.
In this paper, we propose MultI-video raNking with mUlTimodal cluE (MINUTE) to address the two problems of the two-stage method. For the first problem, we keep Shared-Norm consistent between training and inference, which forces the localization scores of candidate moments across the multiple videos returned by the retriever to be comparable during inference. On this basis, we derive a new scoring function for ranking candidate moments that combines the video retrieval and moment localization scores more effectively. For the second problem, we propose an early-fusion localizer with a Multimodal Clue Mining (MCM) component that discovers key content from different modalities to help moment localization. Specifically, MCM first uses the query to measure the importance of all images and subtitles in the video, then weights these elements according to their importance; the elements with high importance serve as key clues that improve moment localization. The weighted video representation is then fed, together with the query representation, to a multimodal Transformer that captures deeper interactions between video and query to predict moments.

We conduct extensive experiments on the TVR and DiDeMo datasets. The results show that MINUTE outperforms the baselines, achieving a new state-of-the-art result. Ablation experiments verify that our method mitigates the two problems of the two-stage method.
2. Related Work

We first briefly introduce work related to the two sub-tasks of VCMR, then discuss recent work on VCMR in detail.

Text-video retrieval is a cross-modal retrieval task whose goal is to retrieve relevant videos from a corpus given a natural language query. The task is similar to the VR sub-task of VCMR, but in text-video retrieval most of a video's content is relevant to the query, whereas in VR only a small part of it is. Existing work can be divided into two categories according to the interaction mode between query and video: late fusion and early fusion. Late-fusion methods [8, 21, 27] use two separate encoders to embed queries and videos into a shared semantic space; these models can be very efficient when the representations of each modality are computed and indexed offline, since only the similarity between video and query must be computed at inference. Early-fusion methods [6, 12, 25] perform fine-grained interactions between video and query with an attention mechanism [2, 24] to improve retrieval accuracy.

Temporal language grounding is a task similar to SVMR: it requires localizing a moment in a video given a natural language query, and can be seen as a special case of VCMR in which the corpus contains only one video per query. According to how the moment is predicted, existing work on temporal language grounding can be divided into proposal-based and proposal-free methods. Proposal-based methods [3, 5, 13, 19, 26, 34] first generate several proposals as candidates, then rank the proposals by how well they match the query; the proposal with the highest matching degree is taken as the answer. Proposal-free methods [4, 17, 29, 30, 33] directly predict the start and end times of the moment without pre-extracting proposals as candidates.

Video corpus moment retrieval was first proposed by [9]; [16] then introduced the TVR dataset for VCMR, which extends the uni-modal videos (images only) of the earlier dataset to multiple modalities (images and subtitles). Existing work on VCMR can be divided into two categories according to how the two sub-tasks are learned: one-stage methods [16, 18, 31, 32] and two-stage methods [15]. One-stage methods treat VCMR as a multi-task learning problem, using a shared model with two different heads to learn VR and SVMR simultaneously. XML [16] is the first one-stage method for VCMR; it uses a late-fusion model to encode video and query separately and then uses two different heads to learn the two tasks. ReLoCLNet [32] leverages contrastive learning to enhance the performance of XML. [18] also follows XML and proposes a video-language pre-trained model, HERO, which significantly improves performance. HAMMER [31] is an early-fusion one-stage model that uses attention to enable deep interactions between query and video for more accurate moment retrieval. Two-stage methods use two different modules to learn the two sub-tasks. CONQUER [15] is the only two-stage method; it uses the video retrieval head of HERO [18] as the retriever and proposes a localizer based on context-query attention (CQA) [28]. CONQUER achieves state-of-the-art results on VCMR. During training, CONQUER uses the Shared-Norm [7] technique to train the localizer; at inference, it first retrieves the top-K videos with the retriever, then localizes the moment in the retrieved videos with the localizer. The two-stage method is better suited to VCMR, but it suffers from moment prediction bias and latent key content. In this paper, we focus on mitigating these two problems.
3. Background

We first formulate VCMR, then describe the two-stage method, and finally analyze the moment prediction bias.

3.1. Task Formulation

We denote a corpus of videos V = \{v_1, v_2, ..., v_{|V|}\}, where |V| is the number of videos in the corpus and v_i = \{f_i^1, f_i^2, ..., f_i^{|v_i|}\} is the i-th video, which contains |v_i| frames. Each frame f_i^j consists of an image and a subtitle (I_i^j, s_i^j); if a frame has no subtitle, s_i^j is set to empty. Given a natural language query q = \{w_1, w_2, ..., w_{|q|}\}, a sequence of words, the goal of VCMR is to retrieve the most relevant moment m_* from V. The target moment m_* is a temporal segment (\tau_{*,st}, \tau_{*,ed}) of the video v_* that contains it, where \tau_{*,st} and \tau_{*,ed} are its start and end timestamps.

The goal of VCMR can be seen as maximizing the probability of the target moment m_* given the query q and the video corpus V:

m_* = \arg\max_m P(m \mid q, V).    (1)

By the chain rule of conditional probability:

P(m_* \mid q, V) = P(m_* \mid v_*, q) \cdot P(v_* \mid q, V),    (2)

where P(v_* \mid q, V) and P(m_* \mid v_*, q) are the probabilities of retrieving the video v_* from the corpus V and of localizing the target moment m_* in the retrieved video, respectively. The probability of the target moment factorizes into the probabilities of its start and end timestamps:

P(m_* \mid v_*, q) = P_{st}(\tau_{*,st} \mid v_*, q) \cdot P_{ed}(\tau_{*,ed} \mid v_*, q).    (3)
3.2. Two-stage Method

The two-stage method uses a video retriever to model P(v_* \mid q, V) and a moment localizer to model P(m_* \mid v_*, q). During training, it uses a margin-based loss [10] to train the video retriever and Shared-Norm to train the moment localizer. Specifically, for a query there is one positive video v_+, whose moment (\tau_{+,j}, \tau_{+,k}) is the ground truth, and n negative videos \{v^-_1, v^-_2, ..., v^-_n\} that do not contain the target moment. Shared-Norm normalizes the probability of \tau_{+,j} being the start time (and, analogously, of \tau_{+,k} being the end time) across all frames of the positive and negative videos:

P_{st}(\tau_{+,j} \mid v_+, q) = \frac{\exp(l^{st}_{+,j})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})},    (4)

where l^{st}_{a,b} is the logit that the b-th frame of video v_a is the start timestamp of the ground-truth moment, and |v_a| is the number of frames in video v_a. Training with Shared-Norm increases the chance that the target moment is found in the correct video.
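To make Eq. (4) concrete, here is a minimal numpy sketch of Shared-Norm (our illustration, not the authors' released code; array shapes and names are assumptions): the start logits of the positive video and the n negative videos share a single softmax denominator instead of being normalized per video.

```python
import numpy as np

def shared_norm_start_prob(pos_logits, neg_logits_list, j):
    """P_st(tau_{+,j} | v_+, q) under Shared-Norm, Eq. (4).

    pos_logits:      (|v_+|,) start logits for every frame of the positive video
    neg_logits_list: list of (|v_a|,) start-logit arrays, one per negative video
    j:               index of the ground-truth start frame in the positive video
    """
    # One softmax over ALL frames of the positive and negative videos,
    # instead of a per-video softmax as in Eq. (7).
    all_logits = np.concatenate([pos_logits] + list(neg_logits_list))
    shifted = all_logits - all_logits.max()   # for numerical stability
    # The positive video's logits occupy the first |v_+| slots.
    return np.exp(shifted[j]) / np.exp(shifted).sum()

rng = np.random.default_rng(0)
p = shared_norm_start_prob(rng.normal(size=64),
                           [rng.normal(size=64) for _ in range(4)], j=10)
```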
At inference, the retriever first uses the query to retrieve the top-K videos from the corpus, and the localizer then localizes the target moment in the retrieved videos. The score of a predicted moment (\tau_{i,j}, \tau_{i,k}) in video i, with start time j and end time k, depends on both the retrieval score and the localization score:

S_{i,jk} = \exp(\alpha \cdot S^R_i) \cdot S^L_{i,jk},    (5)

where S_{i,jk} is the final score of the predicted moment, S^R_i is the retrieval score of video v_i, S^L_{i,jk} is the localization score of the moment within the video, and \alpha is a hyper-parameter that encourages the target moment to come from the top retrieved videos. The retrieval score is the cosine similarity between the query representation and the video representation, and the localization score is the probability of the moment within a single video:

S^L_{i,jk} = P_{st}(\tau_{i,j} \mid v_i, q) \cdot P_{ed}(\tau_{i,k} \mid v_i, q),    (6)

where P_{st}(\tau_{i,j} \mid v_i, q) (and likewise P_{ed}) is normalized over the single video:

P_{st}(\tau_{i,j} \mid v_i, q) = \frac{\exp(l^{st}_{i,j})}{\sum_{b=1}^{|v_i|} \exp(l^{st}_{i,b})}.    (7)
3.3. Moment Prediction Bias

As shown in Fig. 1, the final predicted moments of the two-stage method for most queries come from top-ranked videos. This bias limits the performance of the two-stage method on VCMR, because it neglects the possibility that the target moment is in the bottom-ranked videos. We conjecture that the bias mainly stems from the inconsistency of normalization between training and inference, shown in Eq. (4) and Eq. (7).

During training, Shared-Norm highlights the significance of the correct moment being in the correct video. At inference, however, the probability is normalized within every single video, so the predicted candidate moments from different videos are incomparable and the significance no longer exists. The score of the final predicted moment in Eq. (5) therefore depends more on the video retrieval score, making the final predicted moment more likely to come from the top-ranked videos.
4. Method

We first illustrate how we address the moment prediction bias. We then introduce the proposed model MINUTE, with emphasis on the Multimodal Clue Mining component, and finally describe the training of MINUTE.

4.1. Multi-video Moment Ranking in Prediction

We propose to adopt Shared-Norm at inference as well, so that the localization scores of candidate moments from multiple videos are comparable; this strengthens the influence of the moment localization score S^L_{i,jk} on the final score S_{i,jk} and removes the moment prediction bias. Furthermore, we derive a new scoring function from Eq. (2) that combines the video retrieval and moment localization scores more effectively.

Specifically, to compute P(v_* \mid q, V), we obtain the video representation \mathbf{v}_i = \{\mathbf{f}^1_i, \mathbf{f}^2_i, ..., \mathbf{f}^{|v_i|}_i\} and the query representation \mathbf{q}; in the following, bold notation denotes vectors. The j-th frame representation \mathbf{f}^j_i consists of an image representation and a subtitle representation (\mathbf{I}^j_i, \mathbf{s}^j_i). The query likewise has two representations (\mathbf{q}^I, \mathbf{q}^s), used to compute similarity scores against images and subtitles respectively. The query and video representations are detailed in Sec. 4.2.1.

Because only part of the content in a video is related to the query, the similarity score S^R_i between query and video is the average of the max-pooled query-image scores and the max-pooled query-subtitle scores, with the inner product as the similarity function sim(\cdot):

\mathrm{sim}(\mathbf{q}^c, \mathbf{c}^j_i) = \mathbf{q}^{c\top} \mathbf{c}^j_i, \quad c \in \{I, s\},
\phi_c = \max_{1 \le j \le |v_i|} \mathrm{sim}(\mathbf{q}^c, \mathbf{c}^j_i),
S^R_i = \frac{\phi_I + \phi_s}{2}.    (8)
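A small numpy sketch of Eq. (8), assuming one d-dimensional vector per image, per subtitle, and per query modality (variable names are ours, not from the paper's code):

```python
import numpy as np

def retrieval_score(q_img, q_sub, img_feats, sub_feats):
    """S^R_i of Eq. (8): average of the max-pooled query-image and
    query-subtitle inner products over the frames of one video.

    q_img, q_sub:         (d,) modality-specific query vectors
    img_feats, sub_feats: (|v_i|, d) frame-level image / subtitle vectors
    """
    phi_img = (img_feats @ q_img).max()   # best-matching image frame
    phi_sub = (sub_feats @ q_sub).max()   # best-matching subtitle
    return 0.5 * (phi_img + phi_sub)
```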
The probability P(v_* \mid q, V) is computed with a softmax over all query-video scores in the corpus:

P(v_* \mid q, V) = \frac{\exp(S^R_*)}{\sum_{j=1}^{|V|} \exp(S^R_j)}.    (9)

Computing the inner product between the query and all videos in the corpus is computationally intensive, so we employ Maximum Inner Product Search (MIPS) [22] to find the top-K videos V_* and approximate the probability:

P(v_* \mid q, V) \approx P(v_* \mid q, V_*) = \frac{\exp(S^R_*)}{\sum_{j=1}^{K} \exp(S^R_j)}.    (10)

The probabilities of the remaining videos in the corpus are taken to be close to 0. The retriever is trained to maximize the log-likelihood \log P(v_* \mid q, V), which differs from the previous two-stage method, which uses a margin-based loss.
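The MIPS step can be served by FAISS, the library behind the index.faiss files uploaded in this very commit. The sketch below is a deliberately simplified assumption: it represents each video by a single pooled float32 vector and uses an exact inner-product index, whereas a production system would more likely index frame-level vectors or use an approximate index.

```python
import numpy as np
import faiss  # pip install faiss-cpu

d, n_videos, K = 256, 10_000, 10
video_vecs = np.random.rand(n_videos, d).astype("float32")  # built offline
query_vec = np.random.rand(1, d).astype("float32")

index = faiss.IndexFlatIP(d)        # exact maximum inner product search
index.add(video_vecs)
scores, ids = index.search(query_vec, K)  # top-K scores S^R_j and video ids

# Softmax over the K retrieved scores approximates P(v_* | q, V), Eq. (10).
s = scores[0] - scores[0].max()
probs = np.exp(s) / np.exp(s).sum()
```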
As for P(m_* \mid v_*, q), we use Shared-Norm at inference, consistent with training, to remove the moment prediction bias:

P(m_* \mid v_*, q) \approx P(m_* \mid V_*, q) = \frac{\exp(l^{st}_{*,j})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})} \cdot \frac{\exp(l^{ed}_{*,k})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}.    (11)

A well-trained localizer should suppress the probability of the target moment appearing in the wrong videos to nearly zero, so P(m_* \mid V_*, q) approximately equals P(m_* \mid v_*, q). The logits l^{st}_{*,j} are detailed in Sec. 4.2.2.
Combining Eq. (2), Eq. (10) and Eq. (11), the probability P(m_* \mid q, V) can be computed as:

P(m_* \mid q, V) \approx \frac{\exp(S^R_*)}{\sum_{j=1}^{K} \exp(S^R_j)} \cdot \frac{\exp(l^{st}_{*,j})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})} \cdot \frac{\exp(l^{ed}_{*,k})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})},    (12)

where the denominators are the same for all candidate moments from the K videos, so the probability can be simplified to a new scoring function:

S_* = S^R_* + l^{st}_{*,j} + l^{ed}_{*,k},    (13)

where l^{st}_{*,j} + l^{ed}_{*,k} = S^L_{*,jk} represents the moment localization score. This scoring function is simpler than Eq. (5) and has no hyper-parameter \alpha, which could greatly inflate the weight of the top-ranked videos' retrieval scores.

At inference, we use the scoring function in Eq. (13) to rank all candidate moments across the multiple retrieved videos.
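A brute-force sketch of multi-video moment ranking with Eq. (13) (our illustration, not the authors' implementation): because Shared-Norm gives every candidate the same softmax denominator, the ranking can use raw logits plus the retrieval score. The max_len cap mirrors the moment-length limit described later in Sec. 5.3.

```python
import numpy as np

def rank_moments(retrieval_scores, start_logits, end_logits, max_len=24):
    """Rank candidate moments from K retrieved videos with Eq. (13):
    S = S^R + l^st + l^ed. Logits are comparable across videos because
    Shared-Norm is also applied at inference, so the shared denominator
    can be dropped from the ranking score.

    retrieval_scores:          (K,) video retrieval scores S^R
    start_logits, end_logits:  lists of (|v_i|,) per-frame logit arrays
    """
    candidates = []
    for i, s_r in enumerate(retrieval_scores):
        lst, led = start_logits[i], end_logits[i]
        for j in range(len(lst)):                            # start frame
            for k in range(j, min(j + max_len, len(led))):   # end frame
                candidates.append((s_r + lst[j] + led[k], i, j, k))
    return sorted(candidates, reverse=True)  # best (score, video, start, end) first
```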
4.2. Model

MINUTE is a two-stage model consisting of a late-fusion video retriever and an early-fusion moment localizer.

4.2.1 Video Retriever

The goal of the video retriever is to select, given the query q, a small subset V_* of the corpus V whose videos may contain the target moment. The retriever is a late-fusion model with two encoders, a query encoder and a video encoder, as shown in Fig. 3. The late-fusion architecture ensures retrieval efficiency when the video representations are indexed in advance.

Video Encoder. The video encoder encodes the frames of the i-th video into frame representations \mathbf{v}_i = \{\mathbf{f}^1_i, ..., \mathbf{f}^{|v_i|}_i\}, where the j-th frame \mathbf{f}^j_i contains an image representation \mathbf{I}^j_i and a subtitle representation \mathbf{s}^j_i. We first use RoBERTa [20] to extract sentence features of the subtitles, and SlowFast [11] and ResNet [14] to extract image features. We then feed the subtitle and image features to a one-layer multimodal Transformer that simultaneously captures intra-modal and inter-modal dependencies and outputs each image representation \mathbf{I}^j_i and subtitle representation \mathbf{s}^j_i.

Query Encoder. The query encoder converts the query q = \{w_1, w_2, ..., w_{|q|}\} into a query representation \mathbf{q}. We first use RoBERTa to extract the feature \mathbf{w}_j of each word in the query, and a one-layer Transformer captures the contextual representation of each word. We generate two query representations, \mathbf{q}^I for the query-image similarity score and \mathbf{q}^s for the query-subtitle similarity score, using a modular pooling mechanism [16] that converts the sequence of word representations into the two vectors:

o_i = \mathbf{W}_c \mathbf{w}_i, \quad \alpha_i = \frac{\exp(o_i)}{\sum_{j=1}^{|q|} \exp(o_j)}, \quad \mathbf{q}^c = \sum_{i=1}^{|q|} \alpha_i \mathbf{w}_i,    (14)

where \mathbf{W}_c is a learnable parameter and c \in \{I, s\}. The modular mechanism can be regarded as learnable pooling and is also used in previous works [16, 18, 32].
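A minimal sketch of the modular pooling of Eq. (14), assuming W_c is a single learnable scoring vector that maps each word feature to a scalar logit (the paper does not spell out its shape):

```python
import numpy as np

def modular_pooling(word_feats, W_c):
    """Eq. (14): learnable attention pooling of word features into one
    modality-specific query vector q^c.

    word_feats: (|q|, d) contextual word representations
    W_c:        (d,) learnable scoring vector for modality c in {I, s}
    """
    o = word_feats @ W_c            # (|q|,) importance logits o_i
    a = np.exp(o - o.max())
    a /= a.sum()                    # softmax attention weights alpha_i
    return a @ word_feats           # (d,) weighted sum q^c
```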
[Figure 3: architecture of the video retriever. A query ("Foreman tells Enid why he had to sedate the patient.") is encoded by RoBERTa, a Transformer, and modular pooling into q^I and q^s; the video's images (SlowFast, ResNet) and subtitles (RoBERTa) pass through a multimodal Transformer into I^1..I^|v| and s^1..s^|v|.] Figure 3. The video retriever consists of two encoders, a video encoder and a query encoder. 'ME' and 'PE' denote modality embedding and positional embedding, respectively.

We also use the retrieval head of HERO [18] as a retriever, for a fair comparison with CONQUER [15]. The original HERO uses a margin-based loss [10] for video retrieval, whose retrieval score only represents the cosine similarity between query and videos, so we re-train HERO in the same way as our proposed retriever to model the probability P(v_* \mid q, V) in Eq. (10). We use "simple retriever" to denote the proposed retriever and "HERO retriever" to denote the retriever based on HERO.
4.2.2 Moment Localizer

The moment localizer, shown in Fig. 4, uses the query to localize the target moment m_* in the top-K retrieved videos V_*. The proposed localizer is based on an early-fusion architecture to explore deeper interactions between query and video; because the retrieved videos are narrowed down to a small set, the amount of computation is acceptable.

The localizer first uses a query encoder to obtain token representations \{\bar{\mathbf{w}}_1, ..., \bar{\mathbf{w}}_{|q|}\} and a video encoder to obtain the video representation \bar{\mathbf{v}}_i = \{\bar{\mathbf{f}}^1_i, ..., \bar{\mathbf{f}}^{|v_i|}_i\}, where \bar{\mathbf{f}}^j_i contains an image representation and a subtitle representation (\bar{\mathbf{I}}^j_i, \bar{\mathbf{s}}^j_i). The video and query encoders of the localizer have the same architecture as those of the retriever but do not share parameters.

[Figure 4: architecture of the moment localizer; query and video representations pass through multimodal clue mining, a fully-connected fusion layer, a multimodal Transformer, and two 1D convolutions that output l^{st} and l^{ed}.] Figure 4. The moment localizer contains two components, multimodal clue mining and a multimodal Transformer. For brevity, we omit the subscripts of the representations.

The proposed localizer consists of two components: Multimodal Clue Mining and a multimodal Transformer.

Multimodal Clue Mining (MCM) addresses the latent key content problem by discovering important content in multiple modalities of the video to help moment localization. MCM first uses the query to measure the importance of each image and subtitle in the video, then weights these elements of different modalities by their importance.

Specifically, we apply modular pooling to obtain query representations \bar{\mathbf{q}}^I and \bar{\mathbf{q}}^s that measure image importance and subtitle importance, respectively. The importance is computed as

\mathbf{p}^j_c = (\bar{\mathbf{W}}_c \bar{\mathbf{c}}^j) \odot \bar{\mathbf{q}}^c, \quad c \in \{I, s\},    (15)

where \bar{\mathbf{W}}_c is a learnable parameter and \mathbf{p}^j_c is the importance of the j-th image or subtitle. We then use the importance to weight the image and subtitle representations:

\hat{\mathbf{c}}^j = \mathrm{norm}(\mathbf{p}^j_c) \odot \bar{\mathbf{c}}^j, \quad c \in \{I, s\},    (16)

where \hat{\mathbf{c}}^j is the weighted image or subtitle representation and norm is L2-normalization, which helps the model converge. MCM can be seen as an amplifier that lets the localizer focus on the important content, which we call clues, from multiple modalities.

We fuse the weighted representations \hat{\mathbf{I}}^j and \hat{\mathbf{s}}^j of a frame with a fully-connected layer:

\hat{\mathbf{f}}^j = \mathrm{FC}([\hat{\mathbf{I}}^j; \hat{\mathbf{s}}^j]),    (17)

where [;] denotes concatenation and \hat{\mathbf{f}}^j is the fused representation of the j-th frame. The fused video representation \hat{\mathbf{v}}_i = \{\hat{\mathbf{f}}^1_i, ..., \hat{\mathbf{f}}^{|v_i|}_i\} is fed to a multimodal Transformer together with the query token representations.
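A numpy sketch of Eqs. (15)-(17); the shapes, the presence of a bias term, and the exact form of the projections are our assumptions, since the paper only specifies the element-wise structure:

```python
import numpy as np

def l2norm(x, axis=-1, eps=1e-8):
    return x / (np.linalg.norm(x, axis=axis, keepdims=True) + eps)

def multimodal_clue_mining(img, sub, q_img, q_sub, W_img, W_sub, W_fc, b_fc):
    """Sketch of Eqs. (15)-(17): query-conditioned importance weighting of
    image and subtitle representations, followed by frame-level fusion.

    img, sub:     (T, d) image / subtitle representations of one video
    q_img, q_sub: (d,) modality-specific query vectors
    W_img, W_sub: (d, d) learnable projections
    W_fc, b_fc:   (2d, d) and (d,) fusion-layer parameters
    """
    p_img = (img @ W_img.T) * q_img   # Eq. (15): element-wise importance
    p_sub = (sub @ W_sub.T) * q_sub
    img_hat = l2norm(p_img) * img     # Eq. (16): weight the representations
    sub_hat = l2norm(p_sub) * sub
    # Eq. (17): concatenate per frame and fuse with a fully-connected layer.
    return np.concatenate([img_hat, sub_hat], axis=-1) @ W_fc + b_fc  # (T, d)
```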
Multimodal Transformer (MMT). We use a three-layer multimodal Transformer for deep interactions between the fused video representation and the token representations. Two 1D-convolution layers then capture dependencies between adjacent frames and output the logits l^{st}_{i,j} and l^{ed}_{i,k} for the start and end times of the target moment.
4.3. Training

We first train the retriever on text-video pairs, then use the trained retriever to sample negative videos as hard negatives for training the localizer.

Training the retriever. To maximize the log-likelihood \log P(v_* \mid q, V) of Eq. (9), we adopt the InfoNCE [23] loss with in-batch negative sampling. Specifically, let d = \{(v_1, q_1), ..., (v_b, q_b)\} denote the training data in a batch, where b is the batch size. Each pair (v_i, q_i) in d has b-1 negative samples for the query-to-video and video-to-query losses, namely (v_z, q_i)_{z \ne i} and (v_i, q_z)_{z \ne i}:

L_v = -\log \frac{\exp(S^R_{i,i})}{\sum_{z=1}^{b} \exp(S^R_{z,i})}, \quad L_q = -\log \frac{\exp(S^R_{i,i})}{\sum_{z=1}^{b} \exp(S^R_{i,z})},    (18)

where L_v and L_q are the query-to-video and video-to-query losses, respectively. The retriever is trained with the sum of the two losses.
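Eq. (18) is the standard symmetric in-batch InfoNCE objective, which reduces to two cross-entropy terms over the b x b score matrix. A minimal PyTorch sketch (ours, not the release code):

```python
import torch
import torch.nn.functional as F

def retriever_infonce(sim):
    """Eq. (18) with in-batch negatives. sim is the (b, b) matrix of
    retrieval scores S^R between every video (rows) and query (columns)
    in a batch; the diagonal entries are the positive pairs.
    """
    labels = torch.arange(sim.size(0), device=sim.device)
    loss_v = F.cross_entropy(sim.t(), labels)  # L_v: softmax over videos per query
    loss_q = F.cross_entropy(sim, labels)      # L_q: softmax over queries per video
    return loss_v + loss_q
```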
Training the localizer. We use the well-trained retriever to retrieve top-ranked videos from the training data and sample n of them as hard negatives, then train the localizer with the Shared-Norm technique:

L_{st} = -\log \frac{\exp(l^{st}_{+,j})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})}, \quad L_{ed} = -\log \frac{\exp(l^{ed}_{+,k})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}.    (19)

The sum of L_{st} and L_{ed} is used to train the localizer.
5. Experiment

We first introduce the datasets and metrics and describe the implementation details. After that, we compare experimental results with the baselines, present ablation studies of the proposed model, and finally give a case study.

Table 1. Comparison of VCMR results (IoU=0.7) with baselines on the TVR validation and testing sets. 'SR' denotes the simple retriever and 'HR' the HERO retriever.

Model        | Validation            | Testing
             | R1     R10    R100    | R1     R10    R100
XML          | 2.62   9.05   22.47   | 3.32   13.41  30.52
ReLoCLNet    | 4.15   14.06  32.42   | -      -      -
HAMMER       | 5.13   11.38  16.71   | -      -      -
HERO         | 5.13   16.26  24.55   | 6.21   19.34  36.66
CONQUER      | 7.76   22.49  35.17   | 9.24   28.67  41.98
MINUTE(SR)   | 8.17   23.38  37.93   | 9.59   28.96  45.23
MINUTE(HR)   | 10.70  29.37  45.09   | 12.60  33.72  50.23

Table 2. Comparison of VCMR results with baselines on the DiDeMo testing set.

Model        | IoU=0.5               | IoU=0.7
             | R1     R5     R10     | R1     R5     R10
XML          | 2.36   -      10.42   | 1.59   -      6.77
HERO         | 3.37   8.97   13.26   | 2.76   7.73   11.78
CONQUER      | 3.31   9.27   13.99   | 2.79   8.04   11.90
MINUTE(HR)   | 3.44   9.62   14.62   | 2.81   7.89   12.03
5.1. Datasets

TVR [16] is built on TV shows whose videos contain both images and subtitles. TVR provides 17,435 training, 2,179 validation, and 1,089 testing videos. The average video length is 76.2 seconds, while the average moment length is 9.1 seconds.

DiDeMo [1] is a dataset of real-world videos containing images only, with no subtitles. DiDeMo contains 8,395 training, 1,065 validation, and 1,004 testing videos. The average duration of videos and moments is 54 seconds and 6.5 seconds, respectively.
5.2. Evaluation Metrics

We follow [16] for the evaluation metrics. For the VCMR task, the metric is R@K, IoU=p: the percentage of queries for which at least one of the top-K retrieved moments has an Intersection over Union (IoU) with the ground truth exceeding p. The two sub-tasks are also evaluated. The SVMR metric is the same as the VCMR metric, but the evaluation is conducted only within the ground-truth video of each query. For the VR task, the metric is R@K, the percentage of queries whose correct video appears in the top-K ranked videos.
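For reference, a minimal implementation of temporal IoU and the R@K, IoU=p metric described above (our sketch; whether "exceeding p" is a strict or non-strict inequality is an assumption, and we use >=):

```python
def temporal_iou(pred, gt):
    """IoU between two moments, each given as (start_sec, end_sec)."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0

def recall_at_k(predictions, ground_truths, k=1, iou_thresh=0.7):
    """R@K, IoU=p: fraction of queries for which at least one of the
    top-K predicted moments lies in the correct video and overlaps the
    ground truth by at least the IoU threshold.

    predictions:   per query, a ranked list of (video_id, start, end)
    ground_truths: per query, one (video_id, start, end) triple
    """
    hits = 0
    for preds, (gt_vid, gt_st, gt_ed) in zip(predictions, ground_truths):
        hits += any(vid == gt_vid and
                    temporal_iou((st, ed), (gt_st, gt_ed)) >= iou_thresh
                    for vid, st, ed in preds[:k])
    return hits / len(ground_truths)
```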
5.3. Implementation Details

Training. We train the simple retriever for 100 epochs with batch size 256. For the localizer, we sample 4 negative videos per query on TVR and 2 on DiDeMo from the top-100 ranked videos, and train it for 10 epochs with batch size 32. Both the simple retriever and the localizer are trained with AdamW (learning rate 0.0001, weight decay 0.01) on a single 3090 GPU. The HERO retriever is re-trained with the InfoNCE loss on 8 3090 GPUs with the same setting as the original HERO [18].

Inference. The localizer localizes the target moment in the top-10 retrieved videos. The length of predicted moments is limited to [1, 24] for TVR and [1, 7] for DiDeMo. We use non-maximum suppression (NMS) with an IoU threshold of 0.7 to post-process the predicted moments.

Table 3. Comparison of VR results with baselines on the TVR validation set.

Model      | R@1    R@5    R@10   R@100
XML        | 16.54  38.11  50.41  88.22
ReLoCLNet  | 22.13  45.85  57.25  90.21
HERO       | 29.01  52.82  63.07  89.91
SR         | 23.12  46.86  57.83  90.22
HR         | 32.88  55.62  65.35  91.26

Table 4. Comparison of SVMR results with baselines on the TVR validation set.

Model        | IoU=0.5               | IoU=0.7
             | R1     R10    R100    | R1     R10    R100
XML          | 31.43  -      -       | 13.89  -      -
ReLoCLNet    | 31.88  -      -       | 15.04  -      -
HERO         | 32.22  60.08  80.66   | 15.30  40.84  63.45
CONQUER      | 43.63  -      -       | 22.84  -      -
MINUTE(SR)   | 44.49  78.62  93.57   | 23.98  61.30  80.13
MINUTE(HR)   | 44.74  78.90  93.80   | 24.08  62.10  80.45
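A minimal sketch of the 1D NMS post-processing step described above, applied to scored (score, start, end) moments (an illustration, not the authors' code):

```python
def nms_1d(moments, iou_thresh=0.7):
    """Greedy non-maximum suppression over scored moments: keep the
    best-scoring moment and drop lower-scoring ones that overlap it by
    more than the IoU threshold.
    """
    kept = []
    for score, st, ed in sorted(moments, reverse=True):  # best score first
        suppressed = False
        for _, kst, ked in kept:
            inter = max(0.0, min(ed, ked) - max(st, kst))
            union = max(ed, ked) - min(st, kst)
            if union > 0 and inter / union > iou_thresh:
                suppressed = True
                break
        if not suppressed:
            kept.append((score, st, ed))
    return kept
```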
5.4. Comparison with Baselines

We compare the proposed model on the VCMR task with four one-stage baselines, XML [16], ReLoCLNet [32], HAMMER [31], and HERO [18], and one two-stage baseline, CONQUER [15].

TVR. As shown in Tab. 1, the proposed models outperform all baselines. Compared with the previous best method, CONQUER, which also uses HERO for the VR task, our model with the HERO retriever achieves a 36% relative improvement in R@1 on the testing set. We also report results on the two sub-tasks in Tab. 3 and Tab. 4. For VR, the HERO retriever trained with the InfoNCE loss retrieves more accurately than the original HERO; for SVMR, our models also achieve the best results. Notably, the proposed model with the simple retriever outperforms CONQUER on VCMR even though its VR performance (R@1 23.12) is much worse than that of CONQUER's retriever (R@1 29.01); this is because the moment prediction bias limits the performance of CONQUER.

DiDeMo. We report the VCMR results on the DiDeMo testing set in Tab. 2. The proposed model again performs best. All methods perform worse than on TVR because the DiDeMo dataset was designed for temporal language grounding, so the difficulty of retrieving the video was not a design consideration: DiDeMo queries are not as specific as TVR queries (e.g., "a girl is playing ball"), making it hard to retrieve the correct video.

[Figure 5: R@1 (IoU=0.7) as a function of the number of retrieved videos (1-10) for MINUTE(HR), CONQUER, and CONQUER*.] Figure 5. VCMR performance of our model and CONQUER under different numbers of retrieved videos, where 'CONQUER*' denotes CONQUER with our retriever and scoring function.

Table 5. VCMR and SVMR performance (R@1, IoU=0.5/0.7) when removing the two components of the localizer. MCM denotes multimodal clue mining, and MMT the multimodal Transformer.

Model        | VCMR          | SVMR
             | 0.5    0.7    | 0.5    0.7
MINUTE(HR)   | 19.22  10.70  | 44.74  24.08
w/o MCM      | 18.21  10.17  | 43.41  23.46
w/o MMT      | 16.71  8.66   | 40.5   20.97
5.5. Moment Prediction Bias

As shown in Fig. 5, the performance of our model improves as the number of retrieved videos increases, whereas CONQUER changes little, which indicates that the moment prediction bias limits its performance. The bias stems from the inconsistency of Shared-Norm between training and inference. Our prediction, based on the scoring function in Eq. (13), removes this bias by ranking moments across multiple retrieved videos at inference. When we replace CONQUER's retriever and scoring function with ours, CONQUER* in Fig. 5 also overcomes the moment prediction bias, showing the effectiveness of the proposed approach.
5.6. Multimodal Clue Mining

We perform ablation studies on the two components of the localizer in Tab. 5. Removing MCM reduces accuracy, which shows that discovering key content in images and subtitles as clues helps moment localization. Removing MMT (keeping only MCM) reduces accuracy much more, indicating that clues alone are not enough; fine-grained cross-modal interactions are also needed.
[Figure 6: two qualitative TVR examples comparing the moments predicted by MINUTE and CONQUER against the ground truth, for the queries "The bandleader announces Chandler and Monica and they walk into the room." and "Amy and Bernadette spin around on their bar seats to face the other way."] Figure 6. Two cases on TVR from the proposed model and CONQUER.

5.7. Case Study

We show two VCMR cases in Fig. 6. In the first case, both models rank the correct video first, and the moment predicted by the proposed model is closer to the ground truth: our model captures the key images related to "they walk into the room" to help localize the moment, indicating the effectiveness of MCM. In the second case, both models rank a wrong video first because its scenario is similar to that of the correct video. CONQUER fails to predict the correct moment from the correct video because it places too much emphasis on the top-ranked videos, while our model predicts the correct moment, which verifies that our prediction removes the moment prediction bias.
6. Conclusion

In this paper, we propose MultI-video raNking with mUlTimodal cluE (MINUTE), a model that addresses two problems of the two-stage method for video corpus moment retrieval: moment prediction bias and latent key content. We first trace the moment prediction bias to the inconsistency of Shared-Norm between training and inference; we therefore adopt Shared-Norm at inference and rank moments across multiple videos with our derived scoring function, removing the bias. For latent key content, we propose a Multimodal Clue Mining component that discovers important content in the two modalities of a video as clues for better moment localization. Extensive experiments on the TVR and DiDeMo datasets show that our model mitigates both problems and achieves a new state of the art for video corpus moment retrieval.
1180
29FRT4oBgHgl3EQfnzcq/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8dffc36cef7cdd3cc2c12514e9070899d8d716dad39f054de952dc7713f6baa
3
+ size 6750253
39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6647a25a9ba0414595f017775ebc1146276aeb6607f51108c6892ba8377dc4c3
3
+ size 388877
39AyT4oBgHgl3EQfcPct/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd2f19689d50ece20291f64bc589849f6ef378756974799992afe5a6553994d1
3
+ size 163190
39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:720fbd2fac88be16d7975ff17a069543a410127a6fb99b352a33ff969a2191f2
3
+ size 2555949
39FAT4oBgHgl3EQfEhxj/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a45ffb958c4d15140f3f3107fa33631a406785baa23412078b63d381e811a4e1
3
+ size 103826
3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cfb0d8c8e5aca35be04e1bb0b14c51426a4c8aff006e7c8ee82148749f1cfda
3
+ size 322045
3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97298f48671b124e453e08fa1e578285c19f8241b807dff4b02afe674ced5d3a
3
+ size 3473453
3dFLT4oBgHgl3EQfry9C/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:390375edbdd70ecaa7775d509ababd7adbb9d47b90ca449798581b9395faa192
3
+ size 124263
3tA0T4oBgHgl3EQfNP_j/content/tmp_files/2301.02145v1.pdf.txt ADDED
@@ -0,0 +1,1565 @@
 
 
 
 
+ Domain Generalization via Ensemble Stacking for Face Presentation
4
+ Attack Detection
5
+ Usman Muhammad1, Djamila Romaissa Beddiar1, and Mourad Oussalah1, Fellow, IEEE
6
+ 1 Center for Machine Vision and Signal Analysis, University of Oulu, Finland
7
+ Face presentation attack detection (PAD) plays a pivotal role in securing face recognition systems against spoofing attacks. Although
8
+ great progress has been made in designing face PAD methods, developing a model that can generalize well to an unseen test domain
9
+ remains a significant challenge. Moreover, due to different types of spoofing attacks, creating a dataset with a sufficient number
10
+ of samples for training deep neural networks is a laborious task. This work addresses these challenges by creating synthetic data
11
+ and introducing a deep learning-based unified framework for improving the generalization ability of the face PAD. In particular,
12
+ synthetic data is generated by proposing a video distillation technique that blends a spatiotemporal warped image with a still image
13
+ based on alpha compositing. Since the proposed synthetic samples can be generated by increasing different alpha weights, we train
14
+ multiple classifiers by taking advantage of a specific type of ensemble learning known as a stacked ensemble, where each such
15
+ classifier becomes an expert in its own domain but a non-expert to others. Motivated by this, a meta-classifier is employed to learn
16
+ from these experts collaboratively so that when developing an ensemble, they can leverage complementary information from each
17
+ other to better generalize to an unseen target domain. Experimental results using half total error rates (HTERs) on
18
+ four PAD databases CASIA-MFSD (6.97%), Replay-Attack (33.49%), MSU-MFSD (4.02%), and OULU-NPU (10.91%) demonstrate
19
+ the robustness of the method and open up new possibilities for advancing presentation attack detection using ensemble learning
20
+ with large-scale synthetic data.
21
+ Index Terms—Face Anti-Spoofing, Ensemble Learning, Deep Learning, Synthetic Data, LSTM.
22
+ I. Introduction
23
+ OVER the past few decades, facial recognition (FR)
25
+ technology has been frequently used in numerous real-
26
+ world applications, such as mobile payments, access control,
27
+ immigration, education, surveillance, and healthcare [1]. The
28
+ accuracy of FR is no longer a major concern and the error
29
+ rate has dropped to 0.08%, according to tests conducted by
30
+ the National Institute of Standards and Technology (NIST)
31
+ [2]. Despite great success, a simple FR system might be
32
+ vulnerable to spoofing, known as a presentation attack. For
33
+ instance, print attacks, video replay, and 3D masks are the
34
+ most common attacks reported recently in the face anti-
35
+ spoofing domain [3], [4]. Thus, a number of hand-crafted and
36
+ deep representation methods have been proposed to protect FR
37
+ systems against presentation attacks [5], [6], [7], [8], [9], [10],
38
+ [11]. Many of them report promising performance in intra-
39
+ domain testing scenario. However, the performance remains
40
+ limited in cross-dataset testing scenario due to distributional
41
+ discrepancy between source domain and the target domain.
42
+ One major reason is that deep-learning-based models are prone to overfitting due to the lack of a sufficient amount of training samples in the source domain.
45
+ Another possible reason might be that many face PAD methods
46
+ assume that training and testing data come from the same
47
+ target distribution. However, if a model was trained on cut
48
+ photo attack images, would it work on mask attack images?
49
+ What if the model is trained only on replay attack images and
+ tested on warped photo attacks? Is it possible to deploy a model
+ that is trained using different illumination conditions and
+ background scenes under controlled lighting systems? Answers to
53
55
+ all these questions depend on how a machine learning model
56
+ can deal with this domain shift problem. Thus, to alleviate
57
+ this issue, domain adaptation (DA) techniques are used to
58
+ leverage a source dataset and maintain a good accuracy on
59
+ the target dataset by using unlabeled target data. However, in
60
+ many applications, it is difficult to collect sufficient target data.
61
+ For instance, in face PAD, hackers are using different types
62
+ of spoofing attacks which makes it impractical to collect each
63
+ type of new attack sample in advance.
64
+ To overcome the domain shift problem, domain generaliza-
65
+ tion (DG) methods have been introduced to improve the gener-
66
+ alization [9], [10], [11]. However, the generalization capability
67
+ of PAD methods remains challenging because either the deep
68
+ feature-based methods or low-level feature-based methods may
69
+ not generalize well to new applications. Generalizability
70
+ refers to the performance difference of a model when the PAD
71
+ models are trained and tuned on one or multiple databases and
72
+ then tested on a completely unseen database. As shown in
73
+ Fig.1, the goal of domain generalization is to use the training
74
+ samples from one or several different source domains but
75
+ related domains (i.e., diverse training datasets) that perform
76
+ well when evaluated on a completely unseen target domain.
77
+ To improve the generalization, the majority of recent ap-
78
+ proaches in face PAD such as adversarial learning [12],
79
+ meta pattern learning [13], generative domain adaptation [14],
80
+ hypothesis verification [15], or cross-adversarial learning [16],
81
+ address the domain generalization issue by exploiting a com-
82
+ mon feature space from multiple source domains, but the
83
+ performance remains limited due to a substantial distribution
84
+ difference among source domains. For instance, research in
85
+ [17] relies on a shared feature space and assumes that it
86
+ would also be invariant to domain shift. This assumption has a
87
+ flaw because when the source domains become more diverse,
88
+ learning a domain-invariant model becomes more difficult [18].
+
+ Fig. 1: The source domains are trained with diverse sets of synthetic images, where the meta-learner seeks complementary information to generalize well to an unseen target distribution.
+
+ For instance, instead of concentrating on some domain-
97
+ specific differentiation cues such as cut photo texture cues
98
+ available in the CASIA database, models can benefit from a
+ generalized feature space if more generalized cues are shared
100
+ by all source domains [11]. In addition, spoofing attacks have
101
+ been launched physically by malicious hackers (i.e., outside
102
+ the control of the biometric system). Therefore, building new
103
+ datasets to collect large samples of fake faces, especially for
104
+ each type of new attack remains infeasible in the face anti-
105
+ spoofing domain. Although the dominant approaches such as
106
+ Generative adversarial networks (GANs) [19], Bidirectional
107
+ GANs [20], and the DCGAN [21] can be applied to mitigate
108
+ the gap between the target domain and the source domain by
109
+ generating synthetic faces, these models require careful tuning
110
+ of their parameters.
111
+ In this paper, rather than proposing a specific model
112
+ suited for the intra-database testing scenario, a novel unified
113
+ framework is introduced based on the idea of stacking-based
114
+ ensemble learning to improve the generalization of the face
115
+ PAD. We first generate different sets of synthetic training
116
+ samples and then train different sub-models on each of the
117
+ synthetic sets to specialize in their own domain. More specif-
118
+ ically, our goal is to understand the relationship between the
119
+ spatiotemporal artifacts that appear in synthetic samples. Con-
120
+ sequently, we train three sub-models in which we investigate
121
+ the characteristics of these spatiotemporal artifacts. By doing
122
+ this, we assume that sub-models that are trained on specific
123
+ source domains would be experts in domain-specific sources
124
+ but non-expert in all other source domains as well as the
125
+ target domain. Motivated by this, we train a meta-learner that
126
+ minimizes the cross-domain generalization error by combining
127
+ the input predictions of all experts (sub-models). Thus, our
128
+ key idea is to train the sub-models separately so that when
129
+ forming stacking, a meta-learner can leverage complementary
130
+ information in order to better approach the target domain.
131
+ To achieve our goal, we first introduce a video distillation
132
+ technique to generate synthetic samples. This is inspired by
133
+ our previous works [8], [22] that claim estimation of global
134
+ motion is important for face PAD. Specifically, a 2D image
135
+ morphing technique is proposed with a combination of a warp
136
+ and a cross dissolve. The main idea is to blend the encoded
137
+ spatiotemporal warped images with the still images using
138
+ alpha blending. By doing so, we generate multiple sets of
139
+ 2D synthetic images with different alpha weights and expand
140
+ the training samples significantly. Several synthetic examples
141
+ are shown in Fig.2. We then train different recurrent neural
142
+ networks with each subset of synthetic data and use the
143
+ prediction of each subset to train the meta-classifier. Moreover,
144
+ the interpretability methods are employed to further assess how
145
+ robust the model is, by revealing that the most significant areas
146
+ for determining the deep learning model decision on the PAD
147
+ task are consistent with motion cues associated with the arti-
148
+ facts, i.e., screen sloping, hand movement, material reflection,
149
+ and expression changes. Overall, the main contributions of this
150
+ study are five-fold:
151
+ • A video distillation technique is proposed to train a
152
+ 2D CNN on a still image, where “still” encodes both
153
+ appearance and temporal information from the video
154
+ sequence into a single RGB image.
155
+ • 2D image morphing is introduced to create large-scale
156
+ synthetic training samples that greatly promote the per-
157
+ formance of the face anti-spoofing model.
158
+ • Stacked recurrent neural networks are utilized to predict
159
+ spatiotemporal inconsistencies and then those predictions
160
+ are employed to form the deep architecture (meta-model).
161
+ • Techniques of interpretation are provided for exploring
162
+ the decisions made by the employed model. The model
163
+ revealed that the motion cues are the most important
164
+ factors for distinguishing whether an input image is
165
+ spoofed or not.
166
+ • Experiments on four benchmark datasets, consisting
167
+ of CASIA-MFSD, Replay-Attack, MSU-MFSD, and
168
+ OULU-NPU databases, show that our proposed method
169
+ is significantly superior on three databases in comparison
170
+ with current state-of-the-art generalization methods.
172
+ The rest of this work is organized as follows. Section II
173
+ discusses the recent developments and related past works.
174
+ Section III explains all the steps of the proposed method.
175
+ Section IV shows the implementation details, ablation study,
176
+ and comparison against several public benchmark datasets.
177
+ Section V concludes the entire work and gives suggestions
178
+ for future research.
179
+ II. Literature Review
180
+ Over the past few years, face PAD methods have re-
181
+ ceived considerable attention from both academia and in-
182
+ dustry. In general, these methods can be roughly classified
183
+ into appearance-based methods and temporal-based methods.
184
+ Appearance-based methods: Traditional appearance-based
185
+ methods usually extract hand-crafted features such as LBP
186
+ [23] and SIFT [24] based on various appearance cues. The
187
+ authors in [5] claimed that color information is crucial and
188
+ luminance-chrominance color spaces improve the detection
189
209
+ Fig. 2: 2D synthetic samples from CASIA-MFSD. Left col-
210
+ umn: Video sequence used to generate synthetic samples.
211
+ Right column: Spatiotemporal encoded images morphed with
212
+ the still image using alpha values of 0.5 (Synt 1), 1.0 (Synt 2),
213
+ and 1.5 (Synt 3), respectively. These synthetic samples can be
214
+ used for ensemble stacking to significantly improve the face
215
+ anti-spoofing performance.
216
+ performance of face PAD in comparison to the RGB and
217
+ the gray-scale image representations. The multiscale filtering
218
+ approach proposed in [25] was found to be effective where
219
+ LBP-based multiscale features provide improved performance.
220
+ Wen et al. [26] utilize image distortion analysis (IDA) and
221
+ develop an ensemble classifier, where multiple SVM classifiers
222
+ are implemented. In particular, the features are selected based
223
+ on specular reflection, blurriness, chromatic moment, and color
224
+ diversity to provide input to SVM classifiers. A component-
225
+ based coding framework is proposed to encode different
226
+ components of the face in [27]. To deploy secure face locking
227
+ on a smartphone, a method is developed based on extracting
228
+ color distortion, Moiré-pattern analysis, surface reflection, and
229
+ shape deformation [24]. The LBP features are combined with
230
+ the feature maps of a deep learning model to improve the
231
+ detection of face PAD in [28]. The authors show that the need
232
+ for large training samples in face PAD can be mitigated by
233
+ using convolutional feature maps. Moreover, a hybrid deep
234
+ learning method is introduced in [29] to encode appearance
235
+ information from two CNNs where the SVM classifier is used
236
+ to discriminate live and spoofed images. Although appearance-
237
+ based methods provide improved performance in an intra-
238
+ database testing scenario, the performance remains limited
239
+ when evaluated on a completely unseen testing domain.
240
+ Temporal-based methods: The study reported in [8] es-
241
+ timates global motion and amplifies motion cues such as
242
+ hand movements or head rotation where BiLSTM is used to
243
+ predict the motion. Since global estimation leaves the artifacts
244
+ such as black framing at the border of the encoded images
245
+ in [8], this issue was solved by using dense sampling with
246
+ similarity transformation [22]. Moreover, in order to encode
247
+ head movements, eye-blinking, and lip movements, a dynamic
248
+ mode decomposition (DMD) method is introduced to capture
249
+ the temporal cues from frame sequences [30]. Eulerian motion
250
+ magnification is used to magnify the facial expressions in [31].
251
+ Then, local descriptors such as HOOF and LBP are utilized
252
+ to improve the classification performance. Photoplethysmogra-
253
+ phy (rPPG) signal was found to be crucial to improve the face
254
+ PAD performance [32]. A unified framework based on CNN-
255
+ BiLSTM is used to capture both appearance and temporal cues
256
+ in [29]. A study conducted in [33] shows that the spontaneous
257
+ blinking of a person provides an intrinsic detection cue to
258
+ improve live face detection. A dense optical flow scheme is
259
+ proposed to estimate the motion of two successive frames
260
+ in [34]. The authors claimed that real and attack videos
261
+ have different optical flow motion patterns which help to
262
+ improve the PAD performance. A 3D CNN model is employed
263
+ to capture both spatial and temporal information in [35].
264
+ A combined CNN-RNN model is developed to capture the
265
+ auxiliary information (i.e., the depth map and rPPG signals)
266
+ for improving the detection performance [36]. However, when
267
+ the temporal and appearance-based methods are employed in
268
+ a cross-dataset scenario, the detection performance remains
269
+ vulnerable to degradation due to real-world variations (such
270
+ as user demographics, input cameras, and variations in illu-
271
+ mination). Therefore, domain generalization that aims to learn
272
+ from several source domains becomes significant while dealing
273
+ with presentation attack detection.
274
+ Deep Domain Generalization methods: Several deep do-
275
+ main generalization methods have been introduced to im-
276
+ prove the generalization ability of face PAD. For instance,
277
+ a domain adaptation method that generates pseudo-labeled
278
+ samples named cyclically disentangled feature translation net-
279
+ work (CDFTN) is proposed in [37]. Chuang et al. proposed
280
+ to improve the generalization based on one-side triplet loss
281
+ [38]. A two-stream network is utilized to fuse the input RGB
282
+ image and meta-pattern learning was proposed to improve
283
+ the generalization [13]. A cross-adversarial training scheme
284
+ is proposed to improve the generalization by minimizing
285
+ the correlation among two sets of features [16]. The work
286
+ reported in [14] aims to learn a generalized feature space
287
+ by designing the target data to the source-domain style and
288
+ called Generative Domain Adaptation (GDA). A hypothesis
289
+ verification framework is proposed in [15] where two hy-
290
+ pothesis verification modules are utilized for improving the
291
+ generalization. A novel Shuffled Style Assembly Network
292
+ (SSAN) is introduced by aligning multiple source domains
293
+ into a stylized feature space and domain generalization was
294
+ improved by a contrastive learning strategy [39]. To select
295
+ common feature space, adversarial learning is proposed and
296
+ aggregation of live faces is performed to achieve a generalized
297
+ feature space in [12]. However, there is no consensus that the
298
+ pre-defined distributions can be considered the optimal ones
299
+ for the feature space. Thus, we argue that a model can un-
300
+ derstand faces much better by simply aligning multiple source
301
+ domains based on the idea of collaborative ensemble learning.
302
+ In particular, the generalized feature space can automatically
303
+ capture spatiotemporal inconsistencies based on the knowledge
304
+ provided by multiple source domains.
305
+ III. The Proposed Method
306
+ Figure.3 illustrates the overall framework. Firstly, we
307
+ present a method to show how to synthesize training samples.
308
315
+ Fig. 3: Flow chart of our proposed method. A video of length V is divided into non-overlapping segments of smaller length
316
+ v. For each segment, global motion is estimated and the stabilized sequence is accumulated to obtain a spatiotemporal warped
317
+ image. Then, the encoded spatiotemporal warped image is morphed with a still image (i.e., the first frame of the segment) by
318
+ using alpha compositing. Since different alpha values are used to create multiple synthetic images, we build multiple classifiers
319
+ on these synthetic images to form stacking-based ensemble learning for improving the generalization of face PAD.
320
+ The purpose of synthesis is to bring spatiotemporal artifacts
321
+ that can be used to train multiple individual models for
322
+ understanding the relationship between them. Secondly, a
323
+ unified CNN-RNN network is proposed due to the fact that
324
+ mainstream 2D CNN frameworks cannot deal with sequential
325
+ data (i.e., sequences to sequences). Then, model stacking is
326
+ designed in such a way that it can minimize the weakness and
327
+ maximize the strengths of every individual model based on
328
+ the meta-learner. Lastly, the model interpretation is provided to
329
+ investigate the contribution of synthetic data on which the deep
330
+ model mainly relies. Each step is explained in the following
331
+ sub-sections.
332
+ A. 2D Virtual Synthesis
333
+ To generate synthetic samples, a video V is equally divided
334
+ into P non-overlapping segments, i.e., V = {S_k}_{k=1}^{P}, where
+ S_k is the k-th segment. The length of each segment is set
337
+ to be (w = 40) frames. For each segment, features are
338
+ extracted from the fixed (first) and moving (second) image
339
+ of the segment. In particular, the FAST feature detector [40]
340
+ is utilized to detect interest points and then FREAK descriptor
341
+ [41] extracts the features to collect points of interest from both
342
+ frames. Since salient image features are extracted, the next step
343
+ is interest points matching where Hamming distance (HD) is
344
+ utilized in our work. The inter-frame parameters are estimated
345
+ throughout the whole length of the segment (since the first
346
+ frame) by using the rigid (Euclidean space) transformation. As
347
+ the name suggests, rigid transformation preserves the distance
348
+ and angles (i.e., the distance between two points remains the same).
349
+ The rigid transformation matrix M is a 3×3 matrix. We find
350
+ the 2D pixel coordinates in Cartesian coordinate system by
351
+ estimating the translation map from M. Let [a, b, 1]T illustrate
352
+ the homogeneous coordinates in moving image and [a′, b′, 1]T
353
+ define the coordinates in the fixed image, we have
354
+ \begin{bmatrix} a' \\ b' \\ 1 \end{bmatrix} = \begin{bmatrix} d_{11} & d_{12} & d_{13} \\ d_{21} & d_{22} & d_{23} \\ d_{31} & d_{32} & d_{33} \end{bmatrix} \begin{bmatrix} a \\ b \\ 1 \end{bmatrix}   (1)
382
+ and pixel shift can be calculated as
383
+ \begin{bmatrix} \Delta a \\ \Delta b \end{bmatrix} = \begin{bmatrix} a' - a \\ b' - b \end{bmatrix}   (2)
391
+ To eliminate false-matching points and robustly estimate the
392
+ geometric transformation between the frames, we use the M-
393
+ estimator Sample Consensus (MSAC) algorithm [42] to detect
394
+ outliers and remove false matching points. To obtain warped
395
+ images, we simply average the stabilized frame sequences
396
+ using the following aggregation function:
397
+ e_v = \frac{1}{w} \sum_{k=1}^{w} e_{v_k},   (3)
405
443
+ where w denotes the total number of selected frames in
444
+ segment k for video V . By the above aggregation, the average
445
+ over frames directly merges temporal information, and the
446
+ image registration combines available spatial reference infor-
447
+ mation. Fig. 4 shows the effectiveness of the proposed video
448
+ distillation scheme. The results demonstrate that the removal
449
+ of global motion must be taken into account before the feature
450
+ extraction step during the development of a face PAD model.
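+ For concreteness, the distillation step can be sketched as follows. This is a minimal illustration, assuming opencv-contrib-python (cv2.xfeatures2d provides FREAK) and NumPy are available; cv2.estimateAffinePartial2D with RANSAC stands in for the rigid-transform estimation with MSAC described above, and all names here are illustrative rather than the exact implementation:
+ # Minimal sketch of the video distillation step described above.
+ # Assumptions: opencv-contrib-python (for FREAK) and numpy are installed;
+ # cv2.estimateAffinePartial2D with RANSAC approximates the rigid + MSAC step.
+ import cv2
+ import numpy as np
+
+ def distill_segment(frames):
+     """Register every frame of a segment to its first frame and average them."""
+     ref_gray = cv2.cvtColor(frames[0], cv2.COLOR_BGR2GRAY)
+     detector = cv2.FastFeatureDetector_create()
+     freak = cv2.xfeatures2d.FREAK_create()
+     matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+
+     kp_ref = detector.detect(ref_gray, None)
+     kp_ref, des_ref = freak.compute(ref_gray, kp_ref)
+
+     h, w = ref_gray.shape
+     acc = frames[0].astype(np.float64)
+     for frame in frames[1:]:
+         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+         kp = detector.detect(gray, None)
+         kp, des = freak.compute(gray, kp)
+         matches = matcher.match(des, des_ref)  # Hamming-distance matching
+         src = np.float32([kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
+         dst = np.float32([kp_ref[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
+         # Robust similarity transform; RANSAC rejects outlier matches (cf. MSAC)
+         M, _ = cv2.estimateAffinePartial2D(src, dst, method=cv2.RANSAC)
+         acc += cv2.warpAffine(frame, M, (w, h)).astype(np.float64)
+     return (acc / len(frames)).astype(np.uint8)  # Eq. (3): mean of stabilized frames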
451
+ Since our target is to predict the temporal inconsistencies,
452
+ a synthetic image is generated in such a way that every
453
+ spatiotemporal encoded image acquired from Eq.3 is blended
454
+ into the first (still) image of the segment to obtain a synthetic
455
+ image. By doing this, we make sure that the synthetic image
456
+ would never leave the space of the human face (see Fig.2).
457
+ Thus, the proposed blending process involves two steps: 1)
458
+ obtain a source image (i.e., a spatiotemporal encoded image
459
+ from a video distillation technique), and 2) target image:
460
+ choosing a first (still) image of each segment to blend into
461
+ a source image (usually known as cross dissolving). Let’s
462
+ assume that we blend source image (P1) over target image
463
+ (P2) as:
464
+ P_{morph}(a, b) = α P_1(a, b) + (1 − α) P_2(a, b)   (4)
466
+ where α is the morphing weight (0 < α ≤ 1). Thus, the synthetic pixel at location P_{morph}(a, b) takes a fraction α from P_1(a, b) and a fraction (1 − α) from P_2(a, b) [43].
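+ A minimal sketch of this blending step, assuming NumPy uint8 images; note that alpha values above 1 (as used for the third synthetic set in Section IV) extrapolate the cross dissolve, so the result is clipped to the valid range:
+ # Minimal sketch of Eq. (4): blend a spatiotemporal encoded image p1 into the
+ # still (first) frame p2 of the segment. Alpha values above 1 extrapolate the
+ # blend, so the result is clipped.
+ import numpy as np
+
+ def morph(p1, p2, alpha):
+     """Alpha compositing of source p1 over target p2 (uint8 RGB arrays)."""
+     blended = alpha * p1.astype(np.float64) + (1.0 - alpha) * p2.astype(np.float64)
+     return np.clip(blended, 0, 255).astype(np.uint8)
+
+ # Three synthetic sets from one encoded/still pair, matching the alpha weights
+ # reported in Section IV (0.5, 1.0, and 1.5):
+ # synthetic_sets = [morph(encoded, still, a) for a in (0.5, 1.0, 1.5)]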
469
+ It is worthwhile to mention that the proposed video dis-
470
+ tillation scheme is inspired by our previous works [8], [22]
471
+ that estimate global motion. Thus, benefiting from the video
472
+ distillation nature of the previous methods, we extend our
473
+ previous works to generate synthetic samples by introducing
474
+ a cross-dissolve. Moreover, we use the FREAK descriptor
475
+ and rigid transformation to estimate inter-frame motion. By
476
+ doing this, the computation cost of the method is significantly
477
+ reduced (we further discuss this argument in Section IV).
478
+ B. Recurrent Neural Network (RNN)
479
+ Deep learning methods based on 2D Convolutional Neural
480
+ Networks (CNNs) have shown better performance than
481
+ classical machine learning approaches [9], [6], [7]. However,
482
+ the mainstream 2D CNN frameworks focus on spatial infor-
483
+ mation, thus lacking the capacity to understand sequential
484
+ data. Specifically, CNNs do not have a memory mechanism in
485
+ order to capture the temporal relations. Motivated by the fact
486
+ that recurrent neural networks (RNNs) can deal with temporal
487
+ information, we develop a unified framework consisting of
488
+ CNN-RNN to encode complementary information between
489
+ frames. In particular, a CNN is fine-tuned on the labeled
490
+ dataset in the first stage. Then, the fine-tuned features are
491
+ extracted from the pooling layer and used as input to train
492
+ a Long-short-term memory (LSTM) [44] network.
493
+ The LSTM is the most popular RNN architecture and
494
+ capable of learning long-term dependencies. It is composed of
495
+ a memory cell (Ce), an input gate (ie), an output gate (oe), and a
496
+ forget gate (ge). The input gate governs the information flow
497
+ into the cell by multiplying the cell’s non-linear transformation
498
+ of inputs me. The output gate decides how much information
499
+ from the cell is used to compute the output activation of the
+ LSTM unit. The forget gate regulates the extent to which a
+ value remains in the cell.
+
+ Fig. 4: (a) We computed the mean of the raw video frames to visualize the global motion, which shows a great deal of distortion in the encoded image. (b) The proposed spatiotemporal encoded images after removing the global motion.
+
+ The LSTM unit updates for time step e are:
507
+ \begin{bmatrix} g_e \\ i_e \\ m_e \\ o_e \end{bmatrix} = \begin{bmatrix} \sigma \\ \sigma \\ \tanh \\ \sigma \end{bmatrix} H \cdot [p_{e-1}, x_e]   (5)
+ C_e = g_e \odot C_{e-1} + m_e \odot i_e   (6)
+ p_e = \tanh(C_e) \odot o_e   (7)
528
+ where x_e is the input at the current time step and C_e is the cell state; g_e, i_e, and o_e denote the forget, input, and output gate activations, respectively, and m_e is the candidate input.
531
+ σ illustrates the logistic sigmoid function and ⊙ represents
532
+ element-wise multiplication. The fully connected and softmax
533
+ layer is used for detecting real and fake images.
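+ A minimal PyTorch sketch of this CNN-RNN head is given below; the hidden size of 500 follows Table I's first architecture, while the feature dimension of 1920 (DenseNet-201's pooled output) and the use of the final time step are illustrative assumptions:
+ # Minimal PyTorch sketch: sequences of pooled CNN features feed an LSTM, and
+ # a fully connected + softmax layer scores live vs. spoof. The feature
+ # dimension of 1920 (DenseNet-201 pooled output) is an assumption.
+ import torch
+ import torch.nn as nn
+
+ class LSTMClassifier(nn.Module):
+     def __init__(self, feat_dim=1920, hidden=500, num_classes=2):
+         super().__init__()
+         self.lstm = nn.LSTM(feat_dim, hidden, batch_first=True)
+         self.fc = nn.Linear(hidden, num_classes)
+
+     def forward(self, x):             # x: (batch, time, feat_dim)
+         out, _ = self.lstm(x)         # gate updates of Eqs. (5)-(7) happen here
+         logits = self.fc(out[:, -1])  # last time step -> class scores
+         return torch.softmax(logits, dim=-1)
+
+ # probs = LSTMClassifier()(torch.randn(4, 10, 1920))  # (4, 2) class probabilities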
534
+ C. Model Stacking
535
+ Ensemble learning has been supported by multiple approaches like bagging, boosting, and stacking, which result in better generalization of the learning models [45]. In particular, stacking is an integration technique that combines the predictions of different weak models, wherein a meta-learning model is used to integrate the outputs of the base models [46]. One common approach in stacked ensemble learning is to develop a bench of N Tier-1 classifiers S1, S2, S3, ..., SN based on cross-validation on the training sample [47].
545
+ Rather than focusing on the prediction of a single model,
546
+ we train diverse RNN-based sub-models in our work with
547
+ different synthetic training samples to predict the temporal
548
+ inconsistencies from the data. In particular, the LSTM [44]
549
+ and the Bidirectional LSTM (BiLSTM) [48] with different
550
+ hidden layers are trained on three synthetic sets where each
551
+ sub-model works independently to specialize in its own source
552
+ domain. To better understand the learning of sub-models, Fig.5
553
+ represents the proposed validation scheme, where each RNN
554
+ is trained with k-1 folds, k-2 folds, and k-3 folds to get the
555
559
+ Fig. 5: The proposed validation for ensemble learning.
560
+ TABLE I: BiLSTM architectures and parameters.
561
+ Parameter | First Architecture | Second Architecture | Third Architecture
+ No. of layers | 1 | 1 | 1
+ Layers type | LSTM | BiLSTM | LSTM
+ No. of units | 500 | 20 | 100
+ Optimizer | ADAM | ADAM | ADAM
+ Learning rate | 0.0001 | 0.0001 | 0.001
+ Cost function | cross entropy | cross entropy | cross entropy
588
+ TABLE II: Meta model architecture and parameters.
589
+ No. of layers | 1
+ Layers type | LSTM
+ No. of units | 20
+ Optimizer | ADAM
+ Learning rate | 0.0001
+ Cost function | cross entropy
601
+ most out of the stacking. Thus, by making each model an expert on a different training subset, we encourage each model to concentrate on different aspects of the data (i.e., temporal inconsistencies); for example, one model can focus on a certain type of feature using one subset of synthetic data, while another model can perform better on the others. We then combine the predictions from these experts (sub-models) by running another model called a meta-learner (meta-classifier). By doing this, the meta-learner helps to maximize the strengths of every individual model and reduce generalization errors.
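+ The stacking step can be sketched as follows; the base models' predict_proba interface is hypothetical, and a logistic regression stands in for the LSTM meta-model of Table II purely for illustration:
+ # Minimal sketch of stacking: each base model's class probabilities on the
+ # held-out validation data become the meta-learner's input features.
+ import numpy as np
+ from sklearn.linear_model import LogisticRegression
+
+ def stack_predictions(base_models, x_val, y_val, x_test):
+     """base_models: fitted objects exposing predict_proba (hypothetical API)."""
+     meta_train = np.hstack([m.predict_proba(x_val) for m in base_models])
+     meta_test = np.hstack([m.predict_proba(x_test) for m in base_models])
+     meta = LogisticRegression().fit(meta_train, y_val)  # the meta-learner
+     return meta.predict(meta_test)  # final live/spoof decision on the test set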
611
+ Table I shows the architectures and parameters of the base
612
+ models, while Table II depicts the meta-model architecture. It
613
+ is worth mentioning here that we accumulate the outputs of the
+ three base models on their validation sets as the new validation set for
615
+ training the meta-model. This way, the meta-model will make
616
+ the final test prediction on the test set.
617
+ D. Interpretation of a deep neural network
618
+ Interpretation is essential to observe what learning patterns
619
+ in data are important, but there is no clear consensus on
+ how interpretability should be best defined in the context
621
+ of machine learning. Although explanation methods intend
622
+ to make neural networks more trustworthy and interpretable,
623
+ the question arises of how some features favor deep learning
624
+ to make such a valuable prediction. For instance, synthetic
625
+ samples in our work are found to be more useful to train a deep
626
+ model, which shows better interpretability in comparison to the
627
+ same model trained without synthetic samples. This is due to
628
+ the fact that the motion cues which are naturally available in
629
+ the frame sequences are "easy to learn" for the model, and play
630
+ an important role in model optimization. Thus, the importance
631
+ of interpretation is becoming increasingly popular and leads
632
+ to useful or promising findings.
633
+ In our work, Gradient-weighted class activation mapping
634
+ (denoted as Grad-CAM) [49], Occlusion sensitivity maps
635
+ (denoted as OCC-SEN) [50], Gradient Attribution map using
636
+ Guided Backpropagation (denoted as Grad-ATT) [51], and
637
+ locally interpretable model-agnostic explanations (denoted as
638
+ LIME) [52] are utilized to understand what patterns in data
639
+ are deemed important or make the contributions to the final
640
+ decision. This enables us to trust the behavior of the developed deep learning model and further tune the model by observing its interpretations. In particular, we
643
+ extract visualization maps from pretrained DenseNet-201 [57]
644
+ convolutional neural network for all of the methods above
645
+ in our experiments. In Fig.6, we visualize diverse sets of
646
+ synthetic images from the CASIA datasets. The first four rows
647
+ show print attack images while the next four rows show replay
648
+ attack images. Each visualization method captures the class
649
+ discriminative region thanks to the proposed video distillation
650
+ and synthetic data generation scheme that force the network to
651
+ use more subtle cues for its correct classification. In particular,
652
+ the first row shows that the neurons in the deep convolutional
653
+ layers focus on the paper’s texture, and hand movement cues.
654
+ However, Grad-ATT [51] interpretation shows that the model
655
+ also takes background as context to make the prediction.
656
+ Surprisingly, this issue is eliminated by the proposed synthetic
657
+ data generation scheme where the second, third, and fourth
658
+ rows show that the model only considers motion cues, the
659
+ surface edges and barely touches the background context.
660
+ In case of a replay attack, the remaining rows show that
661
+ the tablet screen and hand movement provide discriminative
662
+ information for the model prediction. While we cannot present
663
+ this for every image from the dataset, we observed that the
664
+ mouth information, eye blinking, or head rotation contribute
665
+ positively to distinguishing live and spoofed images. Thus,
666
+ interpretation from the above methods demonstrates that the
667
+ proposed learning model is focusing on the correct features of
668
+ the input data, and the model’s decision can be viewed in a
669
+ human-understandable way. Moreover, the proposed synthetic
670
+ data generation method provides informative RGB images and
671
+ helps the model to make the features of spoofed faces more
672
+ dispersed which allows a better class boundary to generalize
673
+ well to the target domain.
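+ As an illustration of one such method, a minimal occlusion sensitivity map can be computed as below; the trained model, the input normalization, and the patch and stride sizes are assumptions:
+ # Minimal sketch of an occlusion sensitivity map (OCC-SEN-style): slide a gray
+ # patch over the input and record how much the target-class score drops.
+ import torch
+
+ def occlusion_map(model, image, target_class, patch=32, stride=16):
+     """image: (3, H, W) tensor; returns a coarse map of score drops."""
+     model.eval()
+     with torch.no_grad():
+         base = model(image.unsqueeze(0))[0, target_class].item()
+         _, h, w = image.shape
+         heat = torch.zeros(h // stride, w // stride)
+         for i, y in enumerate(range(0, h - patch + 1, stride)):
+             for j, x in enumerate(range(0, w - patch + 1, stride)):
+                 occluded = image.clone()
+                 occluded[:, y:y + patch, x:x + patch] = 0.5  # gray patch
+                 score = model(occluded.unsqueeze(0))[0, target_class].item()
+                 heat[i, j] = base - score  # big drop = important region
+     return heat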
674
+ IV. Experimental Analysis Using Open Datasets
675
+ To assess the effectiveness of the synthesized face im-
676
+ ages, four publicly available databases are used: OULU-NPU
677
+ database [58] (denoted as O), CASIA Face Anti-Spoofing
678
+ database (denoted as C) [59], Idiap Replay-Attack database
679
+ [60] (denoted as I), and MSU Mobile Face Spoofing database
680
+ [26] (denoted as M). The performance is evaluated in terms of the Half Total Error Rate (HTER), i.e., half of the sum of the false acceptance rate and the false rejection rate, and the Area Under the Curve (AUC) on the target testing dataset.
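+ A minimal sketch of the HTER computation, assuming liveness scores and binary labels (1 = live, 0 = attack) with a threshold chosen on validation data:
+ # Minimal sketch of the HTER metric: half of the sum of the false acceptance
+ # rate (FAR) and false rejection rate (FRR) at a given decision threshold.
+ import numpy as np
+
+ def hter(scores, labels, threshold):
+     """scores: liveness scores; labels: 1 = live, 0 = attack."""
+     scores, labels = np.asarray(scores), np.asarray(labels)
+     far = np.mean(scores[labels == 0] >= threshold)  # attacks accepted as live
+     frr = np.mean(scores[labels == 1] < threshold)   # live faces rejected
+     return 0.5 * (far + frr)
+
+ # Example: hter([0.9, 0.2, 0.7, 0.4], [1, 0, 1, 0], threshold=0.5) -> 0.0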
684
694
+ Fig. 6: Visualization of feature maps. The types of images are labelled in the first column. The second column shows the
695
+ original encoded and synthetic images. The third column illustrates the feature maps from Grad-CAM [49] while the fourth
696
+ column shows the feature maps from occlusion sensitivity maps [50]. Similarly, the fifth and sixth columns visualize the feature
+ maps from the Gradient Attribution map using Guided Backpropagation [51] and locally interpretable model-agnostic explanations
698
+ [52], respectively. The last column shows the masked images obtained from LIME predictions.
699
+ A. Implementation details
700
+ All the images are resized to 224 × 224 according to
701
+ the input requirement of pretrained DenseNet-201 [57] ar-
702
+ chitecture. The CNN model is fine-tuned by using Stochastic
703
+ Gradient Descent (SGD) optimizer with a validation frequency
704
+ of 30 and a mini-batch size of 32. We set the learning rate to 0.0001 and do not use a fixed number of epochs because an early stopping function [61] is utilized to stop the model automatically to prevent overfitting.
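+ A minimal sketch of this fine-tuning setup in PyTorch; the momentum value and the training-loop wiring are assumptions not specified above:
+ # Minimal sketch of the fine-tuning setup: pretrained DenseNet-201 with a
+ # two-class head, SGD at lr 1e-4, mini-batches of 32 (momentum is assumed).
+ import torch
+ import torch.nn as nn
+ from torchvision import models
+
+ model = models.densenet201(weights="IMAGENET1K_V1")
+ model.classifier = nn.Linear(model.classifier.in_features, 2)  # live vs. spoof
+
+ optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
+ criterion = nn.CrossEntropyLoss()
+
+ def train_step(images, labels):  # images: (32, 3, 224, 224)
+     optimizer.zero_grad()
+     loss = criterion(model(images), labels)
+     loss.backward()
+     optimizer.step()
+     return loss.item()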
708
+ During the ensemble learning stage, the CNN model is fine-
709
+ tuned with original encoded video clips and three different
710
+ synthetic sets separately. Then, the features from each fine-
711
+ tuned model are used as input to train three diverse RNN
712
+ models. In particular, the Adam optimizer is utilized with a
713
+ validation frequency of 30. The learning rate is set to 0.0001,
714
+ and the weights are initialized with He initializer [62] for the
715
+ first LSTM (Sub-model 1) model. We do not set the fixed
716
+ epochs because an early stopping function [61] was used to
717
+ prevent overfitting. For training the second sub-model, the
718
+ BiLSTM is trained with the hidden layer dimension of 20.
719
+ The other parameters were kept the same as sub-model 1.
720
+ For the third sub-model, the LSTM model is trained with a hidden layer dimension of 100 and a decreased learning rate of 0.001. For the synthetic data generation, we create three synthetic sets, in which alpha values of 0.5, 1.0, and 1.5 are used to expand the training images. In order to train the meta-model, an epoch size of 80, a gradient threshold of 1, and a hidden layer dimension of 20 were used to train the meta-
727
746
+ TABLE III: Performance evaluation using MSU-MFSD (M), CASIA-MFSD (C), Replay-Attack (I), and OULU-NPU (O)
747
+ databases. Comparison results are obtained directly from the corresponding papers.
748
+ Method | O&C&I to M HTER/AUC | O&M&I to C HTER/AUC | O&C&M to I HTER/AUC | I&C&M to O HTER/AUC
+ MADDG [11] | 17.69 / 88.06 | 24.50 / 84.51 | 22.19 / 84.99 | 27.89 / 80.02
+ DAFL [10] | 14.58 / 92.58 | 17.41 / 90.12 | 15.13 / 95.76 | 14.72 / 93.08
+ SSDG-R [17] | 07.38 / 97.17 | 10.44 / 95.94 | 11.71 / 96.59 | 15.61 / 91.54
+ DR-MD [9] | 17.02 / 90.10 | 19.68 / 87.43 | 20.87 / 86.72 | 25.02 / 81.47
+ MA-Net [6] | 20.80 / - | 25.60 / - | 24.70 / - | 26.30 / -
+ RFMetaFAS [7] | 13.89 / 93.98 | 20.27 / 88.16 | 17.30 / 90.48 | 16.45 / 91.16
+ FAS-DR-BC(MT) [53] | 11.67 / 93.09 | 18.44 / 89.67 | 11.93 / 94.95 | 16.23 / 91.18
+ ADL [12] | 05.00 / 97.58 | 10.00 / 96.85 | 12.07 / 94.68 | 13.45 / 94.43
+ ResNet-BiLSTM w/DS [3] | 04.12 / 99.93 | 07.04 / 99.87 | 13.48 / 97.42 | 41.33 / 88.48
+ HFN + MP [13] | 05.24 / 97.28 | 09.11 / 96.09 | 15.35 / 90.67 | 12.40 / 94.26
+ Cross-ADD [16] | 11.64 / 95.27 | 17.51 / 89.98 | 15.08 / 91.92 | 14.27 / 93.04
+ ASGS [22] | 05.91 / 99.88 | 10.21 / 99.86 | 45.84 / 76.09 | 13.54 / 99.73
+ GDA [14] | 09.20 / 98.00 | 12.20 / 93.00 | 10.00 / 96.00 | 14.40 / 92.60
+ SSAN-R [39] | 06.67 / 98.75 | 10.00 / 96.67 | 08.88 / 96.79 | 13.72 / 93.63
+ FG +HV [15] | 09.17 / 96.92 | 12.47 / 93.47 | 16.29 / 90.11 | 13.58 / 93.55
+ Ensemble (CNN-RNN) | 04.02 / 99.95 | 06.97 / 99.97 | 33.49 / 93.16 | 10.91 / 99.89
905
+ TABLE IV: The results of cross-dataset testing on limited source domains. The comparison results are obtained directly from
906
+ the corresponding papers.
907
+ Method | O&I to M HTER/AUC | M&I to C HTER/AUC | O&I to C HTER/AUC | O&M to I HTER/AUC | C&M to O HTER/AUC
+ Supervised [54] | 12.1 / 94.2 | 30.4 / 77.0 | 18.0 / 90.1 | 16.8 / 93.8 | 17.9 / 89.5
+ Mean-Teacher [55] | 19.6 / 86.5 | 31.1 / 76.6 | 23.7 / 84.9 | 18.4 / 86.0 | 23.5 / 84.9
+ USDAN [56] | 15.8 / 88.1 | 35.6 / 69.0 | 33.3 / 72.7 | 19.8 / 87.9 | 20.2 / 88.3
+ EPCR-labeled [54] | 12.5 / 95.3 | 18.9 / 89.7 | 18.9 / 89.7 | 14.0 / 92.4 | 17.9 / 90.9
+ EPCR-unlabeled [54] | 10.4 / 94.5 | 25.4 / 83.8 | 16.7 / 91.4 | 12.4 / 94.3 | 17.8 / 91.3
+ Ensemble (CNN-RNN) | 07.8 / 98.5 | 17.1 / 94.3 | 12.5 / 97.1 | 15.1 / 93.1 | 14.7 / 93.4
979
+ learner. For reproducibility of our results, we keep the same
980
+ parameter settings for conducting the experiments on all the
981
+ databases.
982
+ B. Comparison against the state-of-the-art methods
983
+ To compare the performance with the recently introduced
984
+ domain generalization methods, we conduct cross-dataset test-
985
+ ing where the model is trained on three source databases
986
+ and evaluated on a completely unseen database using the
987
+ leave-one-out (LOO) strategy. In particular, the testing sets of the source databases are used as a validation set for computing the equal error rate. Thus, the HTER is calculated directly on the target (unseen) dataset for a fair comparison with the previous methods. As shown in Table III, the proposed ensemble learning provides the best results on three protocols, O&C&I to M, O&M&I to C, and I&C&M to O, and demonstrates that the model can extract more generalized differentiation cues for face PAD. A likely reason is that recently proposed countermeasures focus on exploring a common feature space from multiple source domains, which only fits data in the source domains [17]. In contrast to existing approaches where adversarial learning [12], generative domain adaptation [14], and meta-learning [13] have been used, the proposed ensemble learning improves the generalization by exploiting the relationship of multiple trained models, each an expert in its own source domain, while ensuring that the meta-learner can take complementary information from them to improve the generalization of the face PAD model.
1006
+ C. Experiment on Limited Source Domains
1007
+ We also consider the scenario of a limited source domain
1008
+ by training the model on two source domains instead of three
1009
+ as shown in Table IV. The model continues to achieve the
1010
+ best performance on all the target domains. In particular, the
1011
+ lowest HTER in four protocols and the highest AUC show
1012
+ that limited source data does not degrade the generalization
1013
+ capability of our network in a challenging testing scenario.
1014
+ D. Ablation study
1015
+ To verify the superiority of our proposed ensemble learning
1016
+ and the contributions of each sub-model, we conduct exper-
1017
+ iments for multi-source domains and limited-source domains
1018
+ separately. Table V reports the numerical results for multi-
1019
+ source domain settings. The baseline results represent the
1020
+ performance of the ResNet-BiLSTM model without synthetic
1021
+ data. These results are based on encoded spatiotemporal im-
1022
+ ages obtained from the proposed video distillation scheme.
1023
+ Sub-model 1 represents the results when one set of synthetic images was added to the spatiotemporal encoded images by using an alpha value of 0.5. The numerical results of CNN
1026
+ and CNN-RNN show that synthetic images start improving the
1027
+ model’s performance on all datasets. In particular, the RNN
1028
+ improves the performance significantly. Similarly, sub-model
1029
+ 2 represents the results with a different set of synthetic images
1030
+ (i.e., alpha value was increased to 1.0). The proposed model
1031
+ experienced a slight drop in performance for CNN predictions
1032
+ but continues to improve the performance of RNN on M, I
1033
+ and O. Moreover, when we further evaluate the performance
1034
1037
+ TABLE V: Ablation study using cross-database evaluation.
1038
+ Method | O&C&I to M HTER/AUC | O&M&I to C HTER/AUC | O&C&M to I HTER/AUC | I&C&M to O HTER/AUC
+ Baseline w/o synthetic data | 19.02 / 86.12 | 19.52 / 87.63 | 31.66 / 76.22 | 35.44 / 85.54
+ Sub-model 1 (CNN) | 18.11 / 87.63 | 18.66 / 86.22 | 29.00 / 78.39 | 36.55 / 84.25
+ Sub-model 1 (CNN-RNN) | 09.97 / 99.26 | 07.31 / 99.98 | 36.87 / 76.05 | 19.90 / 99.55
+ Sub-model 2 (CNN) | 18.82 / 91.09 | 22.20 / 84.49 | 35.63 / 77.24 | 34.01 / 74.91
+ Sub-model 2 (CNN-RNN) | 08.40 / 98.64 | 10.14 / 97.04 | 34.44 / 77.01 | 12.41 / 98.55
+ Sub-model 3 (CNN) | 17.21 / 90.87 | 23.22 / 84.49 | 37.33 / 75.05 | 33.45 / 76.14
+ Sub-model 3 (CNN-RNN) | 06.05 / 98.53 | 06.08 / 99.11 | 39.33 / 76.41 | 14.40 / 98.95
+ Ensemble (CNN) | 08.95 / 97.79 | 15.80 / 95.47 | 35.66 / 90.19 | 12.89 / 98.94
+ Ensemble (CNN-RNN) | 04.02 / 99.95 | 06.97 / 99.97 | 33.49 / 93.16 | 10.91 / 99.89
1132
TABLE VI: Ablation study with limited open-source databases using cross-database evaluation.

Method      | O&I to M         | M&I to C         | O&I to C         | O&M to I         | C&M to O
            | HTER(%) | AUC(%) | HTER(%) | AUC(%) | HTER(%) | AUC(%) | HTER(%) | AUC(%) | HTER(%) | AUC(%)
Sub-model 1 | 19.6    | 86.5   | 31.1    | 76.6   | 23.7    | 84.9   | 22.4    | 84.0   | 23.5    | 84.9
Sub-model 2 | 15.8    | 88.1   | 35.6    | 69.0   | 33.3    | 72.7   | 19.8    | 87.9   | 20.2    | 88.3
Sub-model 3 | 12.5    | 95.3   | 18.9    | 89.7   | 18.9    | 89.7   | 18.0    | 92.4   | 17.9    | 90.9
Ensemble    | 07.8    | 98.5   | 17.1    | 94.3   | 12.5    | 97.1   | 15.1    | 93.1   | 14.7    | 93.4
Fig. 7: The t-SNE visualization of feature distributions on cross-testing scenarios. (a) shows the feature distribution of the original encoded video clips, (b) reflects the feature distribution of encoded video clips with a subset of synthetic samples, and (c) shows the feature distribution of the meta-learner.
TABLE VII: Average execution time (in seconds)

Dataset       | Optical flow [63] | ASGS method [22] | TSS method [8] | Ours
CASIA-FASD    | 1560              | 1487             | 1140           | 1023
REPLAY-ATTACK | 1082              | 1003             | 780            | 641
Moreover, when we further evaluate the performance on the third set of synthetic images (α = 1.5), sub-model 3 shows that further improvement can be achieved with synthetic images. When we combine the predictions of these sub-models and train the meta-learner, we achieve remarkable performance on three datasets in comparison to state-of-the-art methods [53], [6], [7], [8], [9], [10], [11]. The quantitative results indicate that ensemble learning guided by the video distillation scheme is beneficial for improving the performance of cross-domain face PAD.
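As a concrete illustration of the stacking step described above, the following is a minimal sketch (not the authors' exact implementation): the held-out predictions of the sub-models are concatenated into a feature vector and a small meta-learner is trained on them. The arrays here are hypothetical placeholders.

import numpy as np
from sklearn.linear_model import LogisticRegression

# p1, p2, p3: live/spoof scores from the three sub-models on held-out
# training clips, each of shape (n_samples,); y: binary labels.
p1, p2, p3 = np.random.rand(100), np.random.rand(100), np.random.rand(100)
y = np.random.randint(0, 2, size=100)

# Stack the sub-model outputs into one feature vector per sample and fit
# a meta-learner that fuses their complementary information.
meta_features = np.stack([p1, p2, p3], axis=1)   # (n_samples, 3)
meta_learner = LogisticRegression().fit(meta_features, y)

# At test time the same stacking is applied to the sub-model scores of an
# unseen probe before thresholding the fused score.
fused_score = meta_learner.predict_proba(meta_features[:1])[0, 1]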
Analysis of limited source domains: In Table VI, we examine the domain generalization ability of our proposed method when only limited source databases are accessible (i.e., only two source datasets). The results indicate that the proposed method is effective even in these challenging cases. We hypothesize that this improvement is due to the fact that encoded RGB images with synthetic samples are almost as descriptive as the entire video.
Comparison of execution times: We analyze the execution time of the proposed video distillation technique against the previous global motion estimation methods [8], [22] and optical flow [63]. Table VII reports the total number of seconds used to generate the training samples on two datasets. All comparison results were obtained in a MATLAB environment on a workstation with a 3.5 GHz Intel Core i7-5930K and 64 GB RAM. One can see that the proposed global motion estimation technique is computationally less expensive than the motion estimation methods reported recently in the literature.
E. Visualization and Analysis

To intuitively show the contribution of each sub-model, we visualize the distributions of different features using t-SNE [64], as illustrated in Fig. 7. The model trained on the O&C&I source domains without synthetic samples shows a trivial distribution in Fig. 7 (a), with an unclear interface between live and spoofed samples. One can see that these overlapped areas can easily be misclassified and degrade the performance. After adding synthetic samples to the sub-model, as represented in Fig. 7 (b), the feature distribution improves and provides a relatively clearer interface than the baseline model, because the synthetic samples force the model to predict the spatiotemporal artifacts.
Fig. 8: The Receiver Operating Characteristic (ROC) curves for (a) O&C&I to M, (b) O&M&I to C, (c) O&C&M to I, and (d) I&C&M to O, developed by plotting the true positive rate (TPR) against the false positive rate (FPR).
Nonetheless, when the meta-model is introduced, a well-structured and compact distribution with a clear interface can be seen in Fig. 7 (c). Thus, our proposed ensemble learning shows good generalizability on unseen target data.
In Fig. 8, we visualize ROC curves to show how well the model distinguishes the real and attack classes. As illustrated in Fig. 8, the meta-model achieves more than 90% AUC on all datasets, which is a very impressive performance on unseen testing sets. The ROC curve plots the TPR against the FPR, with the FPR on the x-axis and the TPR on the y-axis. In particular, the meta-model (ensemble) drags the curves closer to the top-left corner, indicating better performance.
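As a reference for reproducing this kind of analysis, below is a minimal t-SNE sketch using scikit-learn; the feature matrix and labels are hypothetical stand-ins for the encoded-clip embeddings.

import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

# feats: (n_samples, d) embeddings from a sub-model or the meta-learner;
# labels: 1 for live, 0 for attack. Random placeholders here.
feats = np.random.randn(500, 128)
labels = np.random.randint(0, 2, size=500)

# Project the high-dimensional features to 2-D for visualization.
emb2d = TSNE(n_components=2, perplexity=30, init="pca").fit_transform(feats)

plt.scatter(emb2d[labels == 1, 0], emb2d[labels == 1, 1], s=4, label="Real")
plt.scatter(emb2d[labels == 0, 0], emb2d[labels == 0, 1], s=4, label="Attack")
plt.legend()
plt.show()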
V. Conclusions

In this paper, we show that ensemble learning represents an interesting research direction for improving the generalization of face PAD. In particular, the model comprises multiple synthetic source domains, and each sub-model predicts the spatiotemporal inconsistencies based on its similarity to each training domain. Besides, a meta-learner is introduced to take complementary information from each sub-model. Based on the experimental results on four benchmark datasets, the proposed method exhibits better performance than a single model trained only on the original training data, and ensemble stacking is shown to outperform the existing state-of-the-art generalization methods. Finally, the interpretation of the model shows that capturing motion information is quite helpful for improving the generalization ability of the proposed method. Our future work will focus on the development of robust motion estimation methods in end-to-end learning to improve the generalization of face PAD.
VI. Declaration of Competing Interest

The authors have no conflict of interest that could have appeared to influence the work reported in this paper.

VII. Acknowledgments

This work is supported by the Center for Machine Vision and Signal Analysis (CMVS) in the Faculty of Information Technology and Electrical Engineering (ITEE) at the University of Oulu, Finland.
References

[1] J. H. Kim, J. Jang, Y. Kim, and D. Nan, "A structural topic model for exploring user satisfaction with mobile payments," Computers, Materials and Continua, vol. 73, no. 2, pp. 3815–3826, 2022.
[2] P. J. Grother, M. L. Ngan, K. K. Hanaoka, et al., "Ongoing face recognition vendor test (FRVT) part 2: Identification," 2018.
[3] U. Muhammad and M. Oussalah, "Face anti-spoofing from the perspective of data sampling," Electronics Letters, 2022.
[4] U. Muhammad and M. Oussalah, "Self-supervised face presentation attack detection with dynamic grayscale snippets," arXiv preprint arXiv:2208.13070, 2022.
[5] Z. Boulkenafet, J. Komulainen, and A. Hadid, "Face spoofing detection using colour texture analysis," IEEE Transactions on Information Forensics and Security, vol. 11, no. 8, pp. 1818–1830, 2016.
[6] A. Liu, Z. Tan, J. Wan, Y. Liang, Z. Lei, G. Guo, and S. Z. Li, "Face anti-spoofing via adversarial cross-modality translation," IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2759–2772, 2021.
[7] R. Shao, X. Lan, and P. C. Yuen, "Regularized fine-grained meta face anti-spoofing," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 11974–11981, 2020.
[8] U. Muhammad, Z. Yu, and J. Komulainen, "Self-supervised 2D face presentation attack detection via temporal sequence sampling," Pattern Recognition Letters, 2022.
[9] G. Wang, H. Han, S. Shan, and X. Chen, "Cross-domain face presentation attack detection via multi-domain disentangled representation learning," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6678–6687, 2020.
[10] S. Saha, W. Xu, M. Kanakis, S. Georgoulis, Y. Chen, D. P. Paudel, and L. Van Gool, "Domain agnostic feature learning for image and video based face anti-spoofing," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 802–803, 2020.
[11] R. Shao, X. Lan, J. Li, and P. C. Yuen, "Multi-adversarial discriminative deep domain generalization for face presentation attack detection," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10023–10031, 2019.
[12] M. Liu, J. Mu, Z. Yu, K. Ruan, B. Shu, and J. Yang, "Adversarial learning and decomposition-based domain generalization for face anti-spoofing," Pattern Recognition Letters, vol. 155, pp. 171–177, 2022.
[13] R. Cai, Z. Li, R. Wan, H. Li, Y. Hu, and A. C. Kot, "Learning meta pattern for face anti-spoofing," IEEE Transactions on Information Forensics and Security, vol. 17, pp. 1201–1213, 2022.
[14] Q. Zhou, K.-Y. Zhang, T. Yao, R. Yi, K. Sheng, S. Ding, and L. Ma, "Generative domain adaptation for face anti-spoofing," in European Conference on Computer Vision, pp. 335–356, Springer, 2022.
[15] S. Liu, S. Lu, H. Xu, J. Yang, S. Ding, and L. Ma, "Feature generation and hypothesis verification for reliable face anti-spoofing," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 1782–1791, 2022.
[16] H. Huang, Y. Xiang, G. Yang, L. Lv, X. Li, Z. Weng, and Y. Fu, "Generalized face anti-spoofing via cross-adversarial disentanglement with mixing augmentation," in ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2939–2943, IEEE, 2022.
[17] Y. Jia, J. Zhang, S. Shan, and X. Chen, "Single-side domain generalization for face anti-spoofing," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8484–8493, 2020.
[18] K. Zhou, Y. Yang, Y. Qiao, and T. Xiang, "Domain adaptive ensemble learning," IEEE Transactions on Image Processing, vol. 30, pp. 8008–8018, 2021.
[19] I. J. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, "Generative adversarial networks," arXiv preprint arXiv:1406.2661, 2014.
[20] J. Donahue, P. Krähenbühl, and T. Darrell, "Adversarial feature learning," arXiv preprint arXiv:1605.09782, 2016.
[21] A. Radford, L. Metz, and S. Chintala, "Unsupervised representation learning with deep convolutional generative adversarial networks," arXiv preprint arXiv:1511.06434, 2015.
[22] U. Muhammad, J. Zhang, L. Liu, and M. Oussalah, "An adaptive spatio-temporal global sampling for presentation attack detection," IEEE Transactions on Circuits and Systems II: Express Briefs, 2022.
[23] T. de Freitas Pereira, A. Anjos, J. M. D. Martino, and S. Marcel, "LBP-TOP based countermeasure against face spoofing attacks," in Asian Conference on Computer Vision, pp. 121–132, Springer, 2012.
[24] K. Patel, H. Han, and A. K. Jain, "Secure face unlock: Spoof detection on smartphones," IEEE Transactions on Information Forensics and Security, vol. 11, no. 10, pp. 2268–2283, 2016.
[25] Z. Boulkenafet, J. Komulainen, X. Feng, and A. Hadid, "Scale space texture analysis for face anti-spoofing," in 2016 International Conference on Biometrics (ICB), pp. 1–6, IEEE, 2016.
[26] D. Wen, H. Han, and A. K. Jain, "Face spoof detection with image distortion analysis," IEEE Transactions on Information Forensics and Security, vol. 10, no. 4, pp. 746–761, 2015.
[27] J. Yang, Z. Lei, S. Liao, and S. Z. Li, "Face liveness detection with component dependent descriptor," in 2013 International Conference on Biometrics (ICB), pp. 1–6, IEEE, 2013.
[28] L. Li and X. Feng, "Face anti-spoofing via deep local binary pattern," in Deep Learning in Object Detection and Recognition, pp. 91–111, Springer, 2019.
[29] U. Muhammad, T. Holmberg, W. C. de Melo, and A. Hadid, "Face anti-spoofing via sample learning based recurrent neural network (RNN)," in BMVC, p. 113, 2019.
[30] S. Tirunagari, N. Poh, D. Windridge, A. Iorliam, N. Suki, and A. T. Ho, "Detection of face spoofing using visual dynamics," IEEE Transactions on Information Forensics and Security, vol. 10, no. 4, pp. 762–777, 2015.
[31] S. Bharadwaj, T. I. Dhamecha, M. Vatsa, and R. Singh, "Computationally efficient face spoofing detection with motion magnification," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 105–110, 2013.
[32] S.-Q. Liu, X. Lan, and P. C. Yuen, "Multi-channel remote photoplethysmography correspondence feature for 3D mask face presentation attack detection," IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2683–2696, 2021.
[33] L. Sun, G. Pan, Z. Wu, and S. Lao, "Blinking-based live face detection using conditional random fields," in International Conference on Biometrics, pp. 252–260, Springer, 2007.
[34] W. Yin, Y. Ming, and L. Tian, "A face anti-spoofing method based on optical flow field," in 2016 IEEE 13th International Conference on Signal Processing (ICSP), pp. 1333–1337, IEEE, 2016.
[35] H. Li, P. He, S. Wang, A. Rocha, X. Jiang, and A. C. Kot, "Learning generalized deep feature representation for face anti-spoofing," IEEE Transactions on Information Forensics and Security, vol. 13, no. 10, pp. 2639–2652, 2018.
[36] Y. Liu, A. Jourabloo, and X. Liu, "Learning deep models for face anti-spoofing: Binary or auxiliary supervision," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 389–398, 2018.
[37] H. Yue, K. Wang, G. Zhang, H. Feng, J. Han, E. Ding, and J. Wang, "Cyclically disentangled feature translation for face anti-spoofing," arXiv preprint arXiv:2212.03651, 2022.
[38] C.-C. Chuang, C.-Y. Wang, and S.-H. Lai, "Generalized face anti-spoofing via multi-task learning and one-side meta triplet loss," arXiv preprint arXiv:2211.15955, 2022.
[39] Z. Wang, Z. Wang, Z. Yu, W. Deng, J. Li, T. Gao, and Z. Wang, "Domain generalization via shuffled style assembly for face anti-spoofing," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4123–4133, 2022.
[40] E. Rosten and T. Drummond, "Fusing points and lines for high performance tracking," in Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1, vol. 2, pp. 1508–1515, IEEE, 2005.
[41] A. Alahi, R. Ortiz, and P. Vandergheynst, "FREAK: Fast retina keypoint," in 2012 IEEE Conference on Computer Vision and Pattern Recognition, pp. 510–517, IEEE, 2012.
[42] P. H. Torr and A. Zisserman, "Robust parameterization and computation of the trifocal tensor," Image and Vision Computing, vol. 15, no. 8, pp. 591–605, 1997.
[43] B. Kamgar-Parsi, W. Lawson, and B. Kamgar-Parsi, "Toward development of a face recognition system for watchlist surveillance," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 33, no. 10, pp. 1925–1937, 2011.
[44] S. Hochreiter and J. Schmidhuber, "Long short-term memory," Neural Computation, vol. 9, no. 8, pp. 1735–1780, 1997.
[45] S. Fatemifar, M. Awais, A. Akbari, and J. Kittler, "A stacking ensemble for anomaly based client-specific face spoofing detection," in 2020 IEEE International Conference on Image Processing (ICIP), pp. 1371–1375, IEEE, 2020.
[46] M. Ganaie, M. Hu, et al., "Ensemble deep learning: A review," arXiv preprint arXiv:2104.02395, 2021.
[47] R. Polikar, "Ensemble learning," in Ensemble Machine Learning, pp. 1–34, Springer, 2012.
[48] M. Schuster and K. K. Paliwal, "Bidirectional recurrent neural networks," IEEE Transactions on Signal Processing, vol. 45, no. 11, pp. 2673–2681, 1997.
[49] R. R. Selvaraju, M. Cogswell, A. Das, R. Vedantam, D. Parikh, and D. Batra, "Grad-CAM: Visual explanations from deep networks via gradient-based localization," in Proceedings of the IEEE International Conference on Computer Vision, pp. 618–626, 2017.
[50] M. D. Zeiler and R. Fergus, "Visualizing and understanding convolutional networks," in European Conference on Computer Vision, pp. 818–833, Springer, 2014.
[51] J. T. Springenberg, A. Dosovitskiy, T. Brox, and M. Riedmiller, "Striving for simplicity: The all convolutional net," arXiv preprint arXiv:1412.6806, 2014.
[52] M. T. Ribeiro, S. Singh, and C. Guestrin, ""Why should I trust you?" Explaining the predictions of any classifier," in Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1135–1144, 2016.
[53] Y. Qin, Z. Yu, L. Yan, Z. Wang, C. Zhao, and Z. Lei, "Meta-teacher for face anti-spoofing," IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021.
[54] Z. Wang, Z. Yu, X. Wang, Y. Qin, J. Li, C. Zhao, Z. Lei, X. Liu, S. Li, and Z. Wang, "Consistency regularization for deep face anti-spoofing," arXiv preprint arXiv:2111.12320, 2021.
[55] A. Tarvainen and H. Valpola, "Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results," Advances in Neural Information Processing Systems, vol. 30, 2017.
[56] Y. Jia, J. Zhang, S. Shan, and X. Chen, "Unified unsupervised and semi-supervised domain adaptation network for cross-scenario face anti-spoofing," Pattern Recognition, vol. 115, p. 107888, 2021.
[57] G. Huang, Z. Liu, L. Van Der Maaten, and K. Q. Weinberger, "Densely connected convolutional networks," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700–4708, 2017.
[58] Z. Boulkenafet, J. Komulainen, L. Li, X. Feng, and A. Hadid, "OULU-NPU: A mobile face presentation attack database with real-world variations," in 2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017), pp. 612–618, IEEE, 2017.
[59] Z. Zhang, J. Yan, S. Liu, Z. Lei, D. Yi, and S. Z. Li, "A face anti-spoofing database with diverse attacks," in 2012 5th IAPR International Conference on Biometrics (ICB), pp. 26–31, IEEE, 2012.
[60] I. Chingovska, A. Anjos, and S. Marcel, "On the effectiveness of local binary patterns in face anti-spoofing," in 2012 BIOSIG-Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG), pp. 1–7, IEEE, 2012.
[61] L. Prechelt, "Early stopping, but when?," in Neural Networks: Tricks of the Trade, pp. 55–69, Springer, 1998.
[62] K. He, X. Zhang, S. Ren, and J. Sun, "Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification," in Proceedings of the IEEE International Conference on Computer Vision, pp. 1026–1034, 2015.
[63] B. K. Horn and B. G. Schunck, "Determining optical flow," Artificial Intelligence, vol. 17, no. 1-3, pp. 185–203, 1981.
[64] L. Van der Maaten and G. Hinton, "Visualizing data using t-SNE," Journal of Machine Learning Research, vol. 9, no. 11, 2008.
+
3tA0T4oBgHgl3EQfNP_j/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9cbd38f9161ac71ab44828858540d255b6f02e9be3801353243e99b327d6298
3
+ size 1609960
4dAzT4oBgHgl3EQf9f77/content/tmp_files/2301.01922v1.pdf.txt ADDED
@@ -0,0 +1,1151 @@
Open-Set Face Identification on Few-Shot Gallery by Fine-Tuning

Hojin Park, Jaewoo Park, and Andrew Beng Jin Teoh
School of Electrical and Electronics Engineering
College of Engineering, Yonsei University
Seoul, Korea
9
+ set face identification problem on a few-shot gallery by fine-
10
+ tuning. The problem assumes a realistic scenario for face iden-
11
+ tification, where only a small number of face images is given
12
+ for enrollment and any unknown identity must be rejected
13
+ during identification. We observe that face recognition models
14
+ pretrained on a large dataset and naively fine-tuned models
15
+ perform poorly for this task. Motivated by this issue, we propose
16
+ an effective fine-tuning scheme with classifier weight imprinting
17
+ and exclusive BatchNorm layer tuning. For further improvement
18
+ of rejection accuracy on unknown identities, we propose a novel
19
+ matcher called Neighborhood Aware Cosine (NAC) that computes
20
+ similarity based on neighborhood information. We validate the
21
+ effectiveness of the proposed schemes thoroughly on large-scale
22
+ face benchmarks across different convolutional neural network
23
+ architectures. The source code for this project is available at:
24
+ https://github.com/1ho0jin1/OSFI-by-FineTuning
25
+ I. INTRODUCTION
26
+ Recently face recognition (FR) has achieved astonishing
27
+ success attributed to three factors in large. Deep convolutional
28
+ neural network (CNN) architectures [2], [3] that have strong
29
+ visual prior were developed and leveraged as FR embedding
30
+ models. Large-scale face datasets [4], [5] that cover massive
31
+ identities with diverse ethnicity and facial variations became
32
+ available. On top of these, various metric learning losses
33
+ [6]–[9] elevated the performance of deep FR models to an
34
+ unprecedented level.
35
+ The majority of FR embedding models have been evaluated
36
+ on numerous benchmarks with closed-set identification [7]–
37
+ [11]. The closed-set identification protocol assumes all probe
38
+ identities present in the gallery. However, in a realistic sce-
39
+ nario, an unknown identity that is not enrolled may be en-
40
+ countered. Another important but practical aspect to consider
41
+ is the scarcity of intra-class samples for the gallery identities
42
+ to be registered; namely, due to the expensive data acquisition
43
+ cost and privacy issue, only a very small number of samples
44
+ might be available for each gallery identity to register. In this
45
+ respect, open-set face identification (OSFI) with the small-
46
+ sized gallery is closer to a real scenario as it needs to perform
47
+ both known probe identity identification and unknown probe
48
+ identity rejection based on the limited information from the
49
+ small gallery set. Despite its versatile practical significance,
50
+ however, OSFI with a small gallery has been rarely explored.
51
+ Devising a model specific to OSFI with a small gallery
52
+ can be challenging in the following aspects: Firstly, an OSFI
53
+ (a)
54
+ (b)
55
+ Fig. 1.
56
+ (a) Full fine-tuning all parameters severely degrades the OSFI
57
+ performance, while our method significantly improves the pre-trained model.
58
+ Detection & Identification Rate (DIR) [1] quantifies both correct identification
59
+ of the known probe identities and detection of the unknown. (b) An outline of
60
+ our proposed fine-tuning scheme: Given a model pretrained on a large-scale
61
+ face database, we initialize the gallery set classifier by weight imprinting,
62
+ and then fine-tune the model on a few-shot gallery set by training only the
63
+ BatchNorm layers. In the evaluation stage, a given probe is either accepted as
64
+ known or rejected as an unknown identity based on novel similarity matcher
65
+ dubbed Neighborhood Aware Cosine (NAC) matcher.
66
+ model performs both identifications of a known probe identity
67
+ but also correct rejection of unknown probe identity. Hence,
68
+ conventional FR embedding models devised mainly for closed-
69
+ set identification can perform poorly at the rejection of the
70
+ unknown. In fact, as observed in Fig. 1 (a), FR embedding
71
+ models pretrained on a large-scale public face database are
72
+ not effective for open-set identification, leaving room for
73
+ improvement. This suggests the need for fitting the pretrained
74
+ arXiv:2301.01922v1 [cs.CV] 5 Jan 2023
75
+
76
+ IJB-C
77
+ CASIA-WebFace
78
+ 0.8
79
+ 0.9
80
+ Rate
81
+ Pretrained
82
+ Pretrained
83
+ 0.7
84
+ Full finetuning
85
+ 0.8
86
+ Full finetuning
87
+ &Identification
88
+ Ours
89
+ Ours
90
+ 0.6
91
+ 0.7
92
+ 0.5
93
+ 0.6
94
+ etection
95
+ 0.4
96
+ 0.5
97
+ 0.3
98
+ 0.4
99
+ 0.01
100
+ 0.1
101
+ 1.0
102
+ 0.01
103
+ 0.1
104
+ 1.0
105
+ FalseAlarmRate
106
+ FalseAlarmRatePretrain Set
107
+ Evaluation Set (disjoint from pretrain set)
108
+ Known
109
+ Unknown
110
+ Gallery (Few-shot)
111
+ Known Query
112
+ Unknown Query
113
+ Weight Imprinting
114
+ Probe
115
+ Accept
116
+ M
117
+ NAC
118
+ 2Q
119
+ Reject
120
+ Evaluation
121
+ Pretraining
122
+ BatchNorm-only
123
+ Fine-Tuningmodel to be more specific to the given gallery set.
124
+ Secondly, due to the few-shot nature of the small-sized
125
+ gallery set, there is a high risk of overfitting for fine-tuning
126
+ the pretrained model. As shown in Fig. 1 (a), full fine-tuning
127
+ (i.e. updating all parameters) of the pretrained model results
128
+ in severe performance degradation. This drives us to devise an
129
+ overfitting-resilient parameter tuning scheme.
130
+ Moreover, an ordinary cosine similarity matcher used in the
131
+ closed-set identification might have a large tradeoff between
132
+ the known probe identity identification and unknown probe
133
+ identity rejection. As will be observed in Sec. III-D, the simple
134
+ cosine matcher has a severe drawback for the task at hand. This
135
+ motivates us to devise a robust matcher for OSFI.
136
+ Based on these observations, we propose an efficient fine-
137
+ tuning scheme and a novel similarity-based matcher for OSFI
138
+ constrained on a small gallery set. Our fine-tuning scheme
139
+ consists of weight initialization of the classifier governed by
140
+ weight imprinting (WI) [12] and training only BatchNorm
141
+ (BN) layers [13] for overfitting-resilient adaptation on the
142
+ small gallery set. Moreover, for both effective detection of
143
+ the unknown and identification of the known probe identities,
144
+ a novel Neighborhood Aware Cosine (NAC) matcher that
145
+ respects the neighborhood information of the learned gallery
146
+ features, and hence better calibrates the rejection score is
147
+ proposed. Our contributions are summarized as follows:
148
+ 1) To effectively solve the OSFI problem constrained on a
149
+ small gallery set, we propose to fine-tune the pretrained
150
+ face embedding model. Since full fine-tuning deterio-
151
+ rates the embedding quality, we search for the optimal
152
+ method.
153
+ 2) We demonstrate that the combination of weight imprint-
154
+ ing and exclusive BatchNorm layer fine-tuning excels
155
+ other baselines.
156
+ 3) We recognize that the commonly used cosine similarity
157
+ is a sub-optimal matcher for rejection. We propose a
158
+ novel matcher named NAC that significantly improves
159
+ the rejection accuracy.
160
+ II. RELATED WORKS
161
+ A. Open Set Face Identification (OSFI)
162
+ [14], one of the earliest works in OSFI, used their proposed
163
+ Open-set TCM-kNN on top of features extracted by PCA and
164
+ Fisher Linear Discriminant. [15] proposed their own OSFI
165
+ protocol and showed that an extreme value machine [16]
166
+ trained on the gallery set performs better than using cosine
167
+ similarity or linear discriminant analysis for matchers. [17]
168
+ trained a matcher composed of locality sensitive hashing [18]
169
+ and partial least squares [19]. [20] applied OpenMax [21]
170
+ and PROSER [22], two methods for open-set recognition of
171
+ generic images, on top of extracted face features.
172
+ All previous works propose to train an open-set classifier
173
+ (matcher) of some form, but all of them use a fixed
174
+ encoder. To the best of our knowledge, we are the first to
175
+ propose an effective fine-tuning scheme as a solution to OSFI.
176
+ B. Cosine Similarity-based Loss Functions
177
+ [23] proposed to l2-normalize the features such that the
178
+ train loss is only determined by the angle between the feature
179
+ and the classifier weights. [7] further extended this idea by
180
+ applying a multiplicative margin to the angle between a feature
181
+ and its corresponding weight vector. This penalized the intra-
182
+ class features to be gathered while forcing inter-class centers
183
+ (prototypes) to be separated. A number of follow-up papers
184
+ such as [8]–[11] modify this angular margin term in different
185
+ ways, but their motivations and properties are generally simi-
186
+ lar. Therefore, in our experiments we only use CosFace loss [8]
187
+ as a representative method. For comprehensive understanding
188
+ of these loss functions, refer to [24].
189
+ III. APPROACH
190
+ Our proposed approach is two-fold: fine-tuning on the
191
+ gallery and open-set identification evaluation. In the fine-
192
+ tuning stage, the classifier is initialized by weight imprinting
193
+ to initiate learning from optimal discriminative features, and
194
+ the model is fine-tuned by updating only the BatchNorm layers
195
+ to avoid overfitting on the few-shot gallery data. In evaluation,
196
+ we utilize a novel matcher NAC that computes a neighborhood
197
+ aware similarity for better-calibrated rejection of the unknown.
198
+ We demonstrate that the combination of these three methods
199
+ significantly outperforms all other baselines.
200
+ A. Problem Definition and Metrics
201
+ Formally, in an OSFI problem, we assume the availability
202
+ of an encoder φ pretrained on a large-scale face database (FR
203
+ embedding model), which is disjoint from the evaluation set
204
+ with respect to identity. The evaluation set consists of a gallery
205
+ G = {(xG
206
+ i , yG
207
+ i )}Cm
208
+ i=1 and a probe set Q. The probe set Q is
209
+ further divided into the known probe set K = {(xK
210
+ i , yK
211
+ i )}
212
+ and the unknown probe set U = {(xU
213
+ i , yU
214
+ i )}. G and K has no
215
+ overlapping images x but shares same identities y ∈ {1, ..., C}
216
+ whereas U has disjoint identities, i.e., YU ∩ {1, ..., C} = Ø.
217
+ m refers to the number of images per identity in G, which
218
+ we fix to 3 to satisfy the few-shot constraint. We allow the
219
+ encoder to be fine-tuned over the gallery set.
220
+ The evaluation of OSFI performance uses the detection
221
+ and identification rate at some false alarm rate (DIR@FAR).
222
+ FAR=1 means we do not reject any probe. Note that unlike
223
+ the general case shown in [1], here we only consider rank-
224
+ 1 identification rate for DIR. Therefore, DIR@FAR=1 is the
225
+ rank-1 closed-set identification accuracy.
226
+ B. Classifier Initialization by Weight Imprinting
227
+ Due to the few-shot nature of the gallery set where we
228
+ fine-tune on, the initialization of model parameters and, in
229
+ particular, of classifier weights is crucial to avoid overfitting.
230
+ The most naive option is a random initialization of the
231
+ classifier weight matrix W. Another commonly used strategy
232
+ is linear probing [25], i.e., finding an optimized weight W
233
+ that minimizes the classification loss over the frozen encoder
234
+ embeddings φ(x).
235
+
236
+ We experimentally find that, as seen in Fig. 2, both of these
237
+ initialization schemes do not induce discriminative structure
238
+ for the encoder embedding φ(x). In particular, during fine-
239
+ tuning, each weight vector wc in the classifier acts as a center
240
+ (or prototype) for the c-th class (i.e. identity). Fig. 2 shows that
241
+ neither random initialization nor linear probing of wc derives
242
+ optimally discriminative weight vectors wc, resulting in low
243
+ quality of class separation of gallery features.
244
+ Motivated from this issue, we propose to initialize by weight
245
+ imprinting (WI), which induces the optimal discriminative
246
+ quality for the gallery features:
247
+ wc =
248
+
249
+ wc
250
+ ∥�
251
+ wc∥2
252
+ ,
253
+
254
+ wc = 1
255
+ m
256
+
257
+ yG
258
+ i =c
259
+ φ(xG
260
+ i )
261
+ (1)
262
+ where ∥·∥2 is the l2 norm, and the embedding feature φ(x) is
263
+ unit-normalized such that ∥φ(x)∥2 = 1.
264
+ As expected, Fig. 2 verifies that fine-tuning from the weight
265
+ imprinted initialization achieves a much higher discriminative
266
+ quality. This shows the superiority of weight imprinting com-
267
+ pared to random initialization and linear probing.
268
+ Note that weight imprinting has been frequently used in FR
269
+ embedding models [8], [9]. However, the critical difference
270
+ is that those models utilize weight imprinting only to prepare
271
+ templates before evaluation. In our case, the WI initialization
272
+ is utilized particularly for fine-tuning.
273
+ C. BatchNorm-only Fine-Tuning
274
+ Choosing the appropriate layer to tune is another important
275
+ issue for fine-tuning. Moreover, due to the extremely small
276
+ number of samples for each gallery identity, there is a risk
277
+ of overfitting as suggested by the classical theory on the vc
278
+ dimension [26]. In fact, a recent study [25] suggests that full
279
+ fine-tuning hurts the pretrained filters including the useful
280
+ convolutional filters learned from a large-scale database.
281
+ To minimize the negative effect of this deterioration, we
282
+ fine-tune only the BatchNorm (BN) layers along with the
283
+ classifier weight:
284
+ min
285
+ W, θBN L(W T φθ(x), y),
286
+ θ = [θBN, θrest]
287
+ (2)
288
+ where θ refers to all parameters in the encoder φ = φθ and
289
+ θBN and θrest respectively refers to BatchNorm parameters
290
+ and the rest. During fine-tuning, θrest is fixed with no gradient
291
+ flow. The loss function L can be a softmax cross-entropy, or
292
+ widely used FR embedding model losses such as ArcFace [9]
293
+ and CosFace [8].
294
+ Due to selective fine-tuning of only the BN layers (and clas-
295
+ sifier weight), the convolutional filters learned from the large-
296
+ scale pre-train database are simply transferred. The BN-only
297
+ training is thus computationally efficient as it occupies only
298
+ 0.1-0.01% of the total parameters in the CNN. Nevertheless,
299
+ its model complexity is sufficient to learn a general image task
300
+ as guaranteed by [27].
301
+ Fig. 2.
302
+ The Intra-class variance (left) and inter-class separation (right) of
303
+ classifiers that are initialized by different schemes. NormFace [23], CosFace
304
+ [8] and ArcFace [9] loss are used for linear probing initialization. The weight
305
+ imprinting initialization does not require training, thus stays constant.
306
+ Fig. 3. An unknown
307
+ feature u placed be-
308
+ tween gallery proto-
309
+ types of class i and
310
+ j. ϵ is some small
311
+ positive constant.
312
+ TABLE I
313
+ AVERAGE ANGLE (DEGREES) BETWEEN IJB-C
314
+ PROBE FEATURE VECTORS AND THEIR TOP-K
315
+ CLOSEST GALLERY PROTOTYPES. THE THIRD
316
+ COLUMN REFERS TO THE AVERAGE OF TOP-2 TO
317
+ TOP-16.
318
+ Encoder
319
+ top-1
320
+ top-2
321
+ 2∼16
322
+ Res50
323
+ K
324
+ 50.7◦
325
+ 64.0◦
326
+ 69.1◦
327
+ U
328
+ 63.8◦
329
+ 66.0◦
330
+ 69.7◦
331
+ VGG19
332
+ K
333
+ 53.4◦
334
+ 66.2◦
335
+ 71.4◦
336
+ U
337
+ 65.9◦
338
+ 68.2◦
339
+ 72.1◦
340
+ D. Neighborhood Aware Cosine Similarity
341
+ The cosine similarity function is the most predominant
342
+ matcher for contemporary face verification and identification.
343
+ Denoting the probe feature vector as p and the gallery pro-
344
+ totypes as {gj}C
345
+ j=1, where gj :=
346
+ 1
347
+ m
348
+
349
+ yG
350
+ i =j φ(xG
351
+ i ) is the
352
+ mean of all the normalized gallery feature vectors of class
353
+ j, identification is performed by finding the maximum class
354
+ index c = arg maxj=1,...,C cos(p, gj). On the other hand, in
355
+ the extension to OSFI, the decision of accepting as known or
356
+ rejecting as unknown can be formulated:
357
+ max
358
+ j=1,...,C cos(p, gj)
359
+ Accept
360
+
361
+ Reject
362
+ τ
363
+ (3)
364
+ where cos(p, q) =
365
+ p
366
+ ∥p∥2 ·
367
+ q
368
+ ∥q∥2 is the cosine similarity between
369
+ two feature vectors, τ is the rejection threshold.
370
+ Now, consider an example illustrated in Fig. 3. The cosine
371
+ matcher will assign the probe u to the identity i with the
372
+ acceptance score 0.866, which is fairly close to the maximum
373
+ score 1. This value alone might imply that the probe is a
374
+ known sample as it is close to the gallery identity i. However,
375
+ the probe feature vector is placed right in the middle of the
376
+ identities i and j. The in-between placement of u suggests
377
+ that the probe can be possibly unknown and thus should be
378
+ assigned with a lesser value of the acceptance score.
379
+ Motivated by this intuition, we propose the Neighborhood
380
+ Aware Cosine (NAC) matcher that respects all top-k surround-
381
+ ing gallery features:
382
+ NAC(p, gi) = exp(cos(p, gi)) · 1[i ∈ Nk]
383
+
384
+ j∈Nk exp(cos(p, gj))
385
+ (4)
386
+
387
+ Intra-classvariance
388
+ Inter-class separation
389
+ 。06
390
+ 104 °
391
+ NormFace
392
+ CosFace
393
+ 80 °
394
+ 103 °
395
+ ArcFace
396
+ 70 °
397
+ Weight Imprinting
398
+ 102 °
399
+ Angle
400
+ 60 °
401
+ 101 °
402
+ 。09
403
+ 100 °
404
+ 40 °
405
+ 。66
406
+ 98 °
407
+ 0
408
+ 5
409
+ 10
410
+ 15
411
+ 20
412
+ 0
413
+ 5
414
+ 10
415
+ 15
416
+ 20
417
+ Epochs
418
+ Epochs9i
419
+ 30°
420
+ 30° + E
421
Fig. 4. The distributions of scores for known (K) and unknown (U) probes of the IJB-C dataset using cosine similarity (left) and NAC with k = 16 (right). The scores are min-max normalized and τ is set such that FAR=0.01 for both cases. DIR=48.05% (left) vs DIR=54.53% (right). ResNet-50 was used as the encoder.
Here, N_k is the index set of the k gallery prototypes nearest to the probe feature p, and 1[·] is the indicator function. The main goal of the NAC matcher is to improve unknown rejection. Table I shows that, unlike unknown probes, known probe features are much closer to their closest prototype than to the second-closest one. By exploiting this phenomenon, the NAC matcher is able to assign a much smaller score to an unknown probe, as shown in Fig. 4.
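The following is a minimal NumPy sketch of the NAC score in Eq. (4); the probe and prototype arrays are hypothetical placeholders.

import numpy as np

def nac_scores(p, prototypes, k=16):
    # Cosine similarity between the probe and every gallery prototype.
    p = p / np.linalg.norm(p)
    g = prototypes / np.linalg.norm(prototypes, axis=1, keepdims=True)
    cos = g @ p                                   # (C,)
    # Softmax restricted to the k nearest prototypes; all others get 0,
    # implementing the indicator 1[i in N_k] of Eq. (4).
    topk = np.argsort(cos)[-k:]
    scores = np.zeros_like(cos)
    e = np.exp(cos[topk])
    scores[topk] = e / e.sum()
    return scores  # max(scores) is thresholded against tau for rejection

p = np.random.randn(512)
prototypes = np.random.randn(1000, 512)
s = nac_scores(p, prototypes)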
IV. EXPERIMENTS
A. Datasets
We use the VGGFace2 [4] dataset for pretraining the encoders, and CASIA-WebFace [28] and IJB-C [29] for evaluation. Using MTCNN [30], we align and crop every image to 112x112 with equal parameters for all datasets. For VGGFace2, we remove all identities overlapping with the evaluation datasets.
Each evaluation dataset is split equally into two groups such that the numbers of known and unknown identities are equal. We then randomly choose m=3 images of each known identity to create the gallery (G), and the rest are known probes (K). All images of unknown identities are unknown probes (U). Table II summarizes the statistics of the datasets we use. Note that we chose every known identity to have more than 10 images such that there are at least 7 probe samples per identity. Also note that the IJB-C dataset consists of still images and video frames (video frames typically have poorer image quality). We sample the gallery from still images and the probes from video frames, which makes this dataset much more challenging. We note that the protocol devised here can be regarded as an extension of that in [15].
B. Baselines
1) Classifier Initialization: Along with Weight Imprinting (denoted WI), we report the results of using random initialization and linear probing initialization, as described in Sec. III-B.
2) Encoder Layer Fine-Tuning: Along with BatchNorm-only fine-tuning (denoted BN), we explore tuning other layers of the encoder. The simplest option is tuning every layer (i.e., all parameters of the model), which we denote as full. The second is freezing the early layers and training only the deeper ones, which we denote as partial.
TABLE II: Dataset statistics. The number inside the parentheses refers to the average number of images per identity. For evaluation datasets, known identities consist of the gallery (G) and known probe (K), where the gallery has 3 images per identity.

Pretrain      | # IDs (images / ID)
VGGFace2      | 7,689 (354.0)

Evaluation    | Known (G + K)  | Unknown (U)
CASIA-WebFace | 5,287 (3+20.0) | 5,288 (16.5)
IJB-C         | 1,765 (3+15.3) | 1,765 (13.9)
TABLE III: The total number of parameters and the number of fine-tuned parameters for each encoder fine-tuning scheme. '+' refers to the number of parameters added by the parallel adapter.

# Params (million)  | VGG19 | Res50
Pretrained          | 32.88 | 43.58
Full fine-tuning    | 32.88 | 43.58
Partial fine-tuning | 4.72  | 4.72
Parallel Adapter    | +2.22 | +3.39
BN-only fine-tuning | 0.01  | 0.03
We also consider the parallel residual adapter [31], which adds additional 1x1 convolutional filters to the original convolutional layers. During fine-tuning, only these additional filters are trained, to capture the subtle differences in the new dataset. Note that the authors of [31] apply this technique to ResNet [3], hence the name residual parallel adapter; but the idea applies generally to CNNs without residual connections, so we also apply it to a VGG-style network. We denote this as PA, referring to Parallel Adapter. A sketch of the idea follows this subsection.
3) Matcher: During OSFI evaluation, the vanilla cosine similarity matcher is adopted as the baseline matcher. When the NAC matcher is used, we denote it by NAC. For comparison, we also use the extreme value machine (EVM) proposed in [15]. We train the EVM on the gallery set with the best parameters found by the authors.
In summary, the classifier initialization methods we consider are {Random, Linear probing, WI}, the fine-tuning layer configurations are {Full, Partial, PA, BN}, and the matchers are {cos, EVM, NAC}. We test the OSFI performance of different combinations of these three components. Our proposed OSFI scheme is to use WI+BN+NAC jointly.
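For reference, a minimal sketch of the parallel adapter idea: a trainable 1x1 convolution added alongside a frozen convolution. This is a generic rendition under stated assumptions, not the exact code of [31].

import torch.nn as nn

class ParallelAdapter(nn.Module):
    """A frozen conv with a trainable 1x1 adapter added in parallel."""
    def __init__(self, conv: nn.Conv2d):
        super().__init__()
        self.conv = conv
        for p in self.conv.parameters():       # original filters frozen
            p.requires_grad = False
        self.adapter = nn.Conv2d(conv.in_channels, conv.out_channels,
                                 kernel_size=1, stride=conv.stride,
                                 bias=False)
        nn.init.zeros_(self.adapter.weight)    # start as a no-op branch

    def forward(self, x):
        return self.conv(x) + self.adapter(x)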
C. Training Details
We choose VGG19 [2] and ResNet-50 [3] as the encoders, with feature dimension 512. We pretrain these encoders on the VGGFace2 dataset with the CosFace loss (scale=32, margin=0.4) until convergence.
We then fine-tune the encoder with the different classifier initialization schemes and encoder layer configurations. When using the linear probing initialization, we train the classifier until the training accuracy reaches 95%.
We follow the encoder layer fine-tuning in Sec. IV-B. For partial fine-tuning, we only train the last 2 convolutional layers (Conv-BN-ReLU-Conv-BN-ReLU). Table III shows the number of total and updated parameters for each fine-tuning scheme.
Fig. 5. The OSFI performance of cosine similarity and NAC with different values of k on the IJB-C dataset, using VGGNet-19 (left) and ResNet-50 (mid) as the encoder. The square markers refer to cosine similarity and the star marks the optimal k for each layer fine-tuning method. To summarize the OSFI performance into a single number, we used the area under the curve (AUC, %) of the DIR@FAR curve. (Right) DIR@FAR curves of the Pretrained and BN configurations using cosine similarity and NAC (k=16) as the matcher. Numbers in the legend show the AUC values. When k = 1, NAC is replaced by cos.
We fix the number of epochs to 20 and the batch size to 128 for every method, and again use the CosFace loss for consistency. For the optimizer we use Adam [32] with cosine annealing. The initial learning rate is set to 1e-4 for full and PA, and 1e-3 for partial and BN, which we find to be optimal for each method. For data augmentation, we use random horizontal flipping and random cropping with a random scale from 0.7 to 1.0; the cropped images are resized back to the original size.
D. Optimal k for NAC
Since the gallery set is very small, we cannot afford a separate validation set to optimize k individually for each dataset. Instead, we attempt to find a global value with optimal performance regardless of the fine-tuning method, if one exists.
We first fine-tune the encoders with the different layer configurations, which gives us five different encoders, including one without any fine-tuning: pretrained, full, partial, PA, and BN. We then search for the best parameter k for the NAC matcher by a grid search over [2, 4, 8, 16, 32, 128, 256, 512, 1024, C], where C is the total number of identities. Note that k = 1 refers to using cosine similarity instead of NAC, which we add for comparison. Since a single-value objective is preferred, we use the area under the curve (AUC) of the DIR@FAR curve instead of DIR values at individual FARs. We repeat this process for the different datasets and encoder architectures (a sketch of the search loop follows this subsection).
The results are shown in Fig. 5. We do not include the results on CASIA-WebFace as they show a similar trend. Excluding k = 1, which is not NAC, the results show a smooth unimodal curve with a peak at k = 16 or 32. This shows that the NAC matcher indeed has a globally optimal k value that is robust across datasets, encoders, and fine-tuning methods. Thus we choose k = 16 (k = 32 gives similar results) as the global parameter throughout this paper.
Note that when k = C, NAC becomes equivalent to a softmax over cosine similarity logits. However, this is notably inferior to k = 16, which implies that considering only the k nearest prototypes is superior to considering every gallery prototype.
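A minimal sketch of the grid search described above: for each candidate k, sweep FAR values, integrate DIR@FAR into an AUC, and keep the best k. The helper names are hypothetical, reusing a dir_at_far function like the one sketched after Sec. III-A.

import numpy as np

def auc_of_dir_far_curve(dir_at_far_fn, fars=np.logspace(-4, 0, 50)):
    # Trapezoidal area under the DIR@FAR curve on a log-spaced FAR grid.
    dirs = np.array([dir_at_far_fn(f) for f in fars])
    return np.trapz(dirs, fars)

# Hypothetical usage: score_fn(k) rebuilds the NAC scores for a given k
# and returns a closure computing DIR at a requested FAR.
# grid = [2, 4, 8, 16, 32, 128, 256, 512, 1024, C]
# best_k = max(grid, key=lambda k: auc_of_dir_far_curve(score_fn(k)))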
E. Comparison of Fine-Tuning Methods
We compare the OSFI performance of the pretrained (non-fine-tuned) model with six different combinations of classifier initialization schemes and layer fine-tuning configurations: random+full, linear probing+full, WI+full, WI+partial, WI+PA, and WI+BN. The matcher is fixed to cosine similarity. These correspond to rows 4-9 in Table IV.
First, to compare the classifier initialization schemes, we fix the fine-tuning scheme to full. With random initialization, rejection accuracy (DIR@FAR=0.001, 0.01, 0.1) and closed-set accuracy (DIR@FAR=1) drop severely. For linear probing, rejection accuracy improves while closed-set accuracy drops. Only WI clearly improves the encoder performance, supporting the superiority of weight imprinting.
Next, we fix the classifier initialization to WI and compare the layer fine-tuning configurations. full clearly has the worst performance. While PA is better than partial in closed-set accuracy, partial clearly outperforms PA in rejection accuracy. BN outperforms all others in closed-set accuracy by a large margin but sometimes falls behind partial in rejection accuracy.
With the aid of the NAC matcher, our method WI+BN+NAC outperforms all other methods in every aspect. Compared to the pretrained model, it gains on average 4.60%, 8.11%, 4.57%, and 1.68% higher DIR at FARs of 0.001, 0.01, 0.1, and 1.0, respectively.
F. Analysis of the Discriminative Quality of Different Fine-tuning Methods
How do the different layer fine-tuning configurations affect the final OSFI performance? To analyze this, we adopt three metrics: inter-class separation, intra-class variance, and the Davies-Bouldin Index (DBI) [33]. The definitions of the first two metrics are identical to those of Fig. 2. DBI is a metric for evaluating clustering quality, where DBI ≈ 0 means perfect clustering. We compute these metrics on the gallery features after fine-tuning; the results are shown in Table V.
Here we can clearly separate the configurations into two groups: full and partial versus PA and BN.
TABLE IV: DIR@FAR of different methods on the CASIA-WebFace and IJB-C datasets, using VGGNet-19 and ResNet-50 as the encoder. DIR@FAR=1 (100%) is the closed-set accuracy. The highest value in each column is marked in bold. For the first three rows the encoder is not fine-tuned and only the matchers are changed. The last row (WI+BN+NAC) is our proposed method.

Encoder | Classifier init. | Fine-tuned layers | Matcher | CASIA-WebFace DIR @ FAR (%)   | IJB-C DIR @ FAR (%)
        |                  |                   |         | 0.1   | 1.0   | 10.0  | 100.0 | 0.1   | 1.0   | 10.0  | 100.0
VGG19   | None             | None              | cos     | 25.23 | 52.97 | 70.07 | 80.89 | 28.35 | 45.55 | 61.71 | 73.80
VGG19   | None             | None              | EVM     | 37.57 | 57.75 | 71.03 | 80.78 | 35.03 | 53.64 | 63.34 | 73.70
VGG19   | None             | None              | NAC     | 25.15 | 55.68 | 71.41 | 80.89 | 36.73 | 51.92 | 64.27 | 73.80
VGG19   | Random           | Full              | cos     | 23.95 | 43.19 | 59.03 | 70.94 | 17.18 | 32.62 | 46.90 | 60.23
VGG19   | Linear probing   | Full              | cos     | 28.82 | 55.64 | 70.44 | 79.84 | 30.80 | 45.91 | 59.63 | 70.09
VGG19   | WI               | Full              | cos     | 27.63 | 57.58 | 72.02 | 80.94 | 35.49 | 50.52 | 63.56 | 73.53
VGG19   | WI               | Partial           | cos     | 28.91 | 57.31 | 72.29 | 81.16 | 34.81 | 51.98 | 64.53 | 73.89
VGG19   | WI               | PA                | cos     | 26.29 | 57.90 | 72.82 | 81.82 | 31.74 | 50.21 | 64.26 | 74.50
VGG19   | WI               | BN                | cos     | 25.39 | 56.65 | 72.54 | 82.14 | 32.19 | 48.74 | 63.87 | 74.43
VGG19   | WI               | BN                | NAC     | 25.94 | 58.01 | 72.92 | 82.14 | 38.09 | 53.08 | 65.30 | 74.43
Res50   | None             | None              | cos     | 23.85 | 58.06 | 74.15 | 83.69 | 32.11 | 48.05 | 65.31 | 76.96
Res50   | None             | None              | EVM     | 39.44 | 61.61 | 75.02 | 83.57 | 38.12 | 38.12 | 66.81 | 76.96
Res50   | None             | None              | NAC     | 21.24 | 60.23 | 75.31 | 83.69 | 36.67 | 54.53 | 68.14 | 76.96
Res50   | Random           | Full              | cos     | 25.31 | 45.43 | 60.80 | 72.44 | 14.88 | 32.05 | 49.39 | 61.88
Res50   | Linear probing   | Full              | cos     | 28.35 | 60.11 | 74.63 | 82.73 | 30.35 | 46.42 | 61.90 | 72.34
Res50   | WI               | Full              | cos     | 26.73 | 63.92 | 77.49 | 84.65 | 39.05 | 56.00 | 67.83 | 76.94
Res50   | WI               | Partial           | cos     | 25.98 | 64.66 | 78.07 | 85.02 | 44.31 | 57.11 | 69.13 | 77.49
Res50   | WI               | PA                | cos     | 24.89 | 63.85 | 77.58 | 85.01 | 36.69 | 54.86 | 68.30 | 77.63
Res50   | WI               | BN                | cos     | 25.70 | 65.83 | 79.66 | 86.73 | 40.29 | 55.71 | 69.29 | 78.74
Res50   | WI               | BN                | NAC     | 23.65 | 67.72 | 80.34 | 86.73 | 40.25 | 58.25 | 70.40 | 78.74
TABLE V
Inter-class separation, intra-class variance, DBI, and the AUC gain from using NAC (refer to Fig. 5) for each layer-finetuning configuration. These values are averaged across datasets and encoder architectures. ↑ means that a larger value is better, and vice versa.

                     Inter (↑)   Intra (↓)   DBI (↓)   ΔAUC (↑)
Pretrained Model     106.3°      34.5°       1.52      0.740
Full finetuning      106.7°      24.2°       0.87      0.025
Partial finetuning   106.4°      24.5°       0.90      0.058
Parallel Adapter     107.0°      31.8°       1.32      0.135
BN-only finetuning   107.3°      33.6°       1.46      0.335
Here we can easily separate these configurations into two groups: full and partial vs. PA and BN. The first group has inter-class separation similar to Pretrained and significantly smaller intra-class variance, which leads to a small DBI. This is in stark contrast with the second group.

With this observation, we can conjecture the different optimization strategies of each group. The first group was able to reduce the training loss easily by collapsing the gallery features into a single direction (shown by the small angle between intra-class features). This was possible because both full and partial directly updated the parameters of the convolutional filters. On the other hand, all convolutional filters were frozen for both PA and BN. This constraint may have prevented these methods from taking the shortcut, i.e., simply collapsing the gallery features, and instead led them to separate the embeddings of different identities. This explains why PA and BN have higher closed-set accuracy.

This can also explain the AUC gain (ΔAUC) when using NAC instead of cosine similarity. Features become redundant when they collapse, and so do the prototypes. The information from neighboring prototypes therefore becomes less helpful in rejecting unknown samples, leading to only a marginal gain from using NAC. This is why full and partial do not benefit from the NAC matcher.
Fig. 6. The performance of our method against the baseline w.r.t. different gallery sizes. The AUC of the DIR@FAR curve is used as the performance measure.

G. Performance with respect to Different Gallery Sizes

Fig. 6 shows the OSFI performance of our method against the baseline (pretrained encoder with the cos matcher) with respect to different gallery sizes. Our method consistently improves upon the baseline, except in the extreme case where only one image is provided for each identity.
V. CONCLUSION AND FUTURE WORKS

In this work we showed that combining a weight-imprinted classifier with BatchNorm-only tuning of the encoder effectively improves the encoder's OSFI performance without suffering from overfitting. We further improved performance with our novel NAC matcher in place of the commonly used cosine similarity. Future work will explore extending this idea to open-set few-shot recognition of generic images.

Acknowledgements: This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIP) (No. NRF-2022R1A2C1010710).
[Fig. 6: AUC (%) vs. the number of images per gallery identity on CASIA-WebFace and IJB-C with ResNet-50, comparing the pretrained baseline against ours.]

REFERENCES
[1] A. K. Jain and S. Z. Li, Handbook of Face Recognition. Springer, 2011, vol. 1.
[2] K. Simonyan and A. Zisserman, "Very deep convolutional networks for large-scale image recognition," arXiv preprint arXiv:1409.1556, 2014.
[3] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 770-778.
[4] Q. Cao, L. Shen, W. Xie, O. M. Parkhi, and A. Zisserman, "VGGFace2: A dataset for recognising faces across pose and age," in 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018). IEEE, 2018, pp. 67-74.
[5] Y. Guo, L. Zhang, Y. Hu, X. He, and J. Gao, "MS-Celeb-1M: A dataset and benchmark for large-scale face recognition," in European Conference on Computer Vision. Springer, 2016, pp. 87-102.
[6] F. Schroff, D. Kalenichenko, and J. Philbin, "FaceNet: A unified embedding for face recognition and clustering," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015, pp. 815-823.
[7] W. Liu, Y. Wen, Z. Yu, M. Li, B. Raj, and L. Song, "SphereFace: Deep hypersphere embedding for face recognition," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 212-220.
[8] H. Wang, Y. Wang, Z. Zhou, X. Ji, D. Gong, J. Zhou, Z. Li, and W. Liu, "CosFace: Large margin cosine loss for deep face recognition," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 5265-5274.
[9] J. Deng, J. Guo, N. Xue, and S. Zafeiriou, "ArcFace: Additive angular margin loss for deep face recognition," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 4690-4699.
[10] X. Wang, S. Zhang, S. Wang, T. Fu, H. Shi, and T. Mei, "Mis-classified vector guided softmax loss for face recognition," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, no. 07, 2020, pp. 12241-12248.
[11] Q. Meng, S. Zhao, Z. Huang, and F. Zhou, "MagFace: A universal representation for face recognition and quality assessment," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 14225-14234.
[12] H. Qi, M. Brown, and D. G. Lowe, "Low-shot learning with imprinted weights," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 5822-5830.
[13] S. Ioffe and C. Szegedy, "Batch normalization: Accelerating deep network training by reducing internal covariate shift," in International Conference on Machine Learning. PMLR, 2015, pp. 448-456.
[14] F. Li and H. Wechsler, "Open set face recognition using transduction," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 27, no. 11, pp. 1686-1697, 2005.
[15] M. Gunther, S. Cruz, E. M. Rudd, and T. E. Boult, "Toward open-set face recognition," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2017, pp. 71-80.
[16] E. M. Rudd, L. P. Jain, W. J. Scheirer, and T. E. Boult, "The extreme value machine," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 3, pp. 762-768, 2017.
[17] R. Vareto, S. Silva, F. Costa, and W. R. Schwartz, "Towards open-set face recognition using hashing functions," in 2017 IEEE International Joint Conference on Biometrics (IJCB). IEEE, 2017, pp. 634-641.
[18] B. Kulis and K. Grauman, "Kernelized locality-sensitive hashing," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 34, no. 6, pp. 1092-1104, 2011.
[19] G. Mateos-Aparicio, "Partial least squares (PLS) methods: Origins, evolution, and application to social sciences," Communications in Statistics - Theory and Methods, vol. 40, no. 13, pp. 2305-2317, 2011.
[20] H. Dao, D.-H. Nguyen, and M.-T. Tran, "Face recognition in the wild for secure authentication with open set approach," in International Conference on Future Data and Security Engineering. Springer, 2021, pp. 338-355.
[21] A. Bendale and T. E. Boult, "Towards open set deep networks," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 1563-1572.
[22] D.-W. Zhou, H.-J. Ye, and D.-C. Zhan, "Learning placeholders for open-set recognition," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 4401-4410.
[23] F. Wang, X. Xiang, J. Cheng, and A. L. Yuille, "NormFace: L2 hypersphere embedding for face verification," in Proceedings of the 25th ACM International Conference on Multimedia, 2017, pp. 1041-1049.
[24] I. Masi, Y. Wu, T. Hassner, and P. Natarajan, "Deep face recognition: A survey," in 2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI). IEEE, 2018, pp. 471-478.
[25] Anonymous, "Fine-tuning distorts pretrained features and underperforms out-of-distribution," in Submitted to The Tenth International Conference on Learning Representations, 2022, under review. [Online]. Available: https://openreview.net/forum?id=UYneFzXSJWh
[26] V. N. Vapnik and A. Y. Chervonenkis, "On the uniform convergence of relative frequencies of events to their probabilities," in Measures of Complexity. Springer, 2015, pp. 11-30.
[27] J. Frankle, D. J. Schwab, and A. S. Morcos, "Training BatchNorm and only BatchNorm: On the expressive power of random features in CNNs," arXiv preprint arXiv:2003.00152, 2020.
[28] D. Yi, Z. Lei, S. Liao, and S. Z. Li, "Learning face representation from scratch," arXiv preprint arXiv:1411.7923, 2014.
[29] B. Maze, J. Adams, J. A. Duncan, N. Kalka, T. Miller, C. Otto, A. K. Jain, W. T. Niggel, J. Anderson, J. Cheney et al., "IARPA Janus Benchmark-C: Face dataset and protocol," in 2018 International Conference on Biometrics (ICB). IEEE, 2018, pp. 158-165.
[30] K. Zhang, Z. Zhang, Z. Li, and Y. Qiao, "Joint face detection and alignment using multitask cascaded convolutional networks," IEEE Signal Processing Letters, vol. 23, no. 10, pp. 1499-1503, 2016.
[31] S.-A. Rebuffi, H. Bilen, and A. Vedaldi, "Efficient parametrization of multi-domain deep neural networks," 2018.
[32] D. P. Kingma and J. Ba, "Adam: A method for stochastic optimization," arXiv preprint arXiv:1412.6980, 2014.
[33] D. L. Davies and D. W. Bouldin, "A cluster separation measure," IEEE Transactions on Pattern Analysis and Machine Intelligence, no. 2, pp. 224-227, 1979.
4dAzT4oBgHgl3EQf9f77/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc43fb406e9849d9b6f29aaf45e93641a47de3c45ea1b1ba644c43bf730acc6f
+ size 939303

5NE0T4oBgHgl3EQfegCm/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc83b6835b8dfb6f81ccf740510c30388f89cf6ebfa5c1aeff8b7ce86ef296f0
+ size 108228

5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93620e003c473cea5db2706ecd35f400c54fbe27d9ed022525a107ebfb74b73c
+ size 1813327

5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a35290918cb2d2b484900b1c330611ea66ded7fb8e09d91f077925449da77b13
+ size 7209005

5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aae6f852013d37701d1d412e1822f0c6b1e9bc1dadc7a95038ea5833c9348dcd
+ size 595340

5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebea018128386d3d57fd47ce8ce2c139f98da18a7828e93b5eaaa31e686dc37a
+ size 7536685

5dE4T4oBgHgl3EQf1Q2P/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f4bbb5ef1f0f3c334cf3e2f8ff0ee2f0a1d24ce2b801ab588ef468ed7259a26
+ size 307878

69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db8eb38153d9c8b0ca334dd09b89ff026f99b30f80e9a9d5d007398395eae496
+ size 2478477

69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:268ca0a37dacc220d0401ce156eddd091b39909a4f2d125ea10f2734836bc00e
+ size 3932205
69E2T4oBgHgl3EQf7ggl/content/tmp_files/2301.04209v1.pdf.txt ADDED
@@ -0,0 +1,1660 @@
High Dimensional Analysis of Variance in Multivariate Linear Regression

Zhipeng Lou (1), Xianyang Zhang (2) and Wei Biao Wu (3)

January 12, 2023
arXiv:2301.04209v1 [stat.ME] 10 Jan 2023

Abstract

In this paper, we develop a systematic theory for high dimensional analysis of variance in multivariate linear regression, where the dimension and the number of coefficients can both grow with the sample size. We propose a new U type test statistic to test linear hypotheses and establish a high dimensional Gaussian approximation result under fairly mild moment assumptions. Our general framework and theory can be applied to deal with the classical one-way multivariate ANOVA and the nonparametric one-way MANOVA in high dimensions. To implement the test procedure in practice, we introduce a sample-splitting based estimator of the second moment of the error covariance and discuss its properties. A simulation study shows that our proposed test outperforms some existing tests in various settings.

Keywords: Data-splitting; Gaussian approximation; Multivariate analysis of variance; One-way layout; U statistics

1 Introduction

In statistical inference of multivariate linear regression, a fundamental problem is to investigate the relationships between the covariates and the responses. In this article, we aim to test whether a given set of covariates is associated with the responses by multivariate analysis of variance (MANOVA). To fix the idea, we build the multivariate linear regression model with p predictors as

    Y_i = B^⊤X_i + V_i   (i = 1, …, n),                                            (1.1)

where Y_i = (Y_{i1}, …, Y_{id})^⊤ and X_i = (X_{i1}, …, X_{ip})^⊤ are respectively the response vector and the predictor vector for the ith sample, B^⊤ = (B_1, …, B_p) is the unknown coefficient matrix with B_k ∈ R^d consisting of the coefficients on the kth covariate, and the innovation vectors V_1, …, V_n ∈ R^d are independent and identically distributed random vectors with E(V_1) = 0 and cov(V_1) = Σ. The first element of X_i can be set to 1 to reflect an intercept term. Equivalently, we can write (1.1) in the compact matrix form

    Y = XB + V,                                                                    (1.2)

(1) Department of Operations Research and Financial Engineering, Princeton, NJ 08544.
(2) Department of Statistics, Texas A&M University, College Station, TX 77843.
(3) Department of Statistics, University of Chicago, Chicago, IL 60637.
where Y = (Y_1, …, Y_n)^⊤, X = (X_1, …, X_n)^⊤ and V = (V_1, …, V_n)^⊤. Let C ∈ R^{m×p} be a matrix of rank m, where m ∈ {1, …, p}. We are interested in testing a collection of linear constraints on the coefficient matrix:

    H_0 : CB = 0   versus   H_1 : CB ≠ 0.                                          (1.3)

This testing problem has been extensively studied in the low dimensional setting, where both the number of predictors and the dimension of the response are relatively small compared to the sample size. A natural and popular choice is the classical likelihood ratio test when the errors are normally distributed; see Chapter 8 of Anderson (2003) for a review of theoretical investigations. In recent years, high dimensional data are increasingly encountered in various applications, and over the past decade there have been tremendous efforts to develop new methodologies and theories for high dimensional regression. The paradigm where d is 1 or small and p can increase with n has received considerable attention, while the one where d is very large and p is relatively small has been less studied. The model (1.2) in the latter setting has been applied to a number of research problems involving high-dimensional data types such as DNA sequence data, gene expression microarray data, and imaging data; see, for example, Zapala and Schork (2006), Wessel and Schork (2006) and Zapala and Schork (2012). Those related studies typically generate huge amounts of data (responses) that, due to their expense and sophistication, are often collected on a relatively small number of individuals, and investigate how the data can be explained by a certain number of predictor variables such as the ages of the individuals assayed, clinical diagnoses, strain memberships, cell line types, or genotype information (Zapala and Schork, 2006). Owing to the inappropriateness of applying the standard MANOVA strategy and the shortage of high-dimensional MANOVA theory, biological researchers have often resorted to some form of data reduction such as cluster analysis and factor analysis, which can suffer from many problems, as pointed out by Zapala and Schork (2012). In Zapala and Schork (2006, 2012), the authors incorporated a distance matrix to modify the standard MANOVA, but they commented that there is very little published material to guide a researcher as to which distance measure is most appropriate for a given situation. Motivated by these real-world applications, we aim to develop a general methodology for high dimensional MANOVA and lay a theoretical foundation for assessing statistical significance.

The testing problem (1.3) for model (1.2) is closely related to a group of high dimensional hypothesis tests. The two-sample mean test, for testing H_0 : µ_1 = µ_2, where µ_1 ∈ R^d and µ_2 ∈ R^d are mean vectors of two different populations, is a special case with p = 2, B = (µ_1, µ_2)^⊤ and C = (1, −1). There is a large literature adapting the Hotelling T^2 type statistic to the high-dimensional situation where d is large; see, for example, Bai and Saranadasa (1996), Chen and Qin (2010) and Srivastava et al. (2013) among many others. It can be generalized to testing the equality of multiple mean vectors in high dimensions; notable work includes Schott (2007), Cai and Xia (2014), Hu et al. (2017), Li et al. (2017), Zhang et al. (2017) and Zhou et al. (2017). In most existing work, the random samples were assumed to be Gaussian or to follow some linear structure as in Bai and Saranadasa (1996). In contrast, the testing problem we consider is much more general. For one thing, all the aforementioned high dimensional mean test problems fit into our framework; beyond that, we can deal with the more general multivariate linear regression in the presence of an increasing number of predictor variables. For another, we do not assume Gaussianity or any particular structure of the error vectors {V_i}_{i=1}^n.

Throughout the paper, we assume that p < n and that the design matrix X is of full column rank, so that X^⊤X is invertible. The conventional MANOVA test statistic for (1.3) is given by

    Q_n = |PY|_F^2 = Σ_{i=1}^n Σ_{j=1}^n P_{ij} Y_i^⊤Y_j,                          (1.4)

where |·|_F stands for the Frobenius norm and

    P = X(X^⊤X)^{−1}C^⊤{C(X^⊤X)^{−1}C^⊤}^{−1}C(X^⊤X)^{−1}X^⊤ = (P_{ij})_{n×n}

is the orthogonal projection matrix onto the column space of the matrix X(X^⊤X)^{−1}C^⊤. We shall reject the null hypothesis H_0 if Q_n is larger than some critical value. In the univariate case where d = 1, the asymptotic behavior of Q_n has been extensively studied in the literature; see Götze and Tikhomirov (1999) and Götze and Tikhomirov (2002) for detailed discussions. The validity of a test for (1.3) based on Q_n when d is large has long been an open problem. The first goal of the paper is to provide a solution to this open problem by rigorously establishing a distributional approximation for the traditional MANOVA test statistic when d is allowed to grow with n. Our key tool is a Gaussian approximation for degenerate U type statistics: under fairly mild moment conditions, quadratic functionals of non-Gaussian random vectors can be approximated by those of Gaussian vectors with the same covariance structure. It is worth mentioning that Chen (2018) established a Gaussian approximation result for high dimensional non-degenerate U statistics by Stein's method, which cannot be applied to the degenerate case here. From a technical point of view, we employ completely different arguments to bound the distance between the distribution functions of the test statistic and its Gaussian analogue.
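As a concrete illustration, the statistic in (1.4) can be computed directly; the following numpy sketch (ours, for illustration only) assumes X^⊤X and C(X^⊤X)^{−1}C^⊤ are invertible:

```python
import numpy as np

def manova_statistic(Y, X, C):
    """Conventional MANOVA statistic Q_n = |P Y|_F^2 of (1.4) for H0: C B = 0.
    Y: (n, d) responses; X: (n, p) design; C: (m, p) constraint matrix."""
    XtX_inv = np.linalg.inv(X.T @ X)
    A = X @ XtX_inv @ C.T                              # n x m
    P = A @ np.linalg.inv(C @ XtX_inv @ C.T) @ A.T     # projection onto col(X (X'X)^{-1} C')
    return float(np.sum((P @ Y) ** 2))                 # |P Y|_F^2
```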
The main contributions of this paper are three-fold. Firstly, we develop a systematic theory for the conventional MANOVA test statistic Q_n in the high dimensional setting. More specifically, we establish a dichotomy result: Q_n can be approximated either by a linear combination of independent chi-squared random variables or by a normal distribution, under different conditions; see Theorem 2.1. While this reveals interesting theoretical properties of the test statistic, it causes difficulties in applications, as one may not know which asymptotic distribution to use in practice. To overcome this difficulty, as the second main contribution of our paper, we propose a new U type test statistic. With the modified test statistic, such a dichotomy does not appear; see Theorem 2.5 for the asymptotic result. Thirdly, we propose a new estimator of the second spectral moment of the covariance matrix via a data-splitting technique. To the best of our knowledge, it is the first work concerning an unbiased and ratio consistent estimator in the multivariate linear regression model.

We now introduce some notation. Let I{·} denote the indicator function. For random variables X ∈ R and Y ∈ R, the Kolmogorov distance is defined by ρ(X, Y) = sup_{z∈R} |P(X ≤ z) − P(Y ≤ z)|. For q > 0, we write ∥X∥_q = (E|X|^q)^{1/q} if E|X|^q < ∞. For two matrices A = (a_{ij})_{i≤I,j≤J} and B = (b_{ij})_{i≤I,j≤J}, A ∘ B = (a_{ij}b_{ij})_{i≤I,j≤J} denotes their Hadamard product. For any positive integer m, we use I_m to denote the m × m identity matrix. For two sequences of positive numbers (a_n) and (b_n), we write a_n ≲ b_n if there exists some constant C such that a_n ≤ Cb_n for all large n. We use C, C_1, C_2, … to denote positive constants whose values may vary from place to place.
2 Theoretical results

We start with some notational definitions and basic assumptions. Let λ_1(Σ) ≥ … ≥ λ_d(Σ) ≥ 0 denote the eigenvalues of Σ = cov(V_1) and let ς = |Σ|_F = {Σ_{k=1}^d λ_k^2(Σ)}^{1/2}. For q ≥ 2, we define

    M_q = E|V_1^⊤V_2 / ς|^q   and   L_q = E|V_1^⊤ΣV_1 / ς^2|^{q/2}.                (2.1)
Assumption 2.1. Recall that P_{11}, …, P_{nn} are the diagonal elements of the matrix P. Assume that

    (1/m) Σ_{i=1}^n P_{ii}^2 → 0   as n → ∞.

Remark 1. Assumption 2.1 is quite natural and mild for testing (1.3). For instance, it automatically holds for the one-sample test of a mean vector, as m^{−1} Σ_{i=1}^n P_{ii}^2 = 1/n. Additionally, in the context of the K-sample test, as discussed in Section 3.1, Assumption 2.1 is satisfied as long as the minimum sample size goes to infinity. More generally, since Σ_{i=1}^n P_{ii} = m, a simple sufficient condition for Assumption 2.1 would be max_{1≤i≤n} P_{ii} → 0. Further discussions on this condition will be given in Remark 6 and Example 2.1.
2.1 Asymptotic distribution of the conventional MANOVA test statistics

Under the null hypothesis CB = 0, PXB = X(X^⊤X)^{−1}C^⊤{C(X^⊤X)^{−1}C^⊤}^{−1}CB = 0 and hence Q_n = |PXB + PV|_F^2 = |PV|_F^2 under H_0, which can be further decomposed as

    Q_n = Σ_{i=1}^n Σ_{j=1}^n P_{ij} V_i^⊤V_j
        = Σ_{i=1}^n P_{ii} V_i^⊤V_i + Σ_{i=1}^n Σ_{j≠i} P_{ij} V_i^⊤V_j =: D_n + Q_n^⋆.   (2.2)

Observe that D_n is a weighted sum of i.i.d. random variables and Q_n^⋆ is a second order degenerate U-statistic of high dimensional random vectors. These two terms can be differently distributed in the high dimensional setting. More specifically, since D_n and Q_n^⋆ are uncorrelated, we have var(Q_n) = var(D_n) + var(Q_n^⋆), where

    var(D_n) = Σ_{i=1}^n P_{ii}^2 ∥E_0(V_1^⊤V_1)∥_2^2   and   var(Q_n^⋆) = 2(m − Σ_{i=1}^n P_{ii}^2) ς^2,

with E_0(V_1^⊤V_1) = V_1^⊤V_1 − E(V_1^⊤V_1). When the dimension d increases with the sample size n, the magnitudes of var(D_n) and var(Q_n^⋆) can be quite different for non-Gaussian {V_i}_{i=1}^n; cf. Example 4.1. As a consequence, Q_n can exhibit different asymptotic null distributions. More precisely, to asymptotically quantify the discrepancy between var(D_n) and var(Q_n^⋆), under Assumption 2.1 we define

    Λ^2 = Σ_{i=1}^n P_{ii}^2 ∥E_0(V_1^⊤V_1)∥_2^2 / (mς^2).

Before presenting the distributional theory for Q_n, we first define its Gaussian analogue. Let Z_1, …, Z_n be i.i.d. N(0, Σ) Gaussian random vectors and write Z = (Z_1, …, Z_n)^⊤. Then the Gaussian analogue of Q_n is defined as the same quadratic functional of {Z_i}_{i=1}^n,

    G_n = |PZ|_F^2 = Σ_{i=1}^n Σ_{j=1}^n P_{ij} Z_i^⊤Z_j.                          (2.3)
Theorem 2.1. Let q = 2 + δ, where 0 < δ ≤ 1. Suppose Assumption 2.1 holds and

    Δ_q = [Σ_{i=1}^n Σ_{j≠i} |P_{ij}|^q / m^{q/2}] M_q + [Σ_{i=1}^n P_{ii}^{q/2} / m^{q/2}] L_q → 0.   (2.4)

1. Assume Λ → 0. Then, under (2.4) and the null hypothesis, we have

    ρ(Q_n, G_n) ≤ C_1 Λ^{2/5} + C_q Δ_q^{1/(2q+1)} + C_2 {(1/m) Σ_{i=1}^n P_{ii}^2}^{1/5} → 0.

2. Assume Λ → ∞ and that the Lindeberg condition holds for W_i = E_0(P_{ii} V_i^⊤V_i)/(Λς√m), that is, Σ_{i=1}^n E(W_i^2 I{|W_i| > ε}) → 0 for any ε > 0. Then, under the null hypothesis, we have

    {Q_n − m tr(Σ)} / (Λς√m) ⇒ N(0, 1).                                            (2.5)

Remark 2. Theorem 2.1 illustrates an interesting dichotomy: the conventional MANOVA test statistic Q_n can have one of two different asymptotic null distributions, depending on the magnitude of the unknown quantity Λ. This dichotomy poses extra difficulty for utilizing Q_n to test (1.3) in practice, as we need to predetermine which asymptotic distribution to use, and any subjective choice may lead to unreliable conclusions. To illustrate this, suppose now Λ → 0. For α ∈ (0, 1), let G_n^{−1}(α) denote the (1 − α)th quantile of G_n. Based on Theorem 2.1, an α level test for (1.3) is given by Φ_0 = I{Q_n > G_n^{−1}(α)}. However, if one implements Φ_0 in the case where Λ → ∞, then the type I error of Φ_0 satisfies P(Φ_0 = 1 | H_0) → 1/2, which implies that Φ_0 in this scenario (Λ → ∞) is no better than random guessing.

Remark 3. Recently, much attention has been paid to studying the dichotomy and similar phase transition phenomena of the asymptotic distributions of classical tests in the high dimensional setting. For instance, Xu et al. (2019) studied Pearson's chi-squared test in the scenario where the number of cells can increase with the sample size and demonstrated that the corresponding asymptotic distribution can be either chi-squared or normal. He et al. (2021) derived the phase transition boundaries of several standard likelihood ratio tests on the multivariate mean and covariance structures of Gaussian random vectors. Beyond these tests, we suspect a similar phenomenon can occur for many other traditional tests as the dimension increases with the sample size. More importantly, as in our paper, investigating these phase transition phenomena of classical tests not only contributes to theoretical development but also motivates new test procedures or more refined distributional approximations that are suitable in the high dimensional scenario.
The following lemma establishes an upper bound for Δ_q.

Lemma 2.2. Assume that M_q < ∞. Then we have

    Δ_q < 2 {(1/m) max_{1≤i≤n} P_{ii}}^{δ/2} M_q.
Remark 4. Condition (2.4) can be viewed as a Lyapunov-type condition for the high dimensional Gaussian approximation of Q_n. It is quite natural and does not directly impose any explicit restriction on the relation between the dimension d and the sample size n. In particular, (2.4) can be dimension free for some commonly used models; namely, (2.4) holds for arbitrary dimension d ≥ 1 as long as n → ∞. For instance, suppose that {V_i}_{i=1}^n follow the linear process model

    V_i = Aξ_i   (i = 1, …, n),                                                    (2.6)

where A is a d × L matrix for some integer L ≥ 1, ξ_i = (ξ_{i1}, …, ξ_{iL})^⊤, and {ξ_{iℓ}}_{i,ℓ∈N} are independent zero-mean random variables with uniformly bounded qth moment E|ξ_{iℓ}|^q ≤ C < ∞. Applying the Burkholder inequality leads to M_q ≤ (1 + δ)^q max_{1≤ℓ≤L} ∥ξ_{iℓ}∥_q^{2q}. Consequently, Lemma 2.2 reveals that a sufficient condition for Δ_q → 0 is

    (1/m) max_{1≤i≤n} P_{ii} → 0.                                                  (2.7)

It is worth mentioning that (2.7) depends only on the projection matrix P and does not impose any restriction on the dimension d. Moreover, under Assumption 2.1, (2.7) is automatically satisfied in view of max_{1≤i≤n} (P_{ii}/m)^2 ≤ m^{−2} Σ_{i=1}^n P_{ii}^2 → 0.
2.2 Modified U type test statistics

The dichotomous nature of the asymptotic null distribution makes Q_n unsuitable for testing (1.3) in the high dimensional setting. This motivates us to propose a modified U type test statistic for which such a dichotomy does not occur. To fix the idea, let B_0 ∈ R^{p×d} denote the coefficient matrix of model (1.2) under the null hypothesis, so that CB_0 = 0 and Y = XB_0 + V under H_0. Motivated by Theorem 2.1, a natural candidate to replace Q_n would be

    Q_{n,0} = Q_n − Σ_{k=1}^n P_{kk} (Y_k − B_0^⊤X_k)^⊤(Y_k − B_0^⊤X_k),           (2.8)

which coincides with Q_n^⋆ in (2.2) under the null hypothesis. However, B_0 is unknown in practice and hence Q_{n,0} is infeasible. The primary goal of this section is to propose a consistent empirical approximation U_n of Q_{n,0}. In particular, motivated by the discussion in Section 2.1, the modified test statistic U_n should satisfy, under H_0,

    U_n = Σ_{i=1}^n Σ_{j≠i} K_{ij} V_i^⊤V_j   and   (U_n − Q_{n,0}) / √var(Q_{n,0}) = o_P(1),

for some symmetric matrix K = (K_{ij})_{n×n}. The latter ensures that U_n is asymptotically equivalent to Q_{n,0} in (2.8). Towards this end, let B̂_0 be the least squares estimator of B under the constraint CB = 0. Then Y − XB̂_0 = (I_n − P_0)Y, where P_0 = X(X^⊤X)^{−1}X^⊤ − P is the projection matrix of model (1.2) under the null hypothesis. In view of (2.8), the modified U type test statistic is then defined by

    U_n = Q_n − Σ_{k=1}^n θ_k (Y_k − B̂_0^⊤X_k)^⊤(Y_k − B̂_0^⊤X_k),                 (2.9)

which under H_0 equals

    Σ_{i=1}^n (P_{ii} − Σ_{k=1}^n θ_k P̄_{ik,0}^2) V_i^⊤V_i + Σ_{i=1}^n Σ_{j≠i} (P_{ij} − Σ_{k=1}^n θ_k P̄_{ik,0}P̄_{jk,0}) V_i^⊤V_j
    = Σ_{i=1}^n Σ_{j≠i} (P_{ij} − Σ_{k=1}^n θ_k P̄_{ik,0}P̄_{jk,0}) V_i^⊤V_j,

where P̄_0 = I_n − P_0 = (P̄_{ij,0})_{n×n} and the last equality follows by taking θ_1, …, θ_n to be the solutions of the linear equations

    Σ_{k=1}^n P̄_{ik,0}^2 θ_k = P_{ii}   (i = 1, …, n).                             (2.10)

It is worth mentioning that the θ_k in (2.9) are typically not P_{kk}, as one would naturally be tempted to use in view of (2.8). We can view (2.10) as a detailed balance condition, as it removes the diagonal terms in (2.9). Denote θ = (θ_1, …, θ_n)^⊤ and rewrite (2.10) in the more compact matrix form

    (P̄_0 ∘ P̄_0)θ = (P_{11}, …, P_{nn})^⊤.                                         (2.11)

Let P_θ = P − P̄_0 D_θ P̄_0 = (P_{ij,θ})_{n×n}, where D_θ = diag(θ_1, …, θ_n) is a diagonal matrix. Then P_{ii,θ} = 0 for all i = 1, …, n in view of (2.11), and under H_0,

    U_n = tr(V^⊤P_θV) = Σ_{i=1}^n Σ_{j≠i} P_{ij,θ} V_i^⊤V_j.
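Putting (2.9)-(2.11) together, U_n can also be computed from the data as tr(Y^⊤P_θY), since one can verify that the subtracted term in (2.9) equals tr(Y^⊤P̄_0D_θP̄_0Y) when the residuals are P̄_0Y. A numpy sketch (an illustration under the stated invertibility conditions, not the authors' code):

```python
import numpy as np

def modified_u_statistic(Y, X, C):
    """Modified U type statistic U_n of (2.9)."""
    n = X.shape[0]
    XtX_inv = np.linalg.inv(X.T @ X)
    A = X @ XtX_inv @ C.T
    P = A @ np.linalg.inv(C @ XtX_inv @ C.T) @ A.T      # projection for the constraint
    P0 = X @ XtX_inv @ X.T - P                          # null-model projection matrix
    Pbar0 = np.eye(n) - P0
    theta = np.linalg.solve(Pbar0 ** 2, np.diag(P))     # (2.11); ** 2 is the Hadamard square
    P_theta = P - Pbar0 @ np.diag(theta) @ Pbar0        # zero diagonal by (2.10)
    return float(np.sum(Y * (P_theta @ Y)))             # tr(Y' P_theta Y)
```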
Before proceeding, we first introduce a sufficient condition under which U_n exists and is well defined.

Lemma 2.3. Assume that there exists a positive constant ϖ_0 < 1/2 such that

    max_{1≤i≤n} P_{ii,0} ≤ ϖ_0.                                                    (2.12)

Then the matrix P̄_0 ∘ P̄_0 is strictly diagonally dominant and |P_θ|_F^2 = m − Σ_{i=1}^n θ_i P_{ii}. Moreover, if max_{1≤i≤n} P_{ii} ≤ ϖ_1ζ for some positive constant ϖ_1 < 1/2, where ζ = (1 − 2ϖ_0)(1 − ϖ_0), then we have max_{1≤i≤n} |θ_i| ≤ ϖ_1 < 1/2.

Remark 5. Condition (2.12) ensures that the matrix P̄_0 ∘ P̄_0 is invertible. Consequently, the solution θ of (2.11) exists and is unique. It is worth noting that θ is independent of the dimension d and depends only on the projection matrices P and P_0. Moreover, as shown in the proof of Lemma 2.3,

    Σ_{i=1}^n θ_i P_{ii} ≤ (1/ζ) Σ_{i=1}^n P_{ii}^2   and   max_{1≤i≤n} |θ_i| ≤ (1/ζ) max_{1≤i≤n} P_{ii},

which are essential for upper bounding the quantity Δ_{q,θ} in Lemma 2.6 below. Consequently, under Assumption 2.1, supposing Σ_{i=1}^n P_{ii}^2 ≤ mζ/2 for sufficiently large n, we obtain

    var(U_n) = 2|P_θ|_F^2 ς^2 = 2(m − Σ_{i=1}^n θ_i P_{ii}) ς^2 > mς^2,

which ensures that the proposed test statistic U_n is non-degenerate and well defined.
Remark 6. Since col(X(X^⊤X)^{−1}C^⊤) ⊂ col(X), where col(·) denotes the column space, P_0 = X(X^⊤X)^{−1}X^⊤ − P defined above is also a projection matrix. Hence max{P_{ii}, P_{ii,0}} ≤ X_i^⊤(X^⊤X)^{−1}X_i uniformly for i ∈ {1, …, n}, and a sufficient condition for Lemma 2.3 would be

    max_{1≤i≤n} X_i^⊤(X^⊤X)^{−1}X_i ≤ min{ϖ_0, (1 − 2ϖ_0)(1 − ϖ_0)ϖ_1},            (2.13)

which is fairly mild on the design matrix X. More specifically, it is commonly assumed (Huber, 1973, Portnoy, 1985, Wu, 1986, Shao and Wu, 1987, Shao, 1988, Mammen, 1989, Navidi, 1989, Lahiri, 1992) for the linear regression model that max_{1≤i≤n} X_i^⊤(X^⊤X)^{−1}X_i → 0, which ensures a kind of "robustness of design" (Huber, 1973). It also implies Assumption 2.1 in view of Remark 1 and can be viewed as an imbalance measure of model (1.2) (Shao and Wu, 1987).

Example 2.1. Suppose X_1, …, X_n are independent Gaussian random vectors N(0, Γ), where the covariance matrix Γ ∈ R^{p×p} has minimal eigenvalue λ_min(Γ) > 0. Then, with probability at least 1 − 2 exp(−n/2) − n^{−1}, we have

    max_{1≤i≤n} X_i^⊤(X^⊤X)^{−1}X_i ≤ {9p + 18√(2p log n) + 36 log n} / n.         (2.14)

Consequently, condition (2.13) holds with high probability as long as p/n is sufficiently small.
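Condition (2.13) is also easy to check numerically, since max_{1≤i≤n} X_i^⊤(X^⊤X)^{−1}X_i is the largest diagonal entry of the hat matrix. A small illustrative sketch (ours):

```python
import numpy as np

def max_leverage(X):
    """Largest leverage max_i X_i' (X'X)^{-1} X_i of the design matrix X."""
    Q, _ = np.linalg.qr(X)                        # thin QR; leverages = squared row norms of Q
    return float(np.max(np.sum(Q ** 2, axis=1)))
```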
Proposition 2.4. Under the conditions of Lemma 2.3, we have E(U_n) ≥ 0. In particular, E(U_n) = 0 if and only if CB = 0.
2.3 Asymptotic distribution of the modified test statistics

The primary goal of this section is to establish a Gaussian approximation for the modified test statistic U_n. Following (2.3), the Gaussian analogue of U_n is defined by

    G_n = tr(Z^⊤P_θZ) = Σ_{i=1}^n Σ_{j≠i} P_{ij,θ} Z_i^⊤Z_j.

The following theorem establishes a non-asymptotic upper bound on the Kolmogorov distance between the distribution functions of U_n and its Gaussian analogue G_n. Compared with Theorem 2.1, it reveals that the modification of the test statistic Q_n in (2.9) removes the dichotomous nature of the asymptotic null distribution.
Theorem 2.5. Let q = 2 + δ, where 0 < δ ≤ 1. Assume that (2.12) holds and that

    Δ_{q,θ} = [Σ_{i=1}^n Σ_{j≠i} |P_{ij,θ}|^q / m^{q/2}] M_q + [Σ_{i=1}^n (Σ_{j≠i} P_{ij,θ}^2)^{q/2} / m^{q/2}] L_q → 0.

Then, under Assumption 2.1 and the null hypothesis, we have

    ρ(U_n, G_n) ≤ C_q Δ_{q,θ}^{1/(2q+1)} + C {(1/m) Σ_{i=1}^n P_{ii}^2}^{1/5} → 0.

Similar to Lemma 2.2, we establish an analogous upper bound for Δ_{q,θ} in the following lemma.

Lemma 2.6. Under condition (2.12), we have

    Δ_{q,θ} ≲ {(1/m) max_{1≤i≤n} P_{ii}}^{δ/2} M_q.
For α ∈ (0, 1), Proposition 2.4 and Theorem 2.5 motivate the following α level test for (1.3):

    Φ_θ = I{ U_n / (√2 ς|P_θ|_F) > c_{1−α} },                                      (2.15)

where c_{1−α} is the (1 − α)th quantile of the standardized G_n/√var(G_n).
Remark 7. It is worth mentioning that the approximating distribution G_n may or may not be asymptotically normal. Let λ_1(P_θ), …, λ_n(P_θ) denote the eigenvalues of the symmetric matrix P_θ. Being a quadratic functional of the Gaussian random vectors {Z_i}_{i=1}^n, G_n is distributed as a linear combination of independent chi-squared random variables,

    G_n =_D Σ_{k=1}^d Σ_{i=1}^n λ_k(Σ)λ_i(P_θ) η_{ik}(1) = Σ_{k=1}^d Σ_{i=1}^n λ_k(Σ)λ_i(P_θ) {η_{ik}(1) − 1},

where {η_{ik}(1)}_{i,k∈N} are independent χ_1^2 random variables and the last equality follows from the fact that Σ_{i=1}^n λ_i(P_θ) = Σ_{i=1}^n P_{ii,θ} = 0. More specifically, the Lindeberg-Feller central limit theorem and Lemma 2.3 imply that G_n/√var(G_n) ⇒ N(0, 1) if and only if

    λ_1(Σ) / (ς√m) → 0.                                                            (2.16)

Consequently, c_{1−α} in (2.15) is asymptotically equal to the standard normal quantile whenever (2.16) holds. When m → ∞, condition (2.16) automatically holds for arbitrary dimension d ≥ 1, as λ_1(Σ) ≤ ς. Otherwise, (2.16) is equivalent to tr(Σ^4)/ς^4 → 0, which is a common assumption to ensure the asymptotic normality of high dimensional quadratic statistics; see, for example, Bai and Saranadasa (1996), Chen and Qin (2010), Cai and Ma (2013), Yao et al. (2018) and Zhang et al. (2018) among others. In particular, this reveals that the asymptotic null distribution of U_n can be non-normal if (2.16) is violated. For example, let Y_1, …, Y_n ∈ R^d be i.i.d. random vectors with mean vector µ_Y = E(Y_1), and consider testing whether µ_Y = 0. Assume that Σ = cov(Y_1) = (Σ_{jk})_{d×d} has entries Σ_{jk} = ϑ + (1 − ϑ)I{j = k} for some constant ϑ ∈ (0, 1). Then λ_1(Σ)/(ς√m) → 1, and it follows from Theorem 2.5 that

    U_n / √var(U_n) = Σ_{i=1}^n Σ_{j≠i} Y_i^⊤Y_j / {ς√(2n(n − 1))} ⇒ (χ_1^2 − 1)/√2.

The simulation study in Section 5 shows that our Gaussian multiplier bootstrap approach has satisfactory performance regardless of whether U_n is asymptotically normal or not.
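To make (2.15) concrete, c_{1−α} can be approximated by Monte Carlo from the chi-squared representation above, given P_θ and an estimate of the spectrum of Σ. A rough numpy sketch (a hypothetical helper of ours, not from the paper; it ignores estimation error in Σ, whereas the paper's own calibration uses the Gaussian multiplier bootstrap mentioned above):

```python
import numpy as np

def critical_value(Sigma, P_theta, alpha=0.05, reps=20000, seed=0):
    """Monte Carlo approximation of c_{1-alpha} for the standardized G_n."""
    rng = np.random.default_rng(seed)
    lam_S = np.linalg.eigvalsh(Sigma)               # eigenvalues of Sigma
    lam_P = np.linalg.eigvalsh(P_theta)             # eigenvalues of P_theta (sum to 0)
    sd = np.sqrt(2.0) * np.linalg.norm(lam_S) * np.linalg.norm(lam_P)  # sqrt(var(G_n))
    eta = rng.chisquare(1, size=(reps, lam_S.size, lam_P.size)) - 1.0  # centered chi2_1
    G = np.einsum('k,i,rki->r', lam_S, lam_P, eta)
    return float(np.quantile(G / sd, 1.0 - alpha))
```

For large d and n one would stream the replications rather than materialize the full (reps, d, n) array.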
3 Applications

As mentioned in the introduction, our paradigm (1.3) is fairly general and can be applied to many commonly studied hypothesis testing problems. In this section, we consider two specific examples to illustrate the usefulness of the proposed U type test statistic and the corresponding asymptotic distribution theory.

3.1 High dimensional one-way MANOVA

Let {Y_{ij}}_{j=1}^{n_i}, i = 1, …, K, be K ≥ 2 independent samples following the model

    Y_{ij} = µ_i + V_{ij}   (j = 1, …, n_i; i = 1, …, K),

where µ_1, …, µ_K ∈ R^d are unknown mean vectors of interest and {V_{ij}}_{j∈N} are i.i.d. d-dimensional random vectors with E(V_{i1}) = 0 and cov(V_{i1}) = Σ. We are interested in testing the equality of the K mean vectors, namely, testing the hypotheses

    H_0 : µ_1 = … = µ_K   versus   H_1 : µ_i ≠ µ_l for some 1 ≤ i ≠ l ≤ K.
Following the construction of (2.9), we propose the U type test statistic

    U_{nK} = Σ_{i=1}^K P_{ii,K} Σ_{j=1}^{n_i} Σ_{k≠j} Y_{ij}^⊤Y_{ik} + Σ_{i=1}^K Σ_{l≠i} P_{il,K} Σ_{j=1}^{n_i} Σ_{k=1}^{n_l} Y_{ij}^⊤Y_{lk},   (3.1)

where n = Σ_{i=1}^K n_i is the total sample size,

    P_{ii,K} = (1/(n − 2)) {n/n_i − (n + K − 2)/(n − 1)}   and   P_{il,K} = (1/(n − 2)) {1/n_i + 1/n_l − (n + K − 2)/(n − 1)}.

In the context of the two-sample test for mean vectors, where K = 2, U_{nK} in (3.1) reduces to

    U_{nK} = Σ_{i=1}^{n_1} Σ_{j≠i} Σ_{k=1}^{n_2} Σ_{l≠k} (Y_{1i} − Y_{2k})^⊤(Y_{1j} − Y_{2l}) / {(n − 1)(n − 2)n_1n_2/n},

which coincides with the commonly used U type test statistic (Chen and Qin, 2010).
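A direct numpy transcription of (3.1) (our sketch, not the authors' code; per-group sums avoid double loops over observations):

```python
import numpy as np

def one_way_manova_stat(samples):
    """U type statistic U_{nK} of (3.1); samples[i] is an (n_i, d) array."""
    K = len(samples)
    n = sum(Y.shape[0] for Y in samples)
    c = (n + K - 2) / (n - 1)
    S = [Y.sum(axis=0) for Y in samples]                    # per-group sums
    stat = 0.0
    for i, Yi in enumerate(samples):
        ni = Yi.shape[0]
        P_ii = (n / ni - c) / (n - 2)
        stat += P_ii * (S[i] @ S[i] - np.sum(Yi ** 2))      # within-group terms, j != k
        for l in range(K):
            if l != i:
                nl = samples[l].shape[0]
                P_il = (1 / ni + 1 / nl - c) / (n - 2)
                stat += P_il * (S[i] @ S[l])                # between-group terms
    return float(stat)
```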
For each i ∈ {1, …, K}, let {Z_{ij}}_{j∈N} be i.i.d. centered Gaussian random vectors with covariance matrix cov(Z_{ij}) = Σ. Following (2.3), the Gaussian analogue of U_{nK} is defined by

    G_{nK} = Σ_{i=1}^K P_{ii,K} Σ_{j=1}^{n_i} Σ_{k≠j} Z_{ij}^⊤Z_{ik} + Σ_{i=1}^K Σ_{l≠i} P_{il,K} Σ_{j=1}^{n_i} Σ_{k=1}^{n_l} Z_{ij}^⊤Z_{lk}.

Let n_min = min_{1≤l≤K} n_l. Since max_{1≤i≤n} P_{ii} ≤ n_min^{−1}, Assumption 2.1 holds as long as n_min → ∞. The following proposition establishes a non-asymptotic upper bound on the Kolmogorov distance between the distribution functions of U_{nK} and G_{nK}.

Proposition 3.1. Let q = 2 + δ for some 0 < δ ≤ 1. Assume that n_min → ∞ and

    M̃_q = max_{1≤l,l′≤K} E|V_{l1}^⊤V_{l′2} / ς|^q < ∞,   where ς = |Σ|_F.

Then, under the null hypothesis, we have

    ρ(U_{nK}, G_{nK}) ≤ C_q {M̃_q n_min^{−δ/2}}^{1/(2q+1)} → 0.

Remark 8. It is worth mentioning that both the dimension d and the number of groups K can grow with the total sample size n. In particular, as discussed in Remark 4, if all K samples follow the linear process model in (2.6), then ρ(U_{nK}, G_{nK}) → 0 as long as n_min → ∞.
+ High dimensional nonparametric one-way MANOVA
803
+ For each i ∈ {1, . . . , K}, let Fi denote the distribution function of Yi1. We consider testing whether these
804
+ K independent samples are equally distributed, namely, testing the hypotheses
805
+ H0 : F1 = . . . = FK versus H1 : Fi ̸= Fl for some 1 ≤ i ̸= l ≤ K.
806
+ (3.2)
807
+ Being fundamental and important in statistical inference, (3.2) has been extensively studied; see, for example,
808
+ Kruskal and Wallis (1952), Akritas and Arnold (1994), Brunner and Puri (2001), Rizzo and Sz´ekely (2010)
809
+ and Thas (2010) among many others. However, all the aforementioned works mainly focus on the traditional
810
+ low dimensional scenario and testing (3.2) for high dimensional random vectors has been much less studied.
811
+ In this section, we propose a new U type test statistic for (3.2) following the intuition of (2.9) and establish
812
+ the corresponding distributional theory. In particular, our asymptotic framework is fairly general and allows
813
+ both the dimension d and the number of groups K to grow with n.
814
+ To begin with, for each i ∈ {1, . . . , K}, let φi(t) = E{exp(ıt⊤Yij)} denote the characteristic function of
815
+ Yij, where ı stands for the imaginary unit. Then it is equivalent to test the hypotheses
816
+ H0 : φ1 = . . . = φK versus H1 : φi ̸= φl for some 1 ≤ i ̸= l ≤ K.
817
+ (3.3)
818
+ Denote Yij(t) = exp(ıt⊤Yij). Similar to (3.1), our test statistic for (3.3) is defined by
819
+ �UnK =
820
+ K
821
+
822
+ i=1
823
+ Pii,K
824
+ ni
825
+
826
+ j=1
827
+
828
+ k̸=j
829
+
830
+ Yij(t)Yik(t)w(t)dt +
831
+ K
832
+
833
+ i=1
834
+
835
+ l̸=i
836
+ Pil,K
837
+ ni
838
+
839
+ j=1
840
+ nl
841
+
842
+ k=1
843
+
844
+ Yij(t)Ylk(t)w(t)dt,
845
+ where w(t) ≥ 0 is a suitable weight function such that the integrals above are well defined. Discussions of
846
+ some commonly used weight functions are given in Remark 9 below.
847
Before proceeding, we first define the Gaussian analogue of Ũ_{nK} under the null hypothesis that the K samples are equally distributed. Define the covariance function of Y_{11}(t) as

    Σ(t, s) = E{Y_{11}(t) − φ_1(t)}\overline{{Y_{11}(s) − φ_1(s)}} = φ_1(t − s) − φ_1(t)φ_1(−s)   (t, s ∈ R^d).

Throughout this section, by Mercer's theorem, we assume that the covariance function above admits the eigendecomposition

    Σ(t, s) = Σ_{m=1}^∞ λ_m ϕ_m(t)ϕ_m(s)   (t, s ∈ R^d),

where λ_1 ≥ λ_2 ≥ … ≥ 0 are the eigenvalues and ϕ_1, ϕ_2, … are the corresponding eigenfunctions. We now apply the Karhunen-Loève theorem. Let {Z_{ijk}}_{i,j,k∈N} be independent standard normal random variables and define the Gaussian processes

    Z_{ij}(t) = Σ_{m=1}^∞ √λ_m Z_{ijm} ϕ_m(t)   (t ∈ R^d).

Then, following (2.3), the Gaussian analogue of Ũ_{nK} is defined by

    G̃_{nK} = Σ_{i=1}^K P_{ii,K} Σ_{j=1}^{n_i} Σ_{k≠j} ∫ Z_{ij}(t)Z_{ik}(t)w(t)dt + Σ_{i=1}^K Σ_{l≠i} P_{il,K} Σ_{j=1}^{n_i} Σ_{k=1}^{n_l} ∫ Z_{ij}(t)Z_{lk}(t)w(t)dt.
Proposition 3.2. Let q = 2 + δ for some 0 < δ ≤ 1. Assume that n_min → ∞ and

    M̃_q = E| ∫_{R^d} E{Y_{11}(t)} E_0{Y_{12}(t)} w(t) dt / F |^q < ∞,   where F^2 = Σ_{m=1}^∞ λ_m^2.

Then, under the null hypothesis that these K independent samples are equally distributed, we have

    ρ(Ũ_{nK}, G̃_{nK}) ≤ C_q {M̃_q n_min^{−δ/2}}^{1/(2q+1)} → 0.
Remark 9. It is worth mentioning that the proposed test statistic Ũ_{nK} involves a high dimensional integral over t ∈ R^d, which can be computationally intractable in practice. To make Ũ_{nK} well defined and to facilitate the computation, we shall choose a suitable weight function w(t) such that Ũ_{nK} has a simple closed-form expression. In the literature, various kinds of weight functions have been proposed, such as the Gaussian kernel function (Gretton et al., 2012), the Laplace kernel function (Gretton et al., 2012) and the energy kernel function (Székely et al., 2007, Rizzo and Székely, 2010). For instance, let w(t) denote the density function of the random vector Xκ/√η for some κ > 0, where X ∼ N(0, I_d) and η ∼ χ_1^2 are independent (equivalently, Xκ/√η is a multivariate Cauchy random vector with location parameter 0 and scale parameter κ). Then it is straightforward to verify that

    ∫ Y_{ij}(t)\overline{Y_{lk}(t)} w(t) dt = ∫ cos{t^⊤(Y_{ij} − Y_{lk})} w(t) dt = exp(−κ|Y_{ij} − Y_{lk}|),

which is the same as the Laplace kernel function with 1/κ being its bandwidth, where |·| stands for the Euclidean distance. A more general result can be derived using Bochner's theorem; see, e.g., Theorem 3.1 of Gretton et al. (2009). Consequently, the proposed test statistic Ũ_{nK} reduces to

    Ũ_{nK} = Σ_{i=1}^K P_{ii,K} Σ_{j=1}^{n_i} Σ_{k≠j} exp(−κ|Y_{ij} − Y_{ik}|) + Σ_{i=1}^K Σ_{l≠i} P_{il,K} Σ_{j=1}^{n_i} Σ_{k=1}^{n_l} exp(−κ|Y_{ij} − Y_{lk}|),

which is fairly convenient to compute in practice. Moreover, a suitable choice of the weight function w(t) also facilitates the analysis of the quantities M̃_q and F.
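With the Laplace-kernel closed form, the statistic is straightforward to evaluate; a sketch using scipy's pairwise distances (our illustration, not the authors' code):

```python
import numpy as np
from scipy.spatial.distance import cdist

def nonparametric_manova_stat(samples, kappa=1.0):
    """Closed-form version of U~_{nK} from Remark 9 under the Cauchy weight w(t)."""
    K = len(samples)
    n = sum(Y.shape[0] for Y in samples)
    c = (n + K - 2) / (n - 1)
    stat = 0.0
    for i, Yi in enumerate(samples):
        ni = Yi.shape[0]
        P_ii = (n / ni - c) / (n - 2)
        G = np.exp(-kappa * cdist(Yi, Yi))          # Laplace-kernel Gram matrix
        stat += P_ii * (G.sum() - np.trace(G))      # off-diagonal pairs only (k != j)
        for l in range(K):
            if l != i:
                nl = samples[l].shape[0]
                P_il = (1 / ni + 1 / nl - c) / (n - 2)
                stat += P_il * np.exp(-kappa * cdist(Yi, samples[l])).sum()
    return float(stat)
```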
4 Practical implementation

In this section, we propose an unbiased estimator for ς^2 which is ratio-consistent under fairly mild moment conditions. To begin with, since E(V_i^⊤V_j)^2 = ς^2 for any i ≠ j, a natural unbiased U type estimator of ς^2 based on {V_i}_{i=1}^n would be

    ς̂_o^2 = {1/(n(n − 1))} Σ_{i=1}^n Σ_{j≠i} (V_i^⊤V_j)^2.                         (4.1)

Let P̄_1 = I_n − X(X^⊤X)^{−1}X^⊤ = (P̄_{ij,1})_{n×n} and V̂ = P̄_1Y = (V̂_1, …, V̂_n)^⊤. It is worth noting that directly substituting the residual vectors {V̂_i}_{i=1}^n into (4.1) yields a feasible but generally biased estimator of ς^2. More specifically, for any i ≠ j,

    E(V̂_i^⊤V̂_j)^2 = (P̄_{ii,1}P̄_{jj,1} + P̄_{ij,1}^2)ς^2 + P̄_{ij,1}^2 E(V_1^⊤V_1)(V_2^⊤V_2) + Σ_{k=1}^n (P̄_{ik,1}P̄_{jk,1})^2 {∥E_0(V_1^⊤V_1)∥_2^2 − 2ς^2},

which reveals that (V̂_i^⊤V̂_j)^2 is no longer unbiased for ς^2, even after proper scaling. This motivates us to propose a new unbiased estimator of ς^2 via data-splitting, which excludes the bias terms (V_i^⊤V_i)^2 and (V_i^⊤V_i)(V_j^⊤V_j). Without loss of generality, we assume that the sample size n is even in what follows.
1. Randomly split {1, …, n} into two halves A and A^c. Denote M_A = {(X_i, Y_i), i ∈ A} and M_{A^c} = {(X_i, Y_i), i ∈ A^c}.

2. For both M_A and M_{A^c}, fit model (1.1) by least squares and compute

    Σ̂_A = (n/2 − p)^{−1} V̂_A^⊤V̂_A   and   Σ̂_{A^c} = (n/2 − p)^{−1} V̂_{A^c}^⊤V̂_{A^c},

where V̂_A and V̂_{A^c} are the residual matrices of M_A and M_{A^c}, respectively.

3. Compute the estimator ς̂_A^2 = tr(Σ̂_A Σ̂_{A^c}).

Since Σ̂_A and Σ̂_{A^c} are independent and both are unbiased estimators of Σ, ς̂_A^2 is unbiased for ς^2, as

    E(ς̂_A^2) = tr{E(Σ̂_A)E(Σ̂_{A^c})} = tr(Σ^2) = ς^2.
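The three steps above translate directly into code. A numpy sketch (ours, not the authors' implementation), which avoids forming any d × d matrix by using the identity tr(Σ̂_AΣ̂_{A^c}) = |R_A R_{A^c}^⊤|_F^2/(n/2 − p)^2 for the residual matrices R:

```python
import numpy as np

def split_sigma2_estimate(Y, X, seed=0):
    """Data-splitting estimator of varsigma^2 = tr(Sigma^2); assumes n even, n/2 > p."""
    rng = np.random.default_rng(seed)
    n, p = X.shape
    perm = rng.permutation(n)
    R = []
    for idx in (perm[: n // 2], perm[n // 2:]):
        B_hat, *_ = np.linalg.lstsq(X[idx], Y[idx], rcond=None)  # least squares on one half
        R.append(Y[idx] - X[idx] @ B_hat)                        # residual matrix of that half
    return float(np.sum((R[0] @ R[1].T) ** 2)) / (n // 2 - p) ** 2
```

Averaging over several independent random splits, as discussed in Remark 11 below, reduces the split-to-split variability.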
Theorem 4.1. Assume that p/n < ϖ_2 for some positive constant ϖ_2 < 1/2 and that the least squares estimates are well defined for both M_A and M_{A^c}. Then we have

    E|ς̂_A/ς − 1|^2 ≲ M_4/n^2 + p·tr(Σ^4)/(n^2ς^4) + ∥E_0(V_1^⊤ΣV_1)∥_2^2/(nς^4).

Remark 10. The proof of Theorem 4.1 is given in Section 7.2, where a more general upper bound on E|ς̂_A/ς − 1|^τ is established for 1 < τ ≤ 2. Theorem 4.1 reveals that ς̂_A is ratio consistent under mild moment conditions. Suppose now that {V_i}_{i∈N} follow the linear process model (2.6) with max_{1≤ℓ≤L} E|ξ_{iℓ}|^4 ≤ C < ∞. Then M_4 is bounded and ∥E_0(V_1^⊤ΣV_1)∥_2^2 ≲ tr(Σ^4). Consequently,

    E|ς̂_A/ς − 1|^2 ≲ n^{−2} + tr(Σ^4)/(nς^4).

In this case, ς̂_A is ratio consistent for arbitrary dimension d ≥ 1 as long as n → ∞.
Remark 11. There are in total \(\binom{n}{n/2}\) different ways of splitting {1, ..., n} into two halves. To reduce the influence of the randomness of an arbitrary split, we can repeat the procedure independently multiple times and then take the average of the resulting estimators. We refer to Fan et al. (2012) for more discussion of data-splitting and repeated data-splitting.
Remark 12. Let \(\hat\Sigma=(n-p)^{-1}\hat V^{\top}\hat V\). Observe that \(E(\hat V_i^{\top}\hat V_j)=\bar P_{ij,1}\,\mathrm{tr}(\Sigma)\). We can estimate ς² via
\[
\hat\varsigma_S^2=\frac{\sum_{i,j=1}^{n}|\hat V_i^{\top}\hat V_j-\bar P_{ij,1}\mathrm{tr}(\hat\Sigma)|^2}{(n-p+2)(n-p-1)}
=\frac{(n-p)^2}{(n-p+2)(n-p-1)}\left\{|\hat\Sigma|_F^2-\frac{\{\mathrm{tr}(\hat\Sigma)\}^2}{n-p}\right\},
\]
which is the same as the estimator proposed in Srivastava and Fujikoshi (2006), where \(\{V_i\}_{i=1}^n\) are assumed to be Gaussian random vectors; see also Bai and Saranadasa (1996). However, for non-Gaussian \(\{V_i\}_{i=1}^n\) such that \(\|E_0(V_1^{\top}V_1)\|_2^2\neq 2\varsigma^2\), this estimator is generally biased, as
\[
E(\hat\varsigma_S^2)-\varsigma^2=\frac{\sum_{i=1}^{n}\bar P_{ii,1}^2}{(n-p)(n-p+2)}\left\{\|E_0(V_1^{\top}V_1)\|_2^2-2\varsigma^2\right\}.
\]
In particular, the bias of \(\hat\varsigma_S^2\) can diverge when \(\|E_0(V_1^{\top}V_1)\|_2^2\) is much larger than ς². Below we provide an example that typifies the diverging bias.
[Figure 1: Empirical averages of the values of \(|\hat\varsigma/\varsigma-1|\) for the Split, SF and Oracle estimators, plotted against d × 100.]
Example 4.1. Let \(\{\xi_i\}_{i\in\mathbb{N}}\) and \(\{\xi_i'\}_{i\in\mathbb{N}}\) be two sequences of independent Gaussian random vectors N(0, Σ), where Σ = (Σ_{ij}) has entries \(\Sigma_{ij}=\vartheta^{|i-j|}\) for some ϑ ∈ (0, 1). Following Wang et al. (2015), we draw i.i.d. innovations \(\{V_i\}_{i=1}^n\) from a scale mixture of two independent multivariate Gaussian distributions as follows:
\[
V_i=\nu_i\,\xi_i+3(1-\nu_i)\,\xi_i' \quad (i=1,\dots,n),
\]
where \(\{\nu_i\}_{i\in\mathbb{N}}\) are independent Bernoulli random variables with P(ν_i = 1) = 0.9. A simulation study with ϑ = 0.3 and 0.7 is given in Section 5. We report in Figure 1 the average values of \(|\hat\varsigma/\varsigma-1|\) for \(\hat\varsigma_A\), \(\hat\varsigma_o\) and \(\hat\varsigma_S\), based on 1000 replications with the numerical setup (n, p, m) = (100, 20, 10) and d = 200, 400, 800, 1000, 1200. For both values of ϑ, \(|\hat\varsigma_A/\varsigma-1|\) and \(|\hat\varsigma_o/\varsigma-1|\) are very close to 0, while \(|\hat\varsigma_S/\varsigma-1|\) is quite large. More precisely, we can derive that \(\|E_0(V_1^{\top}V_1)\|_2^2\approx(18+d)\varsigma^2\).
Substituting the ratio-consistent estimator \(\hat\varsigma_A^2\) into \(\mathrm{var}(U_n)=2|P_\theta|_F^2\varsigma^2\) yields \(U_n/(\hat\varsigma_A|P_\theta|_F)\Rightarrow N(0,2)\) under (2.16). Then, for α ∈ (0, 1), an asymptotic α-level test is given by
\[
\Phi_Z=I\left\{\frac{U_n}{\hat\varsigma_A|P_\theta|_F\sqrt{2}}>z_{1-\alpha}\right\}, \tag{4.2}
\]
where \(z_{1-\alpha}\) is the (1 − α)th quantile of the standard normal distribution.
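As a quick illustration, the decision rule (4.2) is a one-line computation once \(U_n\), \(\hat\varsigma_A\) and \(|P_\theta|_F\) are available; the sketch below takes those quantities as plain numbers.

    import numpy as np
    from scipy.stats import norm

    def clt_test(U_n, sigma_hat, P_theta_F, alpha=0.05):
        # Asymptotic level-alpha test (4.2): standardize U_n by ς̂_A |P_θ|_F √2
        # and compare with the (1 - alpha) standard normal quantile.
        z = U_n / (sigma_hat * P_theta_F * np.sqrt(2.0))
        return z > norm.ppf(1.0 - alpha)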
5 A simulation study

In this section, we conduct a Monte Carlo simulation study to assess the finite-sample performance of the proposed tests. In model (1.1), we write \(X_i=(1,x_i^{\top})^{\top}\in\mathbb{R}^p\) to include an intercept. Here \(x_1,\dots,x_n\in\mathbb{R}^{p-1}\) are i.i.d. N(0, I_{p−1}) random vectors. Let m < p. For all k ∈ {1, ..., p − m}, all entries of the coefficient vector B_k are i.i.d. uniform random variables on the interval (1, 2). Once the B_k's are generated, we keep their values fixed throughout the simulation. Our goal is to identify the zero B_k's by testing
\[
H_0:\ B_{p-m+1}=B_{p-m+2}=\cdots=B_p=0.
\]

In our simulation, we set (p, m) = (20, 10), n = 100, 200 and d = 400, 800, 1200. We consider two different designs for the innovations (V_i): the one introduced in Example 4.1 and the one in Example 5.1 below. In both examples, the parameter ϑ is set to 0.3 and 0.7.

Example 5.1. Let \(\{\xi_{ij}\}_{i,j\in\mathbb{N}}\) be i.i.d. random variables with E(ξ₁₁) = 0 and var(ξ₁₁) = 1. In particular, we consider two cases for (ξ_{ij}): they are drawn from the standardized t₅ distribution and from the standardized χ²₅ distribution, respectively. For some ϑ ∈ (0, 1), we generate
\[
V_i=\sqrt{1-\vartheta}\,\xi_i+\sqrt{\vartheta}\,(\xi_{i0},\xi_{i0},\dots,\xi_{i0})^{\top},\quad i\in\mathbb{N}.
\]
We shall apply a Gaussian multiplier bootstrap approach to implement our proposed test. The procedure is as follows (a code sketch is given after the list).

1. Compute the residual matrix \(\hat V=(\hat V_1,\dots,\hat V_n)^{\top}=\bar P_1 Y\). Generate i.i.d. N(0, 1) random variables \(\{\omega_{ij}\}_{i,j\in\mathbb{N}}\) and compute the bootstrap residuals \(V^{\star}=(V_1^{\star},\dots,V_n^{\star})^{\top}\), where
\[
V_i^{\star}=\frac{1}{\sqrt{n-p}}\sum_{j=1}^{n}\omega_{ij}\hat V_j \quad (i=1,\dots,n).
\]

2. Use V⋆ to compute \(\hat\varsigma_A^{\star}\) and the bootstrap test statistic \(U_n^{\star}=\mathrm{tr}(V^{\star\top}P_\theta V^{\star})\).

3. Repeat the first two steps independently B times and collect \(U_{nk}^{\star}\) and \(\hat\varsigma_{Ak}^{\star}\), k = 1, ..., B.

4. Let \(\hat c_{1-\alpha}\) be the (1 − α)th quantile of \(\{U_{nk}^{\star}/(\hat\varsigma_{Ak}^{\star}|P_\theta|_F\sqrt{2})\}_{k=1,\dots,B}\). Then our test is
\[
\Phi_B=I\left\{\frac{U_n}{\hat\varsigma_A|P_\theta|_F\sqrt{2}}>\hat c_{1-\alpha}\right\}, \tag{5.1}
\]
and we reject the null hypothesis whenever Φ_B = 1.
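The following is a minimal sketch of the bootstrap loop above. The helper `sigma_est` stands in for the computation of \(\hat\varsigma_A\) (e.g., the data-splitting estimator sketched earlier, applied to a residual matrix); its exact form on the bootstrap sample is an assumption of this sketch.

    import numpy as np

    def gmb_test(U_n, V_hat, P_theta, p, sigma_est, B=1000, alpha=0.05, rng=None):
        # Gaussian multiplier bootstrap (steps 1-4): resample residuals, rebuild
        # the standardized statistic B times, and compare U_n with the quantile.
        rng = np.random.default_rng() if rng is None else rng
        n = V_hat.shape[0]
        PF = np.linalg.norm(P_theta)                      # |P_theta|_F
        boot = np.empty(B)
        for b in range(B):
            W = rng.standard_normal((n, n))
            V_star = W @ V_hat / np.sqrt(n - p)           # V*_i = (n-p)^(-1/2) sum_j w_ij V̂_j
            U_star = np.sum((P_theta @ V_star) * V_star)  # tr(V*^T P_theta V*)
            boot[b] = U_star / (sigma_est(V_star) * PF * np.sqrt(2.0))
        c_hat = np.quantile(boot, 1.0 - alpha)
        return U_n / (sigma_est(V_hat) * PF * np.sqrt(2.0)) > c_hat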
Similar to G_n, \(U_n^{\star}\) is a quadratic functional of i.i.d. Gaussian random vectors conditional on {X, Y} and is distributed as a linear combination of independent chi-squared random variables. To justify the validity of the proposed Gaussian multiplier bootstrap approach, it suffices to bound the distance between the distribution functions of these two quadratic functionals, which can be established by verifying the normalized consistency (Xu et al., 2014) of the corresponding covariance matrix. However, this can be highly non-trivial in the high-dimensional setting and is beyond the scope of the current paper; hence we leave it for future work.

In our simulation, we set the bootstrap size B = 1000. For comparison, we also perform the test suggested in (4.2) based on the central limit theorem and the one proposed in Srivastava and Kubokawa (2013), which we denote by SK. For each test, we report the empirical size based on 2000 replications, as displayed in Table 1 and Table 2. The results suggest that our proposed test using the bootstrap procedure provides the best size accuracy in general, as its empirical sizes are close to the nominal level α.

For Example 4.1, both the CLT test and our Gaussian multiplier bootstrap method perform better than the SK test, since the latter is too conservative when d is large. As expected from our theoretical results, the normal approximation works reasonably well in this design.

For Example 5.1, the Gaussian multiplier bootstrap method outperforms the other two procedures in size accuracy in all cases. The SK test suffers from size distortion.
Table 1: Empirical sizes for Example 4.1 with α = 0.05

                       θ = 0.3                  θ = 0.7
   n      d      CLT    GMB    SK         CLT    GMB    SK
  100    400    0.057  0.047  0.041      0.059  0.051  0.036
         800    0.049  0.045  0.033      0.063  0.056  0.026
        1200    0.062  0.055  0.021      0.048  0.045  0.028
  200    400    0.056  0.052  0.042      0.052  0.047  0.037
         800    0.052  0.049  0.037      0.053  0.050  0.033
        1200    0.045  0.044  0.029      0.050  0.046  0.035
Table 2: Empirical sizes for Example 5.1 with α = 0.05

                          t5                       χ²5
   θ     n      d    CLT    GMB    SK         CLT    GMB    SK
  0.3   100    400  0.068  0.058  0.023      0.083  0.065  0.036
               800  0.082  0.066  0.023      0.074  0.058  0.016
              1200  0.082  0.068  0.015      0.067  0.053  0.011
        200    400  0.073  0.059  0.022      0.067  0.054  0.018
               800  0.071  0.057  0.012      0.074  0.058  0.014
              1200  0.076  0.059  0.011      0.077  0.058  0.011
  0.7   100    400  0.074  0.055  0.002      0.082  0.062  0.002
               800  0.084  0.066  0.001      0.085  0.071  0.000
              1200  0.073  0.057  0.000      0.076  0.062  0.001
        200    400  0.083  0.067  0.001      0.080  0.064  0.000
               800  0.068  0.050  0.000      0.075  0.062  0.000
              1200  0.070  0.051  0.001      0.074  0.056  0.000
The CLT test inflates the size more than the GMB method, which can be explained by the fact that condition (3.1) does not hold and the CLT for U_n fails. More specifically, for both θ = 0.3 and θ = 0.7, elementary calculations show that \(\lambda_1(\Sigma)/\varsigma\to 1\). As a result, (2.16) is violated when m = 10; see also the comment at the end of Section 2.2 for a discussion of the non-normality of U_n. To gain more insight, we display in Figure 2 the density plots of \(U_n/\sqrt{\mathrm{var}(U_n)}\) for n = 100, together with the density of N(0, 1). As can be seen from the plots, the distribution of \(U_n/\sqrt{\mathrm{var}(U_n)}\) is skewed to the right in all cases, which explains the inflated sizes of the CLT test.

More simulation studies on the power comparison of these three tests are conducted in Section 7.1.
[Figure 2: Density plots of \(U_n/\sqrt{\mathrm{var}(U_n)}\) for d = 400, 800, 1200, together with the N(0, 1) density.]
6 Data analysis

We apply the proposed method to two data sets. Our first dataset comes from a study of the impact of the gut microbiome on the host serum metabolome and insulin sensitivity in non-diabetic Danish adults (Pedersen et al., 2016). It consists of measurements of 1201 metabolites (325 serum polar metabolites and 876 serum molecular lipids) on 289 serum samples using mass spectrometry. The cleaned dataset was downloaded from https://bitbucket.org/hellekp/clinical-micro-meta-integration (Pedersen et al., 2018). We use this data set to identify insulin resistance (IR)-associated metabolites. IR was estimated by the homeostatic model assessment (Pedersen et al., 2016). Body mass index (BMI) is a confounder for this dataset, since it is highly correlated with IR (Spearman's ρ = 0.67) and is known to affect the serum metabolome. Two samples without an IR measurement were excluded. For metabolites with zero measurements, zeros were replaced by half of the minimal nonzero value. A log transformation was performed to make the data more symmetrically distributed before analysis. The p-values associated with the three methods (CLT, GMB, and SK) are all very close to zero, indicating a strong dependence between metabolites and IR. We further perform a linear regression analysis on each metabolite using IR and BMI as the covariates. Figure 3 (left panel) presents the histogram of the p-values for testing the significance of the coefficients associated with IR. We see a high peak close to zero, which provides strong evidence of the association between metabolites and IR. We further apply the Holm–Bonferroni procedure to the p-values to control the family-wise error rate at the 5% level, resulting in 164 discoveries.

Our second dataset is from a study of the smoking effect on the human upper respiratory tract (Charlson et al., 2010). The original data set contains samples from both throat and nose microbiomes and from both body sides. Here we focus on the throat microbiome of the left body side, which includes 60 subjects consisting of 32 nonsmokers and 28 smokers. More precisely, the data set is presented as a 60×856 abundance table recording the frequencies of detected operational taxonomic units (OTUs) in the samples using the 16S metagenomics approach, together with a metadata table capturing the sample-level information, including smoking status and sex. We transform the OTU abundances using the centered log-ratio (CLR) transformation after adding a pseudo-count of 0.5 to the zero counts. Our goal is to test the association of the throat microbiome with smoking status, adjusting for sex. The proposed method, using either the normal approximation or the bootstrap approximation, detects a strong association between the throat microbiome and smoking status. In contrast, the SK method fails to discover the association.

We further perform an OTU-wise linear regression analysis using each OTU (after the CLR transformation) as the response and the smoking status and sex as covariates. Figure 3 (right panel) presents the histogram of the p-values for testing the association between each OTU and smoking status after adjusting for sex in each linear regression. Interestingly, adjusting for multiplicity using either the Holm–Bonferroni procedure or the BH procedure at the 5% level gives zero discoveries (Zhou et al., 2021). These results suggest that the association between individual OTUs and smoking status is weak. However, after aggregating the weak effects from all the OTUs, the combined effect is strong enough to be detected by the proposed method.
Table 3: P-values of the three methods applied to the metabolomics and microbiome data sets.

                    Metabolomics               Microbiome
             CLT     GMB     SK         CLT          GMB     SK
  p-value    0.00    0.00    0.00       9.7 × 10⁻⁶   0.002   0.13
[Figure 3: Histograms of the p-values for testing the association between each individual omics feature and the variable of interest after adjusting for the confounder (left panel: metabolomics; right panel: microbiome); x-axis: p-value, y-axis: count/sum(count).]

References

Michael G. Akritas and Steven F. Arnold. Fully nonparametric hypotheses for factorial designs I: Multivariate repeated measures designs. J. Amer. Statist. Assoc., 89(425):336–343, 1994.

T. W. Anderson. An Introduction to Multivariate Statistical Analysis. Wiley Series in Probability and Statistics, 2003.

Zhidong Bai and Hewa Saranadasa. Effect of high dimension: by an example of a two sample problem. Statist. Sinica, 6(2):311–329, 1996.

Edgar Brunner and Madan L. Puri. Nonparametric methods in factorial designs. Statist. Papers, 42(1):1–52, 2001.

T. Tony Cai and Zongming Ma. Optimal hypothesis testing for high dimensional covariance matrices. Bernoulli, 19(5B):2359–2388, 2013.

T. Tony Cai and Yin Xia. High-dimensional sparse MANOVA. J. Multivariate Anal., 131:174–196, 2014.

Emily S. Charlson, Jun Chen, Rebecca Custers-Allen, Kyle Bittinger, Hongzhe Li, Rohini Sinha, Jennifer Hwang, Frederic D. Bushman, and Ronald G. Collman. Disordered microbial communities in the upper respiratory tract of cigarette smokers. PLoS ONE, 5(12):e15216, 2010.

Song Xi Chen and Ying-Li Qin. A two-sample test for high-dimensional data with applications to gene-set testing. Ann. Statist., 38(2):808–835, 2010.

Xiaohui Chen. Gaussian and bootstrap approximations for high-dimensional U-statistics and their applications. Ann. Statist., 46(2):642–678, 2018.

Jianqing Fan, Shaojun Guo, and Ning Hao. Variance estimation using refitted cross-validation in ultrahigh dimensional regression. J. R. Stat. Soc. Ser. B. Stat. Methodol., 74(1):37–65, 2012.

F. Götze and A. Tikhomirov. Asymptotic distribution of quadratic forms and applications. J. Theoret. Probab., 15(2):423–475, 2002.

F. Götze and A. N. Tikhomirov. Asymptotic distribution of quadratic forms. Ann. Probab., 27(2):1072–1098, 1999.

Arthur Gretton, Kenji Fukumizu, and Bharath K. Sriperumbudur. Discussion of: Brownian distance covariance. Ann. Appl. Stat., 3(4):1285–1294, 2009.

Arthur Gretton, Karsten M. Borgwardt, Malte J. Rasch, Bernhard Schölkopf, and Alexander Smola. A kernel two-sample test. J. Mach. Learn. Res., 13:723–773, 2012.

Yinqiu He, Bo Meng, Zhenghao Zeng, and Gongjun Xu. On the phase transition of Wilks' phenomenon. Biometrika, 108(3):741–748, 2021.

Jiang Hu, Zhidong Bai, Chen Wang, and Wei Wang. On testing the equality of high dimensional mean vectors with unequal covariance matrices. Ann. Inst. Statist. Math., 69(2):365–387, 2017.

Peter J. Huber. Robust regression: asymptotics, conjectures and Monte Carlo. Ann. Statist., 1:799–821, 1973.

William H. Kruskal and W. Allen Wallis. Use of ranks in one-criterion variance analysis. J. Amer. Statist. Assoc., 47(260):583–621, 1952.

Soumendra Nath Lahiri. Bootstrapping M-estimators of a multiple linear regression parameter. Ann. Statist., 20(3):1548–1570, 1992.

Huiqin Li, Jiang Hu, Zhidong Bai, Yanqing Yin, and Kexin Zou. Test on the linear combinations of mean vectors in high-dimensional data. TEST, 26(1):188–208, 2017.

Enno Mammen. Asymptotics with increasing dimension for robust regression with applications to the bootstrap. Ann. Statist., 17(1):382–400, 1989.

William Navidi. Edgeworth expansions for bootstrapping regression models. Ann. Statist., 17(4):1472–1478, 1989.

Helle Krogh Pedersen, Valborg Gudmundsdottir, Henrik Bjørn Nielsen, Tuulia Hyötyläinen, Trine Nielsen, Benjamin A. H. Jensen, Kristoffer Forslund, Falk Hildebrand, Edi Prifti, Gwen Falony, et al. Human gut microbes impact host serum metabolome and insulin sensitivity. Nature, 535(7612):376–381, 2016.

Helle Krogh Pedersen, Sofia K. Forslund, Valborg Gudmundsdottir, Anders Østergaard Petersen, Falk Hildebrand, Tuulia Hyötyläinen, Trine Nielsen, Torben Hansen, Peer Bork, S. Dusko Ehrlich, et al. A computational framework to integrate high-throughput '-omics' datasets for the identification of potential mechanistic links. Nature Protocols, 13(12):2781–2800, 2018.

Stephen Portnoy. Asymptotic behavior of M estimators of p regression parameters when p²/n is large. II. Normal approximation. Ann. Statist., 13(4):1403–1417, 1985.

Maria L. Rizzo and Gábor J. Székely. DISCO analysis: a nonparametric extension of analysis of variance. Ann. Appl. Stat., 4(2):1034–1055, 2010.

James R. Schott. Some high-dimensional tests for a one-way MANOVA. J. Multivariate Anal., 98(9):1825–1839, 2007.

Jun Shao. On resampling methods for variance and bias estimation in linear models. Ann. Statist., 16(3):986–1008, 1988.

Jun Shao and C.-F. J. Wu. Heteroscedasticity-robustness of jackknife variance estimators in linear models. Ann. Statist., 15(4):1563–1579, 1987.

Muni S. Srivastava and Yasunori Fujikoshi. Multivariate analysis of variance with fewer observations than the dimension. J. Multivariate Anal., 97(9):1927–1940, 2006.

Muni S. Srivastava and Tatsuya Kubokawa. Tests for multivariate analysis of variance in high dimension under non-normality. J. Multivariate Anal., 115:204–216, 2013.

Muni S. Srivastava, Shota Katayama, and Yutaka Kano. A two sample test in high dimensional data. J. Multivariate Anal., 114:349–358, 2013.

Gábor J. Székely, Maria L. Rizzo, and Nail K. Bakirov. Measuring and testing dependence by correlation of distances. Ann. Statist., 35(6):2769–2794, 2007.

Olivier Thas. Comparing Distributions. Springer Series in Statistics. Springer, New York, 2010.

Lan Wang, Bo Peng, and Runze Li. A high-dimensional nonparametric multivariate test for mean vector. J. Amer. Statist. Assoc., 110(512):1658–1669, 2015.

Jennifer Wessel and Nicholas J. Schork. Generalized genomic distance-based regression methodology for multilocus association analysis. The American Journal of Human Genetics, 79(5):792–806, 2006.

C.-F. J. Wu. Jackknife, bootstrap and other resampling methods in regression analysis. Ann. Statist., 14(4):1261–1350, 1986. With discussion and a rejoinder by the author.

Mengyu Xu, Danna Zhang, and Wei Biao Wu. L2 asymptotics for high-dimensional data. arXiv preprint arXiv:1405.7244, 2014.

Mengyu Xu, Danna Zhang, and Wei Biao Wu. Pearson's chi-squared statistics: approximation theory and beyond. Biometrika, 106(3):716–723, 2019.

Shun Yao, Xianyang Zhang, and Xiaofeng Shao. Testing mutual independence in high dimension via distance covariance. J. R. Stat. Soc. Ser. B. Stat. Methodol., 80(3):455–480, 2018.

Matthew A. Zapala and Nicholas J. Schork. Multivariate regression analysis of distance matrices for testing associations between gene expression patterns and related variables. Proceedings of the National Academy of Sciences, 103(51):19430–19435, 2006.

Matthew A. Zapala and Nicholas J. Schork. Statistical properties of multivariate distance matrix regression for high-dimensional data analysis. Frontiers in Genetics, 3:190, 2012.

Jin-Ting Zhang, Jia Guo, and Bu Zhou. Linear hypothesis testing in high-dimensional one-way MANOVA. J. Multivariate Anal., 155:200–216, 2017.

Xianyang Zhang, Shun Yao, and Xiaofeng Shao. Conditional mean and quantile dependence testing in high dimension. Ann. Statist., 46(1):219–246, 2018.

Bu Zhou, Jia Guo, and Jin-Ting Zhang. High-dimensional general linear hypothesis testing under heteroscedasticity. J. Statist. Plann. Inference, 188:36–54, 2017.

Huijuan Zhou, Kejun He, Jun Chen, and Xianyang Zhang. LinDA: Linear models for differential abundance analysis of microbiome compositional data. arXiv preprint arXiv:2104.00242, 2021.
69E2T4oBgHgl3EQf7ggl/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
6dE4T4oBgHgl3EQfBwu-/content/tmp_files/2301.04855v1.pdf.txt ADDED
@@ -0,0 +1,1475 @@
Estimation of thermal load on the nozzle base plate from small plumes at high temperature

Kamal Khemani¹, Pradeep Kumar¹*, Ganesh Natarajan²
¹ Numerical Experiment Laboratory (Radiation & Fluid Flow Physics), Indian Institute of Technology Mandi, Himachal Pradesh, 175075, India
² Discipline of Mechanical Engineering, Indian Institute of Technology Palakkad, Palakkad, Kerala, 678557, India

Abstract

A numerical study is performed to estimate the thermal load on the nozzle base plate, which lies upstream of the flow, from three hot plumes of pure CO2, pure H2O, and a 50-50 (%) mixture of CO2 and H2O, expanding through a convergent-divergent (CD) nozzle into a quiescent medium at 1.1 bar pressure and 298 K temperature. The base plate of the nozzle heats up due to the thermal radiation emitted by the hot gases in the plumes. The spectral radiative properties of the major participating gases, CO2 and H2O, are calculated from the HITEMP-2010 database. A small CD nozzle designed for the perfect expansion of air by a 1D calculation, with a throat diameter of 1.98 mm and an area ratio of 1.5942, is considered for the present study [1]. All three plumes are in the under-expanded state for this CD nozzle and hence expand rapidly at supersonic speed as they exit the nozzle, forming a series of expansion and compression waves. The hot plumes emanating from the nozzle develop very high temperatures in a small vicinity of the base plate due to diffusion. Barring this region, the maximum radiative flux on the base plate for the three plumes, i.e., the CO2 plume, the mixture plume and the H2O plume, is 4000 W/m², 2300 W/m² and 1300 W/m², respectively, and the maximum temperatures developed due to these fluxes are 323 K, 312 K and 308 K, respectively.

Keywords: Compressible flow, gas radiation, thermal load, underexpanded
Email: [email protected] (Pradeep Kumar)
arXiv:2301.04855v1 [physics.comp-ph] 12 Jan 2023
NOMENCLATURE

English symbols
c1, c2   First and second radiation constants
cp       Specific heat at constant pressure
e        Internal energy
h        Enthalpy
k        Thermal conductivity, turbulent kinetic energy
n̂        Unit normal vector
p        Pressure
q        Heat flux
s        Direction vector
t        Time
u        Velocity
x        Cartesian coordinate
Ar       Area ratio
Iη       Spectral intensity
Ibη      Planck function
R        Universal gas constant
Y        Species mass fraction

Greek symbols
βη       Spectral extinction coefficient
ϵ        Emissivity, turbulent dissipation rate
η        Wavenumber
κη       Spectral absorption coefficient
µ        Dynamic viscosity
∇ · q    Divergence of radiative heat flux
Ω        Solid angle
φ        Azimuthal angle
Φ        Scattering phase function
ρ        Density of fluid
σsη      Spectral scattering coefficient
θ        Polar angle
τ        Viscous stress tensor, transmissivity of gas, optical thickness

Subscripts
b        Blackbody
c        Conduction
cv       Convection
eff      Effective
η        Spectral
g        Gas
k        Turbulent kinetic energy
r        Radiation
t        Turbulent, total
w        Wall
1. Introduction

The exhaust plume from a nozzle is a product of the high temperature and high pressure gases exiting the combustion chamber. These gases expand rapidly in the convergent divergent (CD) nozzle to supersonic velocities because of the conversion of thermal energy into kinetic energy, which generates the thrust to lift off the rocket. The structure of the plume is non-uniform, containing different flow regimes and supersonic shock patterns. It appears as a bright luminous flame which emits radiation in the visible, ultraviolet (UV) and infrared (IR) parts of the electromagnetic spectrum [2]. The major part of plume radiation comes from participating gases like CO2, CO and H2O, which show strong emission of thermal radiation in the infrared region of the spectrum [3]. This heats up the base plate of the rocket and becomes a source of tracking by enemies in the case of missiles, fighter jets and combat aircraft.
Tien and Abu-Romia [4] used an analytical method to estimate the radiative heat flux on the rocket base plate from exhaust CO2 and H2O gas plumes with idealised physical models. They evaluated the apparent emissivity at the base plate for a semi-infinite cylindrical H2O gas plume at a temperature of 2000°R and a pressure of 1 atm, and for a CO2 gas plume at a temperature of 2500°R. Nelson [5] used a backward Monte Carlo method to estimate the radiative heat flux on the rocket base plate from the exhaust plume, and further studied the effect of the cone angle of the exhaust plume and of the scattering albedo on base plate heating. An increase in cone angle increased the heat flux on the base plate, whereas an increase in albedo decreased it; however, increasing the albedo increased the searchlight emission from the plume. Baek and Kim [6] calculated the heat load on the base plate from both the exhaust plume and the searchlight emission from the particles, using the finite volume method to solve the radiative transfer equation. Tan et al. [7] conducted a study in which they changed the temperature distribution of the plume from isothermal to non-isothermal and concluded that the thermal load on the base plate reduced 2-3 times for the non-isothermal plume. They also observed that increasing the optical thickness of the medium increased the radiative flux on the wall. Everson and Nelson [8] developed a reverse Monte Carlo method to predict base plate heating from the plume due to radiation and found that reverse Monte Carlo was computationally more efficient than the forward Monte Carlo method, owing to the fact that only the rays that strike the target point are considered. For their calculations they used band models for the gas spectrum and the Henyey-Greenstein function for particle scattering. They performed reverse Monte Carlo calculations for four different cases: a purely scattering plume, gas-only emission for a main engine plume, a solid rocket motor plume, and an absorbing, emitting and scattering plume with non-uniform temperature; H2O emitted radiation from the center of the plume, while the major contribution to the emission came from the Al2O3 particles. Kumar and Ramamurthy [9] estimated the radiative heat load on the rocket base plate using a forward Monte Carlo technique for a gray conical plume with axial and radial temperature variations. They found that the radiative heat flux changed drastically with the change in radial temperature profile, and that the radiative heat flux decreased with increasing altitude, as the plume cools down faster. Similar arguments were given by Gu and Baek [10], who examined the radiative heat flux from the WSGGM method for a solid rocket motor, for which the thermal load was estimated for long plumes of 5 and 10 km.
Accurate modelling of heat transfer due to radiation is necessary for the safe and efficient design of a rocket. Estimation of the radiative properties of gases is crucial, and it is the most important part of determining the heat transfer due to radiation accurately. The radiative properties of participating gases can be calculated using some of the most popular spectral databases, such as the High Resolution Transmission Spectroscopic Molecular Absorption database (HITRAN) [11], the Carbon-Dioxide Spectroscopic Database (CDSD) [12], the High Temperature spectroscopic absorption parameter database (HITEMP) [13], etc. The spectral absorption coefficients are highly erratic in nature, containing millions of spectral lines which attain the same value multiple times. This unnecessarily increases the computational cost required to solve the radiative transfer equation (RTE), since the line-by-line method performs a calculation for each and every line of the spectrum; it is therefore mostly used only for benchmarking purposes [14].

Many methods have been proposed to reduce the computational resource requirements, such as the full spectrum scaled and correlated k-distribution (FSSK/FSCK) [14], the lookup-table based full spectrum k-distribution [15], the spectral line weighted sum of gray gases [16], etc. The accuracy of these methods is well demonstrated for uniform compositions of gases [17, 18]; however, variation in the composition of gases and their mixtures poses another level of challenge and requires further modelling [19]. In order to use the lookup-table based FSK method, interpolation techniques must be adopted to obtain the properties at the current thermodynamic states of the gases in the domain. It is evident from the above literature that only a few works are available that calculate the heat load on the rocket base plate, and those use a fixed conical plume shape and fixed radiative properties of the gases. General heat transfer applications such as combustion, rocket propulsion and gasification contain numerous thermodynamic states; it is therefore useful to generate a database of the absorption coefficient at different temperatures, pressures and mole-fractions. The present case is optically thin, so the RTE is solved using the Planck mean absorption coefficient at the different thermodynamic states, taken from a lookup table. The thermal load on the nozzle base plate has been calculated from an accurate solution of the flow and temperature fields obtained by solving the complete set of governing equations. The radiative properties are obtained from the HITEMP-2010 database, stored in the form of a lookup table covering a range of thermodynamic states of the gases, and utilized during the solution of the radiative transfer equation. The thermodynamic states for which data are available can be used directly; the Planck mean absorption coefficient for unavailable thermodynamic states can easily be calculated using a multidimensional linear interpolation technique. The fvDOM numerical method is used for the solution of the RTE coupled with the fluid flow, using a pressure based compressible flow application, sonicRadFoam, modified from the sonicFoam application of OpenFOAM [20]. It additionally includes the work done by viscous forces, the species transport equation, and the RTE with a Planck mean absorption-emission model.

The manuscript is organised as follows: section 2 describes the problem statement together with the mathematical models and governing differential equations, section 3 presents the verification and validation studies, section 4 presents the results and discussion, and the present work is concluded thereafter.
2. Problem description

The convergent-divergent (CD) nozzle has a throat diameter of 1.98 mm and an area ratio of 1.5942, and the lengths of the convergent and divergent sections are 7 mm and 14 mm, respectively, as shown in Fig. 1, which also includes the buffer zone into which the jet emanates in the atmosphere. The base plate is attached at the end, and the fluid expands from a stagnation pressure and temperature of 7.11 bar and 2000 K, respectively, to a quiescent medium at the atmospheric condition of 1 atm pressure and 298 K. The present CD nozzle, designed for the perfect expansion of air by a one dimensional calculation, has been considered for the flow of three plumes whose constituents are pure CO2, pure water vapour, and a 50-50 (%) mixture of CO2 and H2O, expanding from the above pressure and temperature. Initially the whole domain is filled with N2 gas at 1 atm pressure and 298 K temperature. The following assumptions have been made in the present study.

1. The Reynolds-averaged Navier-Stokes assumption is used to model turbulent flow.
2. The participating medium only absorbs or emits thermal radiation but does not scatter.
3. The refractive index of the medium and of the walls is equal to one.
4. Turbulence-radiation interaction is neglected.
5. A constant turbulent Prandtl number is assumed.
[Figure 1: Schematic diagram of the geometry for the calculation of the thermal load on the nozzle base plate from the hot plume (inlet, 7 mm convergent section, 14 mm divergent section; buffer zone of 28 mm with base plate, wall, axis and outlet).]

2.1. Governing equations

The density and temperature fluctuations must be accounted for in the compressible flow of a fluid, along with the velocity and pressure fluctuations. To account for these factors, mass-based averaging, commonly known as Favre averaging [21, 22], is used to describe the flow and energy transfer for compressible turbulent fluids. It is defined as

\[
\tilde\varphi=\frac{\overline{\rho\varphi}}{\bar\rho} \tag{1}
\]

where ρ is the density of the fluid, φ is a scalar, and the time average of the density is defined as

\[
\bar\rho=\frac{1}{T}\int_{0}^{T}\rho\,dt \tag{2}
\]

The continuity and momentum equations read

\[
\frac{\partial\bar\rho}{\partial t}+\frac{\partial\bar\rho\tilde u_i}{\partial x_i}=0 \tag{3}
\]

\[
\frac{\partial\bar\rho\tilde u_i}{\partial t}+\frac{\partial\bar\rho\tilde u_i\tilde u_j}{\partial x_j}
=-\frac{\partial\bar p}{\partial x_i}+\frac{\partial\tilde\tau_{ij}}{\partial x_j} \tag{4}
\]

where,
\[
\tilde\tau_{ij}=\mu_{\mathrm{eff}}\left(\frac{\partial\tilde u_i}{\partial x_j}+\frac{\partial\tilde u_j}{\partial x_i}
-\frac{2}{3}\delta_{ij}\frac{\partial\tilde u_k}{\partial x_k}\right)-\frac{2}{3}\bar\rho k\,\delta_{ij} \tag{5}
\]

where µeff is the effective dynamic viscosity of the fluid, the sum of the molecular and turbulent dynamic viscosities, i.e., (µ + µt). The molecular viscosity of the gases is given by Sutherland's law,

\[
\mu=\frac{A_s\,T^{3/2}}{T+T_s} \tag{6}
\]

where As and Ts are Sutherland's constants and depend on the type of gas and its molecules (a minimal numerical sketch of Eq. (6) is given below), and µt is the turbulent viscosity, calculated as

\[
\mu_t=\bar\rho\,C_\mu\frac{k^2}{\epsilon} \tag{7}
\]

where k is the turbulent kinetic energy, ϵ is the turbulent dissipation rate, and Cµ is a closure constant.
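For illustration, here is a minimal sketch of Eq. (6). The default constants are the commonly used values for air and are an assumption of this sketch, since the paper does not list its Sutherland constants.

    def sutherland_viscosity(T, A_s=1.458e-6, T_s=110.4):
        # Sutherland's law, Eq. (6): mu = A_s * T^1.5 / (T + T_s).
        # Defaults are standard air values (A_s in kg/(m·s·K^0.5), T_s in K);
        # other gases require their own constants.
        return A_s * T**1.5 / (T + T_s)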
These quantities are modelled by the two-equation (k-ϵ) turbulence model, given as

\[
\frac{\partial\bar\rho k}{\partial t}+\frac{\partial\bar\rho\tilde u_j k}{\partial x_j}
=\frac{\partial}{\partial x_i}\left[\left(\mu+\frac{\mu_t}{\sigma_k}\right)\frac{\partial k}{\partial x_i}\right]+P_k-\bar\rho\epsilon \tag{8}
\]

where \(k=\frac{1}{2}\sum_{i=1}^{3}\overline{\rho u_i''u_i''}/\bar\rho\) is the turbulent kinetic energy and \(P_k\) is the production of turbulent kinetic energy, and

\[
\frac{\partial\bar\rho\epsilon}{\partial t}+\frac{\partial\bar\rho\tilde u_j\epsilon}{\partial x_j}
=\frac{\partial}{\partial x_i}\left[\left(\mu+\frac{\mu_t}{\sigma_\epsilon}\right)\frac{\partial\epsilon}{\partial x_i}\right]
+C_{\epsilon 1}\frac{\epsilon}{k}P_k-C_{\epsilon 2}\bar\rho\frac{\epsilon^2}{k} \tag{9}
\]

where \(\epsilon=\nu\,\overline{\dfrac{\partial u_i''}{\partial x_j}\dfrac{\partial u_i''}{\partial x_j}}\) is the turbulent dissipation rate. The values of the closure constants are Cµ = 0.09, σk = 1, σϵ = 1.3, Cϵ1 = 1.44, Cϵ2 = 1.92. The pressure is calculated from the equation of state for an ideal gas,

\[
\bar p=\bar\rho R\tilde T \tag{10}
\]

where R is the gas constant and T is the temperature. The distribution of the species is calculated by the species transport equation,
\[
\frac{\partial\bar\rho\tilde Y_i}{\partial t}+\frac{\partial\bar\rho\tilde u_j\tilde Y_i}{\partial x_j}
=\frac{\partial}{\partial x_j}\left(\mu_{\mathrm{eff}}\frac{\partial\tilde Y_i}{\partial x_j}\right) \tag{11}
\]

where Yi is the species mass fraction, given as

\[
Y_i=\frac{\rho_i}{\rho} \tag{12}
\]
The distribution of the temperature field is calculated from the energy equation,

\[
\frac{\partial\bar\rho\tilde E}{\partial t}+\frac{\partial\bar\rho\tilde u_j\tilde E}{\partial x_j}
+\frac{\partial\tilde u_j\bar p}{\partial x_j}
=-\frac{\partial\tilde q_j}{\partial x_j}+\frac{\partial\tilde u_j\tilde\tau_{ij}}{\partial x_j} \tag{13}
\]

where E is the total energy, which includes the internal energy e, the kinetic energy K and the turbulent kinetic energy k. The heat flux is defined as

\[
q_j=-\frac{c_p\mu_{\mathrm{eff}}}{Pr}\frac{\partial T}{\partial x_j}+\tilde q_r \tag{14}
\]

The specific heat cp depends on temperature and is taken from the JANAF thermodynamic tables in the polynomial form

\[
c_p=R\,((((a_4T+a_3)T+a_2)T+a_1)T+a_0) \tag{15}
\]

where a0, a1, a2, a3, a4 are the polynomial coefficients.
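Evaluating Eq. (15) is a straightforward Horner-form computation; a minimal sketch follows, in which the coefficient values and the gas constant are placeholders, not JANAF data.

    def cp_polynomial(T, a, R=287.0):
        # Eq. (15) in Horner form: cp = R*((((a4*T + a3)*T + a2)*T + a1)*T + a0).
        # a = (a0, a1, a2, a3, a4); R = 287 J/(kg·K) is the air value, assumed here.
        a0, a1, a2, a3, a4 = a
        return R * ((((a4 * T + a3) * T + a2) * T + a1) * T + a0)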
\[
q_r=\int_{0}^{\infty}\!\!\int_{4\pi} I_\eta(\hat s)\,|\hat n\cdot\hat s|\,d\Omega\,d\eta \tag{16}
\]

where qr is the radiative heat flux, which can be evaluated on the wall, and n̂ is the surface normal vector. The divergence of the radiative heat flux is calculated as

\[
\nabla\cdot q=\int_{0}^{\infty}\kappa_\eta\left(4\pi I_{b\eta}-\int_{4\pi} I_\eta\,d\Omega\right)d\eta
\quad\text{or}\quad
\nabla\cdot q=\int_{0}^{\infty}\kappa_\eta\,(4\pi I_{b\eta}-G_\eta)\,d\eta \tag{17}
\]

where η is the wavenumber, Ibη is the Planck function, κη is the spectral absorption coefficient, Gη is the spectral irradiation, and Iη(ŝ) is the intensity field, which is obtained by solving the radiative transfer equation (RTE) as explained below. The above equations are subject to the boundary conditions given in Table 1.
The intensity field in equation (17) is obtained by solving the spectral radiative transfer equation (s-RTE) for an absorbing-emitting (non-scattering) medium,

\[
\frac{dI_\eta}{ds}=\kappa_\eta I_{b\eta}-\kappa_\eta I_\eta \tag{18}
\]

subject to the boundary condition

\[
I_\eta(r_w,\hat s)=\epsilon_{w\eta} I_{b\eta}(r_w)
+\frac{1-\epsilon_{w\eta}}{\pi}\int_{\hat n\cdot\hat s'>0} I_\eta(r_w,\hat s')\,|\hat n\cdot\hat s'|\,d\Omega'
\qquad (\hat n\cdot\hat s<0) \tag{19}
\]
where ϵwη is the spectral wall emissivity, Iη is the spectral intensity along ŝ, Ibη is the Planck function, κη is the spectral absorption coefficient, η is the wavenumber, and Ω is the solid angle. The length scale of the current problem is very small, i.e., the optical thickness τ = κηL ≪ 1. This means that the absorptivity of the medium is far less than 1, so most of the radiative energy escapes the medium without being absorbed. Thus, in the radiative source term (Eq. 17), the absorption contribution is negligible compared with the emission,

\[
\int_{0}^{\infty}\kappa_\eta G_\eta\,d\eta\ \ll\ \int_{0}^{\infty}4\pi\kappa_\eta I_{b\eta}\,d\eta,
\]

and the radiative source term (Eq. 17) becomes

\[
\nabla\cdot q=\int_{0}^{\infty}4\pi\kappa_\eta I_{b\eta}\,d\eta=4\kappa_p\sigma T^4,
\qquad
\kappa_p=\frac{\int_{0}^{\infty}\kappa_\eta I_{b\eta}\,d\eta}{\int_{0}^{\infty} I_{b\eta}\,d\eta},
\]

where κp is the Planck mean absorption coefficient. Therefore, the solution for the present case can be obtained with a Planck mean absorption coefficient based radiation property model.
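For illustration, κp can be evaluated from tabulated spectral data by straightforward numerical quadrature; a minimal sketch follows. The constant c1 only sets the scale of Ibη and cancels in the ratio, so only c2 (= hc/kB ≈ 1.4388 cm·K) matters.

    import numpy as np

    def planck_mean_kappa(eta, kappa_eta, T):
        # Planck mean: kappa_p = ∫ kappa_eta * I_b,eta d(eta) / ∫ I_b,eta d(eta).
        # eta: wavenumbers in 1/cm; kappa_eta: spectral absorption coefficients.
        c1 = 1.191e-8   # scale of the Planck function; cancels in the ratio
        c2 = 1.4388     # cm·K, second radiation constant
        I_b = c1 * eta**3 / np.expm1(c2 * eta / T)   # spectral Planck function
        return np.trapz(kappa_eta * I_b, eta) / np.trapz(I_b, eta)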
Table 1: Boundary conditions for the plume simulation with thermal radiation

Field            Inlet                                 Outlet                   Wall
Pressure (p)     totalPressure: Po = P + 0.5 ρ U²,     fixedValue: P = 1 atm    zeroGradient: ∇P = 0
                 Po = 7.11 bar
Velocity (U)     pressureInletOutletVelocity:          inletOutlet:             noSlip: U = (0, 0, 0)
                 inflow U = (0,0,0); outflow ∇U = 0    inflow U = (0,0,0);
                                                       outflow ∇U = 0
Temperature (T)  fixedValue: T = 2000 K                zeroGradient: ∇T = 0     qc + qr = 0 [23]
Species (x)      fixedValue: x = 1                     zeroGradient: ∇x = 0     zeroGradient: ∇x = 0
                 (for the pure H2O plume)
Thus, the RTE becomes

\[
\frac{dI_p}{ds}=\kappa_p\,(I_b-I_p), \tag{20}
\]

with the boundary condition

\[
I_p=\epsilon_w I_b+\frac{1-\epsilon_w}{\pi}\int_{\hat n\cdot\hat s'>0} I_p\,|\hat n\cdot\hat s'|\,d\Omega'
\qquad (\hat n\cdot\hat s<0) \tag{21}
\]
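Over a step of length ds with locally constant properties, Eq. (20) has the exact solution I(s+ds) = Ib + (I(s) − Ib) exp(−κp ds). The update below illustrates the kind of per-ray marching used in discrete-ordinates sweeps; it is shown for intuition only and is not the fvDOM finite-volume discretization itself.

    import numpy as np

    def march_intensity(I_in, kappa_p, I_b, ds):
        # Exact single-step integration of Eq. (20) along a ray:
        # the intensity relaxes toward the local blackbody value I_b
        # with optical depth tau = kappa_p * ds.
        tau = kappa_p * ds
        return I_b + (I_in - I_b) * np.exp(-tau)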
The Planck mean absorption coefficients are calculated for a range of thermodynamic states of the gases at certain intervals, as mentioned in [18], and stored in the form of a lookup table. Furthermore, interpolation techniques are employed to calculate absorption coefficients that are not available in the lookup table.
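The sketch below illustrates the multidimensional linear interpolation on a structured (T, p, x) lookup table; the grid bounds and table contents are placeholders, not values from the paper.

    import numpy as np
    from scipy.interpolate import RegularGridInterpolator

    # Lookup table of Planck mean absorption coefficients over (T, p, x_CO2).
    T_grid = np.linspace(300.0, 2000.0, 18)   # K (placeholder grid)
    p_grid = np.linspace(0.5, 8.0, 16)        # bar (placeholder grid)
    x_grid = np.linspace(0.0, 1.0, 11)        # CO2 mole fraction (placeholder)
    kp_table = np.zeros((18, 16, 11))         # filled from HITEMP-2010 in practice

    kp_interp = RegularGridInterpolator((T_grid, p_grid, x_grid), kp_table)
    kp = kp_interp([[1500.0, 1.0, 0.5]])      # kappa_p at T=1500 K, p=1 bar, x=0.5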
The radiative heat transfer, the work done by viscous forces, and the species transport models have been added to the existing OpenFOAM application "sonicFoam", and the resulting application is named "sonicRadFoam". The algorithm of the new application is described below; it has been extensively verified and validated, as explained in the subsequent section, and has finally been used for estimating the thermal load on the nozzle base plate.
2.2. Numerical procedure and solution algorithm for the plume flow with radiation

The above mass, momentum, species, energy and radiative transfer equations are discretized using the finite volume method [24]. A second order upwind scheme is used for the face-value interpolation, and the final set of algebraic equations is solved iteratively by the SIMPLE algorithm until the residuals for mass, momentum, species, energy and radiation reach the 10⁻⁵ level. The solution algorithm is stated below (a schematic code sketch follows the list).

1. Initialize the pressure, velocity, species and temperature fields.
2. Solve the mass, momentum, species transport and energy equations without radiation until convergence.
3. Using the converged fields, initialize the intensity field.
4. Calculate the Planck mean absorption coefficient from the converged temperature, pressure and species mole-fraction fields using the Planck mean lookup table, and solve the RTE until convergence.
5. Compute the divergence of the radiative heat flux.
6. Update the temperature field with the radiation sink term.
7. Repeat steps 2 to 6 until all fields reach a steady state.

The flow diagram of the above algorithm is shown in Fig. 2.
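The structure of the coupled loop can be summarized schematically as follows; every helper below is a no-op placeholder standing in for the corresponding OpenFOAM stage, so only the control flow is meaningful.

    def solve_plume_with_radiation(fields, steps):
        # Schematic of steps 1-7 of the algorithm above.
        def solve_flow(f, div_qr=None): return f      # flow equations (steps 2 and 6)
        def lookup_planck_mean(f): return 0.0         # step 4: kappa_p from (T,p,x) table
        def solve_rte(f, kp): return 0.0              # step 4: converge the RTE
        for _ in range(steps):                        # step 7: march to steady state
            fields = solve_flow(fields)               # flow without radiation
            kp = lookup_planck_mean(fields)           # properties at the current state
            div_qr = solve_rte(fields, kp)            # step 5: divergence of q_r
            fields = solve_flow(fields, div_qr)       # step 6: energy update with sink
        return fields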
3. Verification and validation studies

The above mathematical modelling and solution algorithm are verified in three steps:

• The calculated radiative properties are verified.
• The compressible flow solution is validated against published results.
• The radiative heat flux on the base plate is verified for an assumed plume shape, in the sections below.
3.1. Verification of the Planck mean absorption coefficient of pure H2O and CO2

The Planck mean absorption coefficients obtained for H2O and CO2 at various temperatures from HITEMP-2010 using an in-house C++ code [25, 26, 27] match those of Chu et al. [28] with good agreement, as shown in Figure 3. The Planck mean absorption coefficient of H2O decreases exponentially with increasing temperature, whereas for CO2 it first increases up to a temperature of 750 K and then decreases up to 2000 K. The Planck mean absorption coefficient of H2O is higher than that of CO2 at lower temperatures, and the opposite holds at higher temperatures; this difference decreases as the temperature increases.
3.2. Validation of the compressible flow field

Darwish et al. [1] designed a convergent divergent (C-D) nozzle using one dimensional isentropic flow relations for perfect expansion conditions for air. The designed C-D nozzle has an exit diameter of 2.5 mm and a throat diameter of 1.98 mm, giving an area ratio Ar = 1.5942. The schematic diagram of the C-D nozzle, with the buffer section into which the flow emanates, is shown in Fig. 1. They simulated the flow using OpenFOAM for an axisymmetric geometry of this nozzle along with the buffer zone, and further performed experiments to visualize the flow using a shadowgraph technique. In the present study, we use the same nozzle to validate the pressure based compressible flow application "sonicFoam". The air is allowed to expand from 7.1 atm pressure and 288 K to a quiescent medium at 1 atm pressure. The boundary conditions used for this case are the same as given in Table 1, except that the temperature at the inlet is 288 K and the walls have a zeroGradient (∇T = 0) boundary condition for temperature.
[Figure 2: Flow chart for the solution of the high temperature and pressure plume flow with radiation (initialize; solve the flow without radiation; initialize the intensity field; obtain the absorption coefficient from the lookup table and solve the RTE to obtain ∇·q; solve the flow equations with ∇·q; repeat until a steady state is reached).]

[Figure 3: Variation of the Planck mean absorption coefficient of pure H2O and CO2 with temperature at 1 bar pressure (present calculations compared with Chu et al.).]

The flow is simulated on an axisymmetric geometry by creating a wedge of angle θ = 2.5°, one cell thick in the θ direction. The mesh contains 38,400 cells, and the
distance of the first cell center from the wall is maintained at y⁺ ≈ 30. The standard k − ϵ model has been used to model the turbulence. The pressure-implicit split-operator (PISO) algorithm is used to solve the governing flow and energy equations. The thermophysical and transport properties of air are taken as constant: cp = 1005 J/(kg·K), γ = 1.4, µ = 1.789 × 10⁻⁵ Pa·s and Pr = 0.7. The time step used for the present simulation is 10⁻⁸ s, and the simulation has been run for 7 ms. The pressure and Mach number variations along the centerline of the nozzle, together with the results reported by Darwish et al. [1], are plotted in Figures 4 and 5, respectively. The present results are in good agreement with the literature. There are no shocks or sudden discontinuities inside the nozzle, as the flow is perfectly expanded there. Since the nozzle is designed with 1D isentropic calculations while the present simulations are performed for the 2D axisymmetric case, there is a deviation from 1D isentropic flow; thus small expansion and compression waves form, creating the small diamond pattern that can be seen in the profiles of pressure and Mach number along the axis of the geometry.
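As a side check of the 1D design, the isentropic area-Mach relation links the quoted area ratio to a design exit Mach number; the sketch below solves it numerically. The formula is the standard isentropic relation, not taken from the paper.

    import numpy as np
    from scipy.optimize import brentq

    def area_ratio(M, gamma=1.4):
        # Isentropic area-Mach relation: A/A* as a function of Mach number.
        return (1.0 / M) * ((2.0 + (gamma - 1.0) * M**2) / (gamma + 1.0)) ** (
            (gamma + 1.0) / (2.0 * (gamma - 1.0)))

    # Supersonic exit Mach number for the present nozzle (A/A* = 1.5942):
    M_exit = brentq(lambda M: area_ratio(M) - 1.5942, 1.01, 5.0)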
[Figure 4: Variation of the pressure along the axis of the geometry (present simulations compared with Darwish et al.).]

[Figure 5: Variation of the Mach number along the axis of the geometry (present simulations compared with Darwish et al.).]
3.3. Verification of rocket base plate heating with an assumed plume shape

The axisymmetric approximation for the RTE has been tested on a rocket base plate heating problem with a fixed plume shape. The plume is assumed to be conical, with a half cone angle of 15° and a non-dimensional length Z/R = 50, as shown in Figure 6. The temperature of the plume, Tp, is uniform. The environment is assumed to be cold and non-participating, i.e., κ = 0, and the absorption coefficient of the plume is κ = 0.5 m⁻¹.

Figure 7 shows the radiative heat flux at the base plate from the exhaust plume by both the axisymmetric and the three-dimensional calculations. The result obtained from the 3D simulation is in good agreement with the results published by Baek and Kim [6], whereas the axisymmetric solution of the radiative transfer equation is very far from the published result. This calls for a reformulation of the axisymmetric approximation of radiative heat transfer in OpenFOAM. Therefore, a three dimensional geometry has been used for the further simulations, as shown in Figure 8a.
4. Results and discussion

The heating of the rocket base plate by thermal radiation from plumes of different constituents, i.e., a pure H2O plume, a pure CO2 plume and a 50%-50% mixture of H2O and CO2, is studied numerically with OpenFOAM, an open source CFD package. The present simulations are carried out on a full 3D geometry with the pressure based compressible flow application sonicRadFoam. It has additional features over the existing sonicFoam, namely the work done by viscous forces in the energy equation, the species transport equation, and emission/absorption due to gaseous radiation.
[Figure 6: Geometry of the conical plume.]

[Figure 7: Variation of the non-dimensional radiative heat flux at the base plate from the assumed plume shape, computed by the axisymmetric and 3D RTE solutions and compared with Baek and Kim.]

[Figure 8: (a) Three dimensional geometry and meshing for the simulation of the plumes with radiation; (b) cross sectional view of the three dimensional geometry.]
The Planck mean radiation heat transfer model with the multidimensional linear interpolation technique for the properties is also incorporated to perform the radiative heat transfer calculations, the optically thin approximation being valid here.

The results for the thermal load on the rocket base plate from exhaust plumes of three different constituents, i.e., the pure H2O plume, the pure CO2 plume and the 50%-50% mixture of H2O and CO2, are presented in the subsequent sections.

4.1. Pure H2O plume
+ A pure H2O plume is formed by the combustion of pure H2 with the liquid oxidizer LOX. The resulting
+ product is H2O at unit mole fraction (x = 1), which emanates from the nozzle in the form of the plume.
+ Initially the medium is filled with N2, and the H2O expands from 7.11 bar and 2000 K to a quiescent medium
685
+ of 1 atm and 288 K temperature.
+ [Figure 6 schematic: conical exhaust plume of radius R, length Z and half-cone angle 15°, radiating toward the base plate in a cold environment]
+ [Figure 7 data: non-dimensional radiative heat flux at the base plate versus r/R for the 3D calculation, the axisymmetric calculation, and Baek and Kim]
717
+ The pressure remains nearly constant in the convergent part of the nozzle, but it decreases rapidly at the
+ throat and in the divergent part of the nozzle, as shown in Figure 10a. The nozzle exit pressure for the H2O
+ plume, 1.4 bar, is higher than the pressure of the quiescent medium, which means that the
+ flow is underexpanded [29]. Due to this underexpansion, a series of strong expansion
+ and compression waves (oblique shocks) forms and evolves from the lip of the nozzle as the pressure adjusts
+ itself to the medium pressure. The shock that evolves from the lip of the nozzle is barrel-shaped, hence
+ called a "barrel shock", and a Mach disc appears after the shock, formed due to singular
+ reflection. The pressure variation in the divergent part of the nozzle drives the temperature reduction shown
+ in Figure 10b. A similar effect of the pressure variation is seen on the temperature variation in the plume as
+ well. Thus, the temperature variation in the divergent part of the nozzle and in the plume enables the
727
+ heat transfer mechanism. No such mechanism operates in the convergent part of the
+ nozzle, owing to the uniform temperature there. Physical quantities such
+ as pressure, temperature and velocity (or Mach number) vary rapidly across the shock. The shock pattern
+ has the form of a diamond, also known as a diamond flow structure. The pressure varies between 1.4 bar and
+ 0.58 bar across the shock, as in Figure 10a. Similarly, the temperature varies sharply, by up to 300 K
+ in the region from 23 mm to 25 mm, as can be seen from the temperature profile along the axis in Fig. 10b.
+ The temperature first decreases due to the expansion of the gases and then increases due to the compression wave,
+ and this pattern continues until the pressure reaches equilibrium with the buffer zone pressure. After 40 mm the
+ flow stabilizes, as the fluid pressure there becomes the same as the medium pressure. The trend
736
+ is opposite for the Mach number: as the gas expands, the flow velocity increases, and the maximum
+ Mach number achieved in this case is 2.25. The Mach number contour and its profile along the centerline
+ are shown in Figs. 9c and 10c, respectively. In the near-field region of the plume, outside the inviscid
+ central core, a mixing layer forms where viscosity effects are felt and the primary species (H2O)
+ starts mixing with the atmospheric species (N2), forming a shear layer. The region just outside the
+ nozzle where the species start mixing is called the entrainment region of the plume. Moving downstream in the
+ direction of the flow, the mixing layer widens, H2O being the lighter molecule (molecular weight 18), as in Fig.
+ 9d. In the far-field region, i.e., the region after the shock, the species mix completely down to the centerline, as
+ can be seen in the H2O and N2 profiles along the centerline. Fig. 10d shows the profiles of H2O and N2
+ along the axis, and the contours of H2O and N2 are represented in Figs. 9d and 9e, respectively.
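+ For orientation, the local pressure and temperature along the axis are tied to the Mach number through the
+ standard isentropic relations for a perfect gas (a textbook result, see e.g. [29]), valid between shocks:
+ \[ \frac{T_0}{T} = 1 + \frac{\gamma - 1}{2} M^2, \qquad \frac{p_0}{p} = \left( 1 + \frac{\gamma - 1}{2} M^2 \right)^{\gamma/(\gamma-1)}, \]
+ where the subscript 0 denotes the stagnation conditions at the nozzle inlet.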
746
+ The pressure, temperature and H2O concentration contours together define the thermodynamic
+ state of the H2O vapour, and the Planck mean absorption coefficient of H2O has been accessed through lookup
+ tables; its contours are shown in Figure 11a. It has a very high value in the convergent portion of the nozzle
+ due to the very high pressure there, decreases as the pressure decreases in the divergent section of the nozzle, and is
+ further reduced in the plume. The absorption coefficient is zero where only N2 gas is present. The plume
+ being of very small thickness, reabsorption does not occur and the major emission comes from the core of
+ the plume; emission and absorption nearly balance in the shear layer, as the divergence of the radiative heat
+ flux is almost zero both there and in the regions of zero absorption coefficient, as shown in Figure 11b.
+ Note that the divergence of the radiative flux ranges from negative to positive values: a positive
+ divergence represents a radiative sink term, while a negative value represents a radiative source
+ term. Thus, radiation is heating the gas inside the divergent part of the nozzle while it is cooling the plume.
+ This energy is then transferred by the radiative mode of heat transfer to other regions.
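+ In equation form, with the Planck mean used as the gray absorption coefficient, the divergence of the radiative
+ heat flux that acts as the source/sink in the energy equation reads (standard gray-medium result, e.g. Modest [3]):
+ \[ \nabla \cdot \mathbf{q}_r = \kappa_P \left( 4\sigma T^4 - G \right), \qquad \kappa_P = \frac{\pi}{\sigma T^4} \int_0^\infty \kappa_\eta \, I_{b\eta} \, \mathrm{d}\eta, \]
+ where G is the incident radiation; \( \nabla \cdot \mathbf{q}_r > 0 \) where emission exceeds absorption (the gas cools),
+ consistent with the sign convention described above.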
760
+ After emanating from the nozzle, the high-temperature plume diffuses and develops a very high flux
+ and temperature in a very narrow region on the base plate around the lip of the nozzle. Barring this region,
+ the base plate receives the radiation energy emanating from the shear layer of the plume. The radiative heat
+ flux on the base plate is shown in Fig. 12a, barring the region near the lip of the nozzle. The maximum
+ value of the radiative heat flux is 1300 W/m2, and it decreases along the radial direction as the view factor of the
+ plume decreases. Similarly, the temperature developed due to this radiative flux is shown in Fig. 12b. The
+ maximum value that the base plate attains due to the radiation energy is 308 K, and it decreases along the radius
+ in a similar manner to the radiation flux.
768
+ 4.2. Pure CO2 plume
769
+ Although the generation of a pure CO2 plume is not very realistic, the simulation has been performed
+ for a pure CO2 plume for theoretical understanding. The simulations for pure CO2 are performed by
+ supplying pure CO2 (x = 1) at the inlet of the nozzle, with the remaining conditions kept the same as for the H2O
+ plume. This is also a case of underexpansion, so the pressure at the lip of the nozzle varies from 1.4 bar to
+ 0.5 bar across the shocks. A Mach disc forms at the end of the first shock. The contour of
+ pressure and the distribution of pressure along the centerline are shown in Figs. 13a and 14a, respectively. The
+ temperature drop across the shock in the CO2 plume is smaller than for H2O; however, there is a larger drop
+ in temperature towards the end of the plume.
+ The temperature contour is shown in Fig. 13b. The variation of temperature across the first shock is less
+ drastic than for the H2O plume, and this plume also cools faster than the H2O plume: the minimum
+ temperature of this plume is 1200 K at the ends, while it is 1350 K for the H2O plume. The Mach number
780
787
+ Figure 9: The contours of (a) Pressure (b) Temperature (c) Mach number (d) H2O (e) N2 for pure H2O plume
788
+ [Figure 9 panels: contour colorbars for p (N/m2), T (K), Ma, and the H2O and N2 mole fractions]
860
+ Figure 10: Profile of (a) Pressure (b) Temperature (c) Mach number (d) Species along the centerline for pure H2O plume
861
+ [Figure 10 data: pressure (bar), temperature (K), Mach number and species mole fractions along the nozzle central axis (mm)]
944
+ Figure 11: The contours of (a) Absorption coefficient (b) Divergence of radiative heat flux for pure H2O plume
945
947
+ Figure 12: Profile of (a) Radiative heat flux (b) Temperature along the radius of the base plate for pure H2O plume
948
+ [Figure 11 colorbars: absorption coefficient k (1/m) and divergence of radiative heat flux (W/m3)]
+ [Figure 12 data: radiative heat flux q_r (W/m2) and temperature (K) along the base-plate radius (mm)]
1029
+ Figure 13: Contours of (a) Pressure (b) Temperature (c) Mach number (d) CO2 (e) N2 for pure CO2 plume
1030
+ [Figure 13 panels: contour colorbars for p (N/m2), T (K), Ma, and the CO2 and N2 mole fractions]
1101
+ (d)
1102
+ Figure 14: Profile of (a) Pressure (b) Temperature (c) Mach number (d) Species for pure CO2 plume
1103
+ 23
1104
+
1105
+ 8
1106
+ 6
1107
+ 5
1108
+ Pressure (bar)
1109
+ 4
1110
+ 3
1111
+ 2
1112
+ 0
1113
+ 5
1114
+ 10
1115
+ 15
1116
+ 20
1117
+ 25
1118
+ 30
1119
+ 35
1120
+ 40
1121
+ 45
1122
+ 50
1123
+ Nozzlecentralaxis(mm)2100
1124
+ 2000
1125
+ 1900
1126
+ 1800
1127
+ 1700
1128
+ Temperature(
1129
+ 1600
1130
+ 1500
1131
+ 1400
1132
+ 1300
1133
+ 1200
1134
+ 1100
1135
+ 0
1136
+ 5
1137
+ 10
1138
+ 15
1139
+ 20
1140
+ 25
1141
+ 30
1142
+ 35
1143
+ 40
1144
+ 45
1145
+ 50
1146
+ Nozzlecentralaxis(mm)2.5
1147
+ 2.25
1148
+ 2
1149
+ 1.75
1150
+ Machnumber
1151
+ 1.5
1152
+ 1.25
1153
+ 0.75
1154
+ 0.5
1155
+ 0.25
1156
+ 0
1157
+ 0
1158
+ 5
1159
+ 10
1160
+ 15
1161
+ 20
1162
+ 25
1163
+ 30
1164
+ 35
1165
+ 40
1166
+ 45
1167
+ 50
1168
+ Nozzle central axis (mm)0.9
1169
+ 0.8
1170
+ 0.7
1171
+ Species distribution
1172
+ 0.6
1173
+ CO2
1174
+ 0.5
1175
+ N2
1176
+ 0.4
1177
+ 0.3
1178
+ 0.2
1179
+ 0.1
1180
+ 10
1181
+ 15
1182
+ 25
1183
+ 30
1184
+ 35
1185
+ 40
1186
+ 45
1187
+ 50
1188
+ Nozzlecentralaxis(mm)(a)
1189
+ (b)
1190
+ Figure 15: Contours of (a) Absorption coefficient (b) Divergence of radiative heat flux for pure CO2 plume
1191
1193
+ Figure 16: Profile of (a) Radiative heat flux (b) Temperature along the radius of base plate for pure CO2 plume
1194
+ [Figure 15 colorbars: absorption coefficient k (1/m) and divergence of radiative heat flux (W/m3)]
+ [Figure 16 data: radiative heat flux q_r (W/m2) and temperature (K) along the base-plate radius (mm)]
1279
+ Figure 17: Profile of (a) Radiative heat flux (b) Temperature on base plate along the radius of base plate for 50-50% mixture
1280
+ of CO2 − H2O plume
1281
+ contour and its distribution along the centerline are shown in Figs. 13c and 14c, respectively. The diffusion
+ of CO2 in N2 is weaker than that of H2O due to the higher molecular weight of CO2 (44) compared to H2O
+ (18), as shown in Fig. 14d. The contours of the CO2 and N2 mole fractions are shown in Figs. 13d and 13e,
+ respectively.
1285
+ The Planck mean absorption coefficient distribution for the CO2 plume is shown in Fig. 15a. Its value is
+ almost zero everywhere except in the core of the plume and in the shear layer. As the absorption coefficient
+ of CO2 is higher in the shear layer than for the H2O plume, the radiative heat flux on the rocket base plate
+ is also higher, i.e., around 4000 W/m2, as shown in Fig. 16a. The corresponding temperature distribution
+ on the base plate is shown in Fig. 16b, with a maximum value of 323 K, barring the diffusion region.
1293
+ 4.3. Mixture plume (50 % H2O and 50 % CO2)
1294
+ The combustion of a hydrocarbon fuel with the liquid oxidizer (LOX) gives a 50-50% mixture of CO2 and H2O.
+ Thus, for the present problem, we supply a 50-50% mixture of CO2 and H2O at the inlet of the nozzle,
+ and the other conditions are kept the same as in the previous cases. This is also a case
+ of an underexpanded plume. The temperature variation along the centerline at the end of the buffer section is
+ roughly the average of the pure CO2 and H2O plumes.
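+ A convenient property underlying this intermediate behaviour is that the Planck mean is linear in the spectral
+ absorption coefficient, so for the mixture it is simply additive over the components at their partial pressures
+ (a standard identity, stated here for clarity rather than taken from the paper):
+ \[ \kappa_{P,\mathrm{mix}} = \kappa_{P,\mathrm{H_2O}}(T, x_{\mathrm{H_2O}} p) + \kappa_{P,\mathrm{CO_2}}(T, x_{\mathrm{CO_2}} p). \]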
1299
+ The radiative transfer calculations are performed to determine the heat flux on the base plate from
1300
+ [Figure 17 data: radiative heat flux q_r (W/m2) and temperature (K) along the base-plate radius (mm)]
+ the CO2 − H2O plume. The maximum radiative heat flux on the base plate is 2300 W/m2 (Fig. 17a) and it
1360
+ decays with the radius of the base plate. The corresponding profile of the temperature on the base plate
+ is shown in Fig. 17b. It is noted that the flux and temperature profiles for the CO2 and mixture plumes
+ decay exponentially with radius, while the decay is almost linear for H2O. This is because the high
+ diffusivity of H2O spreads the vapour over a wider region, so its emission reaches the base plate with a higher
+ view factor; this is not the case for the CO2 and mixture plumes.
1365
+ 5. Conclusions
1366
+ The thermal load calculation on the base plate of the nozzle from the exhaust plume is performed in OpenFOAM.
+ The ability of the pressure-based compressible flow application "sonicFoam" to capture the flow field
+ for air expanding in a convergent-divergent nozzle is tested first. The stagnation pressure and temperature at the inlet
+ of the nozzle are 7.11 bar and 288 K, due to which the flow expands and achieves Mach 2.1 at the exit of the
+ nozzle. The resulting pressure and Mach number variations along the centerline match well with the standard
+ published results.
1372
+ The same nozzle is then used with an elevated stagnation temperature of 2000 K and the same inlet pressure
+ to estimate the heat load on the base plate for three different plumes, namely a pure H2O plume, a pure CO2 plume
+ and a mixture plume. For this, the "sonicFoam" application is modified by incorporating the work done by
+ viscous forces and a species transport equation, and is coupled with the RTE solver fvDOM along with the Planck
+ mean absorption/emission model; the resulting solver is named "sonicRadFoam". All three plumes exit the nozzle
+ under underexpanded flow conditions, where the exit pressure is higher than the back pressure. Expansion waves
+ start from the lip of the nozzle, due to which the temperature decreases as the flow exits the nozzle and the
+ Mach number increases to a maximum value of 2.25.
1380
+ The maximum heat load on the base plate due to thermal radiation in the present study comes from the
+ pure CO2 plume, i.e., 4000 W/m2, owing to the high value of its absorption coefficient, barring the diffusion zone.
+ This flux heats the base plate and its temperature rises up to 323 K. It is followed by the mixture plume, for which
+ the maximum radiative heat flux on the base plate is 2300 W/m2 and the corresponding temperature rise is to 312 K.
+ For the pure H2O plume the heat flux is lowest, i.e., 1300 W/m2, with a temperature rise to 308 K. The variation
+ in flux differs between the plumes mostly because of the difference in the absorption coefficients
+ of the gases. Further, their molecular weights are also different, which leads to differences in the flow
+ fields of the gases and to the different shapes of the flux and temperature variations on the nozzle base plate.
1388
+ Due to the small length scale, the current case falls in the optically thin regime; thus, the Planck mean absorption
+ model provides satisfactory results. However, the Planck mean absorption model may not be adequate for other
+ cases with large length scales. For those, full-spectrum radiative property models are needed, with
+ properties for all thermodynamic states existing in the plume. Furthermore, solid fuels emit particles
+ that contribute most of the radiative thermal load on the nozzle base plate; therefore, the current radiation
+ heat transfer capability needs to be further enhanced by including a scattering model.
1396
+ References
1397
+ [1] M. Darwish, L. Orazi, D. Angeli, Simulation and analysis of the jet flow patterns from supersonic nozzles
1398
+ of laser cutting using OpenFOAM, The International Journal of Advanced Manufacturing Technology
1399
+ 102 (9) (2019) 3229–3242.
1400
+ [2] F. Simmons, Rocket exhaust plume phenomenology, American Institute of Aeronautics and Astronautics, Inc., 2000.
1407
+ [3] M. F. Modest, Radiative heat transfer, Academic press, 2013.
1408
+ [4] C. Tien, M. Abu-Romia, A method of calculating rocket plume radiation to the base region, Journal of
1409
+ Spacecraft and Rockets 1 (4) (1964) 433–435.
1410
+ [5] H. Nelson, Backward Monte Carlo modeling for rocket plume base heating, Journal of Thermophysics
1411
+ and Heat Transfer 6 (3) (1992) 556–558.
1412
+ [6] S. W. Baek, M. Y. Kim, Analysis of radiative heating of a rocket plume base with the finite-volume
1413
+ method, International Journal of Heat and Mass Transfer 40 (7) (1997) 1501–1508.
1414
+ [7] H.-P. Tan, Y. Shuai, S.-K. Dong, Analysis of rocket plume base heating by using backward Monte Carlo
+ method, Journal of Thermophysics and Heat Transfer 19 (1) (2005) 125–127.
+ [8] J. Everson, H. Nelson, Rocket plume radiation base heating by reverse Monte Carlo simulation, Journal
+ of Thermophysics and Heat Transfer 7 (4) (1993) 717–723.
1418
+ [9] K. R. S. Sunil Kumar, Prediction of radiation from plumes, considering spatial temperature variations,
1419
+ Heat Transfer Engineering 21 (1) (2000) 55–73.
1420
+ [10] B. Gu, M. Y. Kim, S. W. Baek, Analysis of the IR signature and radiative base heating from a supersonic
1421
+ solid rocket exhaust plume, International Journal of Aeronautical and Space Sciences 20 (2) (2019) 423–
1422
+ 432.
1423
1425
+ [11] L. S. Rothman, I. E. Gordon, A. Barbe, D. C. Benner, P. F. Bernath, M. Birk, V. Boudon, L. R. Brown,
1426
+ A. Campargue, J.-P. Champion, et al., The HITRAN 2008 molecular spectroscopic database, Journal of
1427
+ Quantitative Spectroscopy and Radiative Transfer 110 (9-10) (2009) 533–572.
1428
+ [12] S. Tashkun, V. Perevalov, CDSD-4000: High-resolution, high-temperature carbon dioxide spectroscopic
1429
+ databank, Journal of Quantitative Spectroscopy and Radiative Transfer 112 (9) (2011) 1403–1410.
1430
+ [13] L. Rothman, I. Gordon, R. Barber, H. Dothe, R. Gamache, A. Goldman, V. Perevalov, S. Tashkun,
1431
+ J. Tennyson, HITEMP, the high-temperature molecular spectroscopic database, Journal of Quantitative
1432
+ Spectroscopy and Radiative Transfer 111 (15) (2010) 2139–2150.
1433
+ [14] M. F. Modest, H. Zhang, The full-spectrum correlated-k distribution for thermal radiation from
1434
+ molecular gas-particulate mixtures, Journal of heat transfer 124 (1) (2002) 30–38.
1435
+ [15] C. Wang, W. Ge, M. F. Modest, B. He, A full-spectrum k-distribution look-up table for radiative transfer
1436
+ in nonhomogeneous gaseous media, Journal of Quantitative Spectroscopy and Radiative Transfer 168
1437
+ (2016) 46–56.
1438
+ [16] V. P. Solovjov, B. W. Webb, SLW modeling of radiative transfer in multicomponent gas mixtures, Journal
1439
+ of Quantitative Spectroscopy and Radiative Transfer 65 (4) (2000) 655–672.
1440
+ [17] S. Parvatikar, K. Khemani, P. Kumar, Benchmark test cases for non-gray radiative heat transfer
1441
+ calculation using FSK look-up table, in: Journal of Physics: Conference Series, Vol. 2116, IOP Publishing,
1442
+ 2021, p. 012066.
1443
+ [18] K. Khemani, S. Parvatikar, P. Kumar, Radiative heat transfer calculations using full spectrum k-
1444
+ distribution method for benchmark test cases, S¯adhan¯a 48 (1) (2023) 1–18.
1445
+ [19] K. Khemani, P. Kumar, Radiative heat transfer calculation for mixture of gases using full spectrum
1446
+ k-distribution method, in: Journal of Physics: Conference Series, Vol. 2116, IOP Publishing, 2021, p.
1447
+ 012065.
1448
+ [20] OpenCFD, OpenFOAM - The Open Source CFD Toolbox - User’s Guide, OpenCFD Ltd. (11 Apr.
1449
+ 2007).
1450
+ [21] D. C. Wilcox, et al., Turbulence modeling for CFD, Vol. 2, DCW industries La Canada, CA, 1998.
1451
+ [22] T. F. Edgar, R. M. Felder, J. McKenna, R. W. Rousseau, S. I. Sandler, R. C. Seagrave, Bird, Stewart
+ and Lightfoot: Transport Phenomena.
1453
1455
+ [23] G. Chanakya, P. Kumar, Investigation of thermal adiabatic boundary condition on semitransparent
1456
+ wall in combined radiation and natural convection, International Journal for Computational Methods
1457
+ in Engineering Science and Mechanics 23 (4) (2022) 349–366.
1458
+ [24] P. Kumar, Radiative heat transfer in a participating gray medium and its interaction with fluid flow,
1459
+ Ph.D. thesis, Indian Institute of Technology Kanpur (January 2009).
1460
+ [25] N. Bartwal, G. Chanakya, P. Kumar, Calculation of non-gray radiation transmissivity, absorptivity and
1461
+ absorption coefficient of water vapour from hitemp-2010 database at high temperature, in: 6th Asian
1462
+ Symposium on Computational Heat Transfer and Fluid Flow, IITM, Chennai, India, 2017.
1463
+ [26] N. Bartwal, P. Kumar, Calculation of non-gray radiation transmissivity, absorptivity of carbon-dioxide
1464
+ from hitemp2010 database at high temperature, in: 24th National & 2nd International ISHMT-ASTFE
1465
+ Heat and Mass Transfer Conference, BITS-Pilani, Hyderabad, India, 2017.
1466
+ [27] N. Bartwal, P. Kumar, Calculation of non-gray radiation absorptivity and absorption coefficient of
1467
+ mixture of gases from hitemp-2010 database, in: International Heat Transfer Conference Digital Library,
1468
+ Begel House Inc., 2018.
1469
+ [28] H. Chu, M. Gu, H. Zhou, F. Liu, Calculations of narrow-band transmissivities and the Planck mean
1470
+ absorption coefficients of real gases using line-by-line and statistical narrow-band models, Frontiers in
1471
+ Energy 8 (1) (2014) 41–48.
1472
+ [29] E. Franquet, V. Perrier, S. Gibout, P. Bruel, Free underexpanded jets in a quiescent medium: A review,
1473
+ Progress in Aerospace Sciences 77 (2015) 25–53.
1474
6dE4T4oBgHgl3EQfBwu-/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c438c38c09e6aa2d536506b68de2d9667fc4441860dd5c2749ff6da36696437f
3
+ size 2147291
8NE3T4oBgHgl3EQfqQrl/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3455354d525259c187b2ec3c9943f1d153e9e46a75f2631cb45a37be12de69f3
3
+ size 67589
99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9279125764de599c9e7f33e8b33770f1d10c8f654992f12824fb23ec7f175795
3
+ size 17283314
B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c4c713222d46f92fe07cbb0b506d4720c3f35782ded08635b2f52be9de8094d
3
+ size 327589
B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:808ec59431c2f58932e0de7e4da80d9bf064a6cd9abc7803e77d9aa092cf67fc
3
+ size 4390957
B9AyT4oBgHgl3EQf4PrK/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52debdaf3e0ee9e7a2482f7b58591736b59ba569a99ef4d1c14aa68cfe4afc15
3
+ size 154784
BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eae4b3cfca7cea50b8c9c3a02faede020f5b1da89af4ead337c0fe541c31d573
3
+ size 8192045
BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ac4b5ceee3ca791739b8a1c83749b8821de098f3e8925ca32b212d2f4820daf
3
+ size 2564875
BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e191334d0c9b3239f6fee0eb2cccfaa9c0abf698228899e96fac67247efe61c
3
+ size 5505069
BdFQT4oBgHgl3EQf9zeq/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da77230c64ff1c41c028594d1d6a572ed81ea3b5f6bf718dc9a19758f6c8803d
3
+ size 218817
CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d66f38b7e61151b3b7cb7c47c3b814d0693494aec42e55334fc3c97144794147
3
+ size 415674
CdE4T4oBgHgl3EQfeQ2g/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aae97493fe8b0e1d354e04d71cd1f97905d1a98bf027f471340c7ec0a894b1a5
3
+ size 223345
CtFRT4oBgHgl3EQfwTji/content/tmp_files/2301.13638v1.pdf.txt ADDED
@@ -0,0 +1,595 @@
1
+ Charge collection and efficiency measurements of the
2
+ TJ-Monopix2 DMAPS in 180 nm CMOS technology
3
+ Christian Bespin,𝑎,∗ Ivan Caicedo,𝑎 Jochen Dingfelder,𝑎 Tomasz Hemperek,𝑎,𝑒 Toko
4
+ Hirono,𝑎,𝑏 Fabian Hügging,𝑎 Hans Krüger,𝑎 Konstantinos Moustakas,𝑎,𝑐 Heinz
5
+ Pernegger,𝑑 Petra Riedler,𝑑 Lars Schall,𝑎 Walter Snoeys𝑑 and Norbert Wermes𝑎
6
+ 𝑎Physikalisches Institut, Universität Bonn,
7
+ Nußallee 12, Bonn, Germany
8
+ 𝑏Deutsches Elektronen-Synchrotron (DESY)
9
+ Notkestraße 85, Hamburg, Germany
10
+ 𝑐Paul Scherrer Institut,
11
+ Forschungsstrasse 111, Villigen, Switzerland
12
+ 𝑑CERN
13
+ Espl. des Particules 1, Meyrin, Switzerland
14
+ 𝑒DECTRIS AG
15
+ Täfernweg 1, Baden-Dättwil, Switzerland
16
+ E-mail: [email protected]
17
+ Monolithic CMOS pixel detectors have emerged as competitive contenders in the field of high-
18
+ energy particle physics detectors. The use of commercial processes offers high-volume production
19
+ of such detectors. A series of prototypes has been designed in a 180 nm Tower CMOS process with
20
+ depletion of the sensor material and a column-drain readout architecture. The latest iteration, TJ-
21
+ Monopix2, features a large 2 × 2 cm2 matrix consisting of 512 × 512 pixels with 33.04 µm pitch. A
22
+ small collection electrode design aims at low power consumption and low noise while the radiation
23
+ tolerance for high-energy particle detector applications needs extra attention. With a goal to reach
24
+ radiation tolerance to levels of NIEL damage of 1 × 1015 1 MeV neq/cm2, a modification of the
25
+ standard process has been implemented by adding a low-dosed n-type silicon implant across the
26
+ pixel in order to allow for homogeneous depletion of the sensor volume. Recent lab measurements
27
+ and beam tests were conducted for unirradiated modules to study electrical characteristics and hit
28
+ detection efficiency.
29
+ 10th International Workshop on Semiconductor Pixel Detectors for Particles and Imaging (Pixel2022)
30
+ 12-16 December 2022
31
+ Santa Fe, New Mexico, USA
32
+ ∗Speaker
33
+ © Copyright owned by the author(s) under the terms of the Creative Commons
34
+ Attribution-NonCommercial-NoDerivatives 4.0 International License (CC BY-NC-ND 4.0).
35
+ https://pos.sissa.it/
36
+ arXiv:2301.13638v1 [physics.ins-det] 31 Jan 2023
37
40
+ 1. Introduction
42
+ In recent years, advances in CMOS technologies have fueled the development of a new gener-
43
+ ation of monolithic active pixel sensors (MAPS) with fast readout and high radiation tolerance by
44
+ depleting the charge sensitive volume [1]. These depleted MAPS (DMAPS) devices are therefore an
45
+ interesting candidate for high-energy particle physics experiments with high-radiation environments
+ and high particle rates. Depletion is achieved by using high-voltage add-ons in the CMOS
+ technology and/or high-resistivity substrates. The increasing availability of these features in com-
+ mercial CMOS processes could combine the advantages of the detector concept with a possibly faster
+ and cheaper production than for common hybrid pixel detectors. The idea
+ behind and measurement results from one of several DMAPS prototypes, TJ-Monopix2 [2, 3],
+ will be presented in the following.
52
+ 2. Design of TJ-Monopix2
54
+ TJ-Monopix2 is the latest DMAPS prototype from the TJ-Monopix development line which is
55
+ based on the ALPIDE pixel detector developed for the ALICE ITS upgrade [4]. It is fabricated in
56
+ the same 180 nm commercial CMOS process provided by Tower Semiconductor1. A modification
57
+ of the process used for ALPIDE has been implemented to increase the radiation tolerance to
58
+ levels ≥ 1 × 1015 neq cm−2 by adding a low dose n-type implant for homogeneous growth of the
59
+ depletion zone with applied bias voltage. In measurements on first prototypes with this modification,
60
+ a drop in hit detection efficiency was observed after irradiation [5, 6]. This could be improved
61
+ significantly by adding a gap in the n-type blanket or a deep p-type implant in the pixel corners
62
+ to shape the electric field towards the collection electrode [7]. The cross-sections of these
+ two sensor designs are shown in fig. 1. Additionally, chips have been produced on Czochralski
65
+ [Figure 1a schematic: collection n-well and low dose n-type implant (with gap) over the p- epitaxial layer and p+ substrate; p-wells and deep p-wells host the pixel electronics]
77
+ (a) Modification with gap in low dose n-type implant be-
78
+ low pixel electronics.
79
+ [Figure 1b schematic: as (a), but with a continuous low dose n-type implant and an extra deep p-well below the pixel electronics]
93
+ (b) Modification with continuous n-type implant and deep
94
+ p-type implant below pixel electronics.
95
+ Figure 1: Cross-section variants of modified sensor process for TJ-Monopix2.
96
+ silicon to increase the available depletable volume compared to the thickness of the epitaxial layer
97
+ (O(10 µm)). Measurements on Czochralski silicon chips in TJ-Monopix1 showed a further increase
98
+ in hit detection efficiency after irradiation [8].
99
+ TJ-Monopix2 follows a small collection electrode approach with a pixel capacitance of
100
+ about 3 fF. The pixels of size 33 × 33 µm2 are read out using an established synchronous column-
101
+ drain technique from the FE-I3 readout chip [9]. Further changes from the predecessor TJ-Monopix1
102
+ 1https://towersemi.com
103
107
+ include an improved front-end design, a new pixel masking scheme and a 3-bit DAC for local
108
+ threshold tuning. With these changes the threshold is expected to be reduced by a factor of 3 while
109
+ improving the threshold dispersion and noise behavior.
110
+ The digital periphery contains logic for register configuration, data handling and LVDS output
111
+ drivers. Slow control is done via a command protocol and decoder that was taken from the RD53B
112
+ readout chip [10]. Both pixel and register data are 8b10b encoded in a frame-based data stream
113
+ which allows operating the chip with four differential data lines.
114
+ 3. Injection-based threshold and noise measurements
116
+ Initial tests have been performed in a laboratory setup to measure the threshold and noise
117
+ performance of TJ-Monopix2. All of these values are extracted from injecting different amounts of
118
+ charge into the pixel a given number of times and recording the amount of registered hits 𝑛hits. The
119
+ response function is a smeared step function of the form
120
+ \[ n_\mathrm{hits}(q) = \frac{n_\mathrm{injections}}{2} \left[ \operatorname{erf}\!\left( \frac{q - q_\mathrm{thr}}{\sigma \sqrt{2}} \right) + 1 \right] \tag{1} \]
132
+ with 𝑞 the injected charge amount, 𝑛injections the number of consecutive injections of 𝑞 and 𝑞thr the
133
+ charge at the threshold. 𝜎 denotes the Gaussian smearing of the step function, which is identified
+ with the electronic noise of the pixel. The threshold is defined as the charge at which 50 % of the
+ injected hits are recorded by the pixel. Histogramming the individual thresholds from each pixel
136
+ leads to the distribution shown in fig. 2a. The distribution has a mean value of 164 e− and a width
137
+ [Figure 2a data: per-pixel threshold histogram with Gaussian fit, μ = 164.3 e−, σ = 13.0 e−, color-coded by TDAC setting]
165
+ (a) Threshold distribution before in-pixel threshold tuning.
166
+ [Figure 2b data: per-pixel threshold histogram after tuning, fit μ = 163.2 e−, σ = 2.7 e−]
191
+ (b) Threshold distribution after in-pixel threshold tuning.
192
+ Figure 2: Threshold distribution of TJ-Monopix2 before and after adjusting the in-pixel threshold DAC to
193
+ lower the dispersion. Color-coded is the value of the DAC setting for every pixel.
194
+ of 13 e− which is defined as the threshold dispersion. By adjusting the threshold DAC in each pixel
195
+ in order to even out the deviations from the target threshold, the threshold dispersion can be reduced
196
+ significantly. The resulting distribution after this so-called tuning process is shown in fig. 2b. While
197
+ the mean threshold stays basically the same, the dispersion could be reduced by a factor of almost 5.
198
+ Both the mean threshold and threshold dispersion are significantly lower than in TJ-Monopix1,
199
+ where losses in hit detection efficiency could be observed due to too large thresholds [8].
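+ To make the procedure concrete, a minimal sketch of extracting threshold and noise from an injection scan by
+ fitting eq. (1) is shown below. This is illustrative only, not the actual TJ-Monopix2 DAQ or analysis code; the
+ scan range, injection count and helper names are assumptions.
+ import numpy as np
+ from scipy.optimize import curve_fit
+ from scipy.special import erf
+ 
+ N_INJ = 100  # injections per charge step (assumed)
+ 
+ def s_curve(q, q_thr, sigma):
+     # Occupancy model of eq. (1): error-function-smeared step
+     return 0.5 * N_INJ * (erf((q - q_thr) / (sigma * np.sqrt(2.0))) + 1.0)
+ 
+ # Illustrative scan: injected charge (electrons) and registered hit counts
+ q = np.arange(100.0, 231.0, 10.0)
+ rng = np.random.default_rng(0)
+ hits = rng.binomial(N_INJ, np.clip(s_curve(q, 165.0, 6.0) / N_INJ, 0.0, 1.0))
+ 
+ popt, _ = curve_fit(s_curve, q, hits, p0=[q.mean(), 5.0])
+ print(f"threshold = {popt[0]:.1f} e-, noise = {popt[1]:.1f} e-")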
200
204
+ [Figure 3a data: ENC histogram of TJ-Monopix1, fit μ = 11.8 e−, σ = 1.5 e−]
227
+ (a) Noise distribution of TJ-Monopix1 with noticeable tail
228
+ towards larger values.
229
+ [Figure 3b data: ENC histogram of TJ-Monopix2, fit μ = 5.6 e−, σ = 0.6 e−]
248
+ (b) Noise distribution of TJ-Monopix2. There is no observ-
249
+ able tail and lower noise overall.
250
+ Figure 3: Noise distribution of TJ-Monopix1 (left) and TJ-Monopix2 (right) for comparison.
251
+ The corresponding histogram of the electronic noise is depicted in fig. 3. As a comparison,
252
+ the distribution from the predecessor TJ-Monopix1 is included, where a large tail towards higher
253
+ values was observed that led to a high operational threshold in order to limit the amount of noisy
254
+ pixels. It can be seen that this tail is largely removed with slight changes to the analog front-end,
255
+ which in turn lowers the threshold for a regular operation of the chip.
256
+ 4. Hit detection efficiency measurements
258
+ Two different pixel variations were investigated regarding their hit detection efficiency, which
+ will be presented in the following: a DC-coupled, more standard design, which makes up most
+ of the matrix, and an AC-coupled experimental design realized in only a few columns of the matrix.
261
+ While the former was measured in more detail, some first results of the latter are included as well.
262
+ 4.1 Standard DC-coupled pixel flavor
263
+ First measurements to determine the hit detection efficiency have been performed in a 5 GeV
264
+ electron beam at the DESY II testbeam facility at DESY, Hamburg [11]. Three unirradiated modules
265
+ were tested with different sensor geometries: two chips with 30 µm thick epitaxial silicon and the
266
+ two geometries depicted in fig. 1 as well as one chip built on 300 µm Czochralski silicon with a gap
267
+ in the low dose n-type implant (see fig. 1a). It should be noted that the different substrate materials
268
+ offer different sensor thicknesses and therefore charge-sensitive volume depending on the depletion.
269
+ The measurements are not targeting a comparison between different types of silicon.
270
+ Figures 4a and 4b show the recorded cluster charge for a chip with epitaxial layer and with
271
+ Czochralski substrate. It can be observed that the collected charge is about 25 % larger in the Cz
+ sample, because there the depletion depth is limited only by the thickness of the sensor (300 µm), which
+ is far from fully depleted but still depleted deeper than the 30 µm thick epitaxial layer of the other chip.
+ The average cluster size is significantly larger in the Cz sample as well, which results in a high
+ spatial resolution due to charge-weighted clustering. The cluster size distributions for the same
276
+ samples as above are depicted in figs. 4c and 4d. While cluster size 1 is predominant in the epitaxial
277
281
+ [Figure 4a data: cluster charge histogram, MPV charge 2579 e−]
297
+ (a) Cluster charge distribution for an epitaxial silicon chip.
298
+ [Figure 4b data: cluster charge histogram, MPV charge 3235 e−]
314
+ (b) Cluster charge for a Czochralski silicon chip.
315
+ [Figure 4c data: cluster size histogram, mean cluster size 1.51]
330
+ (c) Cluster size distribution for an epitaxial silicon chip.
331
+ [Figure 4d data: cluster size histogram, mean cluster size 1.95]
348
+ (d) Cluster size distribution for a Czochralski silicon chip.
349
+ Figure 4: Cluster charge and size distributions for a chip with 30 µm epitaxial silicon (left) and 300 µm
350
+ Czochralski silicon (right) at −6 V bias voltage. The latter can be depleted further than 30 µm resulting in a
351
+ larger cluster charge and size. Both chips were operated at a threshold of 200 e−.
352
+ sample, the Cz sample has mainly clusters of size 2. The corresponding average cluster size is 1.55
353
+ and 1.95, respectively.
354
+ Taking the pointing resolution of the beam telescope into account, an
355
+ intrinsic spatial resolution of 8.6 µm could be achieved in a Czochralski silicon sample.
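+ As a minimal sketch of the charge-weighted clustering mentioned above (illustrative only, not the testbeam
+ analysis code; the example cluster values are made up):
+ import numpy as np
+ 
+ PITCH = 33.04  # um, TJ-Monopix2 pixel pitch
+ 
+ def cluster_position(cols, rows, charges):
+     # Charge-weighted centroid of a cluster, in um; charge sharing between
+     # pixels is what pushes the resolution below pitch/sqrt(12).
+     q = np.asarray(charges, dtype=float)
+     x = np.sum(np.asarray(cols, dtype=float) * q) / np.sum(q) * PITCH
+     y = np.sum(np.asarray(rows, dtype=float) * q) / np.sum(q) * PITCH
+     return x, y
+ 
+ # Example: a size-2 cluster sharing charge 60/40 between neighboring columns
+ print(cluster_position([10, 11], [20, 20], [600.0, 400.0]))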
356
+ The hit detection efficiency was measured with a beam telescope of six Mimosa26 planes
+ and an FE-I4 time reference plane, all connected to a trigger logic unit to synchronize
+ the individual detector hits in time. The efficiency for all three modules is shown in fig. 5, where
+ the result for every pixel was mapped onto a two-by-two pixel cell to increase the statistics and reveal
+ possible effects or efficiency losses within a single pixel. All samples were operated at a threshold of
+ about 200 e− and achieve a hit detection efficiency around 99.80 %, with slight deviations within the
+ uncertainty (estimated < 0.1 %). No losses are observable in the pixel corners or between pixels.
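+ A minimal sketch of the folding onto a 2 × 2 pixel cell used for fig. 5 (an assumption-laden illustration, not the
+ collaboration's code; track and hit-matching arrays are taken as given inputs):
+ import numpy as np
+ 
+ PITCH = 33.04          # um
+ CELL = 2 * PITCH       # fold onto a 2 x 2 pixel cell for statistics
+ 
+ def in_pixel_efficiency(track_x, track_y, matched, bins=33):
+     # track_x, track_y: telescope track intercepts on the DUT (um)
+     # matched: bool array, True if a DUT hit matches the track
+     x = np.mod(track_x, CELL)
+     y = np.mod(track_y, CELL)
+     total, xe, ye = np.histogram2d(x, y, bins=bins, range=[[0, CELL], [0, CELL]])
+     passed, _, _ = np.histogram2d(x[matched], y[matched], bins=[xe, ye])
+     with np.errstate(invalid="ignore"):
+         return passed / total  # per-bin efficiency, NaN where no tracks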
363
+ 4.2 AC-coupled pixel flavor
364
+ Another pixel variation with different analog front-end was tested as well to determine its
365
+ performance in a particle beam. In this design the (positive) bias voltage is applied via a diode on
366
+ the top side of the chip and connected to the charge collection n-well. To avoid breakdown of the
367
+ front-end electronics due to the high voltage (≤ 50 V) on that well, the input signal is AC coupled to
368
372
+ [Figure 5a map: in-pixel efficiency over the folded 2 × 2 pixel cell, column/row in µm, color scale 90–100 %]
399
+ (a) (99.80 ± 0.10) % efficiency for a chip built on epitaxial
400
+ silicon with gap in n-layer modification from fig. 1a.
401
+ [Figure 5b map: as in (a)]
428
+ (b) (99.79 ± 0.10) % efficiency for a chip built on Cz sili-
429
+ con with gap in n-layer modification from fig. 1a.
430
+ [Figure 5c map: as in (a)]
457
+ (c) (99.85 ± 0.10) % efficiency for a chip built on epitaxial
458
+ silicon with additional p-well modification from fig. 1b.
459
+ Figure 5: Hit detection efficiencies for different substrate materials with different thicknesses and sensor
460
+ geometries. Results were mapped onto a 2 x 2 pixel array for higher statistics and in-pixel resolution. The
461
+ chips were operated with −6 V bias voltage and at a 200 e− threshold.
462
+ the amplifier. This approach can potentially deplete the substrate further, owing to the higher voltage
+ than can be applied in the standard pixel design. The hit detection efficiency was measured
+ for different bias voltages and is shown in fig. 6. At 5 V the efficiency is already above 99 %, and it
+ reaches the same value as for the DC-coupled pixel flavor, 99.85 %, at or before 25 V bias voltage.
466
+ This is, taking the slightly higher threshold into account, in agreement with the expectation that
467
+ there should be no noticeable difference in hit detection efficiency before irradiation between the
468
+ two pixel flavors. The larger applicable bias voltage could prove superior after irradiation to achieve
469
+ more depletion and therefore higher charge signal.
470
+ 5. Conclusion
472
+ In summary, the performance of TJ-Monopix2 shows a significant improvement in threshold
473
+ value and dispersion compared to TJ-Monopix1, although the former is higher than its design value
474
478
+ [Figure 6a map: in-pixel efficiency of the AC-coupled flavor (HV CASC region), column/row in µm, color scale 90–100 %]
505
+ (a) (99.21 ± 0.10) % efficiency of an AC-coupled pixel
506
+ flavor at 5 V bias voltage and 250 e− threshold.
507
+ [Figure 6b map: as in (a)]
534
+ (b) (99.85 ± 0.10) % efficiency of an AC-coupled pixel
535
+ flavor at 25 V bias voltage and 200 e− threshold.
536
+ Figure 6: Hit detection efficiency of an AC-coupled pixel flavor at (6a) 5 V and (6b) 25 V bias voltage.
537
+ (120 e−). With the measured amount of charge in the sensor, the threshold is still small enough
+ to detect the majority of hits, even from large clusters, before irradiation. The large signal compared
+ to the small pixel size leads to a large cluster size and therefore a high spatial resolution, where chips
540
+ on Czochralski substrate perform slightly better due to the larger sensor volume. For the tested
541
+ sensor materials with different thicknesses and sensor geometries, the hit detection efficiency is
542
+ around 99.8 % or better in all cases. The modified front-end version with bias applied on the charge
543
+ collection node achieves similar values for the hit detection efficiency while providing a larger
544
+ headroom in bias voltage to achieve efficient performance after radiation damage. The results for
545
+ irradiated modules will be presented in a forthcoming publication.
546
+ Acknowledgments
547
+ This project has received funding from the Deutsche Forschungsgemeinschaft DFG (grant WE
548
+ 976/4-1), the German Federal Ministry of Education and Research BMBF (grant 05H15PDCA9),
549
+ and the European Union’s Horizon 2020 research and innovation program under grant agreements
550
+ no. 675587 (Maria Sklodowska-Curie ITN STREAM), 654168 (AIDA-2020), and 101004761
551
+ (AIDAinnova). The measurements leading to these results have been performed at the Test Beam
552
+ Facility at DESY Hamburg (Germany), a member of the Helmholtz Association (HGF).
553
+ References
554
+ [1] I. Perić, A novel monolithic pixelated particle detector implemented in high-voltage CMOS
555
+ technology, Nuclear Instruments and Methods in Physics Research Section A: Accelerators,
556
+ Spectrometers, Detectors and Associated Equipment 582 (2007) 876.
557
+ [2] K. Moustakas, M. Barbero, I. Berdalovic, C. Bespin, P. Breugnon, I. Caicedo et al., CMOS
558
+ monolithic pixel sensors based on the column-drain architecture for the HL-LHC upgrade,
559
563
+ Nuclear Instruments and Methods in Physics Research Section A: Accelerators,
564
+ Spectrometers, Detectors and Associated Equipment 936 (2019) 604.
565
+ [3] Konstantinos Moustakas, Design and Development of Depleted Monolithic Active Pixel
566
+ Sensors with Small Collection Electrode for High-Radiation Applications, Ph.D. thesis,
567
+ Rheinische Friedrich-Wilhelms-Universität Bonn, Sept., 2021.
568
+ [4] M. Mager, ALPIDE, the Monolithic Active Pixel Sensor for the ALICE ITS upgrade, Nuclear
569
+ Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers,
570
+ Detectors and Associated Equipment 824 (2016) 434 .
571
+ [5] I. Caicedo, M. Barbero, P. Barrillon, I. Berdalovic, S. Bhat, C. Bespin et al., The Monopix
572
+ chips: depleted monolithic active pixel sensors with a column-drain read-out architecture for
573
+ the ATLAS Inner Tracker upgrade, Journal of Instrumentation 14 (2019) C06006.
574
+ [6] C. Bespin, M. Barbero, P. Barrillon, I. Berdalovic, S. Bhat, P. Breugnon et al., DMAPS
575
+ Monopix developments in large and small electrode designs, Nuclear Instruments and
576
+ Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and
577
+ Associated Equipment 978 (2020) 164460.
578
+ [7] M. Dyndal, V. Dao, P. Allport, I.A. Tortajada, M. Barbero, S. Bhat et al., Mini-MALTA:
579
+ radiation hard pixel designs for small-electrode monolithic CMOS sensors for the High
580
+ Luminosity LHC, Journal of Instrumentation 15 (2020) P02005.
581
+ [8] C. Bespin, I. Berdalovic, I. Caicedo, R. Cardella, J. Dingfelder, L. Flores et al., Development
582
+ and characterization of a DMAPS chip in TowerJazz 180 nm technology for high radiation
583
+ environments, Nuclear Instruments and Methods in Physics Research Section A:
584
+ Accelerators, Spectrometers, Detectors and Associated Equipment 1040 (2022) 167189.
585
+ [9] I. Perić, L. Blanquart, G. Comes, P. Denes, K. Einsweiler, P. Fischer et al., The FEI3 readout
586
+ chip for the ATLAS pixel detector, Nuclear Instruments and Methods in Physics Research
587
+ Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 565 (2006)
588
+ 178.
589
+ [10] RD53 collaboration, RD53B Manual, Tech. Rep. CERN-RD53-PUB-19-002, CERN, Geneva
590
+ (Mar, 2019).
591
+ [11] R. Diener, J. Dreyling-Eschweiler, H. Ehrlichmann, I. Gregor, U. Kötz, U. Krämer et al., The
592
+ DESY II test beam facility, Nuclear Instruments and Methods in Physics Research Section A:
593
+ Accelerators, Spectrometers, Detectors and Associated Equipment 922 (2019) 265.
594
CtFRT4oBgHgl3EQfwTji/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,276 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf,len=275
2
+ page_content='Charge collection and efficiency measurements of the TJ-Monopix2 DMAPS in 180 nm CMOS technology Christian Bespin,𝑎,∗ Ivan Caicedo,𝑎 Jochen Dingfelder,𝑎 Tomasz Hemperek,𝑎,𝑒 Toko Hirono,𝑎,𝑏 Fabian Hügging,𝑎 Hans Krüger,𝑎 Konstantinos Moustakas,𝑎,𝑐 Heinz Pernegger,𝑑 Petra Riedler,𝑑 Lars Schall,𝑎 Walter Snoeys𝑑 and Norbert Wermes𝑎 𝑎Physikalisches Institut, Universität Bonn, Nußallee 12, Bonn, Germany 𝑏Deutsches Elektronen-Synchrotron (DESY) Notkestaße.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
3
+ page_content=' 85, Hamburg, Germany 𝑐Paul Scherrer Institut, Forschungsstrasse 111, Villingen, Switzerland 𝑑CERN Espl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
4
+ page_content=' des Particules 1, Meyrin, Switzerland 𝑒DECTRIS AG Täfernweg 1, Baden-Dättwil, Switzerland E-mail: bespin@physik.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
5
+ page_content='uni-bonn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
6
+ page_content='de Monolithic CMOS pixel detectors have emerged as competitive contenders in the field of high- energy particle physics detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
7
+ page_content=' The use of commercial processes offers high-volume production of such detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
8
+ page_content=' A series of prototypes has been designed in a 180 nm Tower CMOS process with depletion of the sensor material and a column-drain readout architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
9
+ page_content=' The latest iteration, TJ-Monopix2, features a large 2 × 2 cm2 matrix consisting of 512 × 512 pixels with 33.04 µm pitch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
11
+ page_content=' A small collection electrode design aims at low power consumption and low noise while the radiation tolerance for high-energy particle detector applications needs extra attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
12
+ page_content=' With a goal to reach radiation tolerance to levels of NIEL damage of 1 × 1015 1 MeV neq/cm2, a modification of the standard process has been implemented by adding a low-dosed n-type silicon implant across the pixel in order to allow for homogeneous depletion of the sensor volume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
13
+ page_content=' Recent lab measurements and beam tests were conducted for unirradiated modules to study electrical characteristics and hit detection efficiency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
14
+ page_content=' 10th International Workshop on Semiconductor Pixel Detectors for Particles and Imaging (Pixel2022) 12-16 December 2022 Santa Fe, New Mexico, USA ∗Speaker © Copyright owned by the author(s) under the terms of the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC BY-NC-ND 4.0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
17
+ page_content=' https://pos.sissa.it/ arXiv:2301.13638v1 [physics.ins-det] 31 Jan 2023' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
22
+ page_content=' Introduction In recent years, advances in CMOS technologies have fueled the development of a new gener- ation of monolithic active pixel sensors (MAPS) with fast readout and high radiation tolerance by depleting the charge sensitive volume [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
23
+ page_content=' These depleted MAPS (DMAPS) devices are therefore an interesting candidate for high-energy particle physics experiments with high radiation environments and high particle rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
24
+ page_content=' Depletion is achieved by either using high-voltage add-ons in the CMOS technology and/or high resistivity substrates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
25
+ page_content=' The increasing availability of these features in com- mercial CMOS processes could combine the features of the detector concept with possibly faster and cheaper production than common hybrid pixel detectors for the mentioned purposes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
26
+ page_content=' The idea behind and measurements results from one of multiple DMAPS prototypes, TJ-Monopix2 [2, 3], will be presented in the following.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
27
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
28
+ page_content=' Design of TJ-Monopix2 TJ-Monopix2 is the latest DMAPS prototype from the TJ-Monopix development line which is based on the ALPIDE pixel detector developed for the ALICE ITS upgrade [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
29
+ page_content=' It is fabricated in the same 180 nm commercial CMOS process provided by Tower Semiconductor1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
30
+ page_content=' A modification of the process used for ALPIDE has been implemented to increase the radiation tolerance to levels ≥ 1 × 1015 neq cm−2 by adding a low dose n-type implant for homogeneous growth of the depletion zone with applied bias voltage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
31
+ page_content=' In measurements on first prototypes with this modification, a drop in hit detection efficiency was observed after irradiation [5, 6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
32
+ page_content=' This could be improved significantly by adding a gap in the n-type blanket or a deep p-type implant in the pixel corners to shape the electrical field towards the collection electrode [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
33
+ page_content=' The cross-sections of these two sensor designs is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
34
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
35
+ page_content=' Additionally, chips have been produced on Czochralski [figure 1 cross-section well labels omitted] (a) Modification with gap in low dose n-type implant below pixel electronics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' [figure 1 cross-section well labels omitted] (b) Modification with continuous n-type implant and deep p-type implant below pixel electronics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
37
+ page_content=' Figure 1: Cross-section variants of modified sensor process for TJ-Monopix2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
38
+ page_content=' silicon to increase the available depletable volume compared to the thickness of the epitaxial layer (O(10 µm)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
39
+ page_content=' Measurements on Czochralski silicon chips in TJ-Monopix1 showed a further increase in hit detection efficiency after irradiation [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
40
+ page_content=' TJ-Monopix2 follows a small collection electrode approach with a pixel capacitance of about 3 fF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
41
+ page_content=' The pixels of size 33 × 33 µm2 are read out using an established synchronous column- drain technique from the FE-I3 readout chip [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
42
+ page_content=' Further changes from the predecessor TJ-Monopix1 1https://towersemi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
43
+ page_content='com 2 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin include an improved front-end design, a new pixel masking scheme and a 3-bit DAC for local threshold tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
44
+ page_content=' With these changes the threshold is expected to be reduced by a factor of 3 while improving the threshold dispersion and noise behavior.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
45
+ page_content=' The digital periphery contains logic for register configuration, data handling and LVDS output drivers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
46
+ page_content=' Slow control is done via a command protocol and decoder that was taken from the RD53B readout chip [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
47
+ page_content=' Both pixel and register data is 8b10b encoded in a frame-based data stream which allows operating the chip with four differential data lines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
48
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
49
+ page_content=' Injection-based threshold and noise measurements Initial tests have been performed in a laboratory setup to measure the threshold and noise performance of TJ-Monopix2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
50
+ page_content=' All of these values are extracted from injecting different amounts of charge into the pixel a given number of times and recording the amount of registered hits 𝑛hits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
51
+ page_content=' The response function is a smeared step function of the form 𝑛hits(𝑞) = (1/2) · 𝑛injections · [erf((𝑞 − 𝑞thr)/(𝜎·√2)) + 1] (1) with 𝑞 the injected charge amount, 𝑛injections the number of consecutive injections of 𝑞 and 𝑞thr the charge at the threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
52
+ page_content=' 𝜎 denotes the gaussian smearing of the step function which is defined as the electronic noise of the pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
53
+ page_content=' The threshold is defined as the charge at which 50 % of the injected hits are recorded from the pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
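The S-curve of eq. (1) above lends itself to a short illustration. The following sketch (not from the paper; the scan range, seed, and injection count are made up, though the target threshold and noise echo the numbers quoted below) fits the error-function response to a simulated injection scan to extract threshold and ENC:

```python
# Illustrative S-curve fit for an injection scan, per eq. (1); all data here
# are simulated, not taken from the paper.
import numpy as np
from scipy.special import erf
from scipy.optimize import curve_fit

N_INJECTIONS = 100  # consecutive injections per charge point (assumed)

def scurve(q, q_thr, sigma):
    """Expected number of recorded hits for injected charge q (eq. 1)."""
    return 0.5 * N_INJECTIONS * (erf((q - q_thr) / (sigma * np.sqrt(2))) + 1)

# Simulated scan: injected charge in electrons with binomial counting noise
rng = np.random.default_rng(0)
q = np.arange(50.0, 301.0, 10.0)
hits = rng.binomial(N_INJECTIONS, scurve(q, 164.0, 5.6) / N_INJECTIONS)

popt, _ = curve_fit(scurve, q, hits, p0=[150.0, 10.0])
print(f"threshold = {popt[0]:.1f} e-, noise (ENC) = {popt[1]:.1f} e-")
```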
54
+ page_content=' Histogramming the individual thresholds from each pixel leads to the distribution shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
55
+ page_content=' 2a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
56
+ page_content=' The distribution has a mean value of 164 e− and a width [figure 2a axis ticks omitted] Fit results: μ = 164.3 e−, σ = 13.0 e−; (a) Threshold distribution before in-pixel threshold tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' [figure 2b axis ticks omitted] Fit results: μ = 163.2 e−, σ = 2.7 e−; (b) Threshold distribution after in-pixel threshold tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
62
+ page_content=' Figure 2: Threshold distribution of TJ-Monopix2 before and after adjusting the in-pixel threshold DAC to lower the dispersion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
63
+ page_content=' Color-coded is the value of the DAC setting for every pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
64
+ page_content=' of 13 e− which is defined as the threshold dispersion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
65
+ page_content=' By adjusting the threshold DAC in each pixel in order to even out the deviations from the target threshold, the threshold dispersion can be reduced significantly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
66
+ page_content=' The resulting distribution after this so-called tuning process is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
67
+ page_content=' 2b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
68
+ page_content=' While the mean threshold stays basically the same, the dispersion could be reduced by a factor of almost 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
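As a rough illustration of how such a per-pixel tuning could proceed (a generic sketch, not the actual TJ-Monopix2 routine; the step polarity and the `measure_thresholds` callback are assumptions):

```python
# Generic per-pixel threshold tuning sketch: nudge each 3-bit TDAC toward a
# common target threshold. Not the chip's actual routine; polarity is assumed.
import numpy as np

def tune_tdac(measure_thresholds, target_e=164.0, n_iterations=7):
    """measure_thresholds(tdac_map) -> per-pixel threshold array in electrons,
    e.g. obtained from repeated injection scans as in section 3."""
    tdac = np.full((512, 512), 4, dtype=np.int8)        # start mid-range
    for _ in range(n_iterations):
        thr = measure_thresholds(tdac)
        step = np.sign(thr - target_e).astype(np.int8)  # assumed DAC polarity
        tdac = np.clip(tdac + step, 0, 7)               # 3-bit DAC range
    return tdac
```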
69
+ page_content=' Both the mean threshold and threshold dispersion are significantly lower than in TJ-Monopix1, where losses in hit detection efficiency could be observed due to too large thresholds [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
70
+ page_content=' [page header and figure 3a axis ticks omitted] Fit results: μ = 11.8 e−, σ = 1.5 e−; (a) Noise distribution of TJ-Monopix1 with noticeable tail towards larger values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' [figure 3b axis ticks omitted] Fit results: μ = 5.6 e−, σ = 0.6 e−; (b) Noise distribution of TJ-Monopix2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' There is no observable tail and lower noise overall.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
77
+ page_content=' Figure 3: Noise distribution of TJ-Monopix1 (left) and TJ-Monopix2 (right) for comparison.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
78
+ page_content=' The corresponding histogram of the electronic noise is depicted in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
79
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
80
+ page_content=' As a comparison, the distribution from the predecessor TJ-Monopix1 is included, where a large tail towards higher values was observed that led to a high operational threshold in order to limit the amount of noisy pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
81
+ page_content=' It can be seen, that this tail is largely removed with slight changes to the analog front-end, which in turn lowers the threshold for a regular operation of the chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
82
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
83
+ page_content=' Hit detection efficiency measurements Two different pixel variations were investigated regarding their hit detection efficiency, that will be presented in the following – a DC-coupled, more standard design which makes up most part of the matrix and an AC-coupled investigative design realized in only a few columns of the matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
84
+ page_content=' While the former was measured in more detail, some first results of the latter are included as well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
85
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
86
+ page_content='1 Standard DC-coupled pixel flavor First measurements to determine the hit detection efficiency have been performed in a 5 GeV electron beam at the DESY II testbeam facility at DESY, Hamburg [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
87
+ page_content=' Three unirradiated modules were tested with different sensor geometries: two chips with 30 µm thick epitaxial silicon and the two geometries depicted in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
88
+ page_content=' 1 as well as one chip built on 300 µm Czochralski silicon with a gap in the low dose n-type implant (see fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
89
+ page_content=' 1a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
90
+ page_content=' It should be noted that the different substrate materials offer different sensor thicknesses and therefore charge-sensitive volume depending on the depletion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
91
+ page_content=' The measurements are not targeting a comparison between different types of silicon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
92
+ page_content=' Figures 4a and 4b show the recorded cluster charge for a chip with epitaxial layer and with Czochralski substrate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
93
+ page_content=' It can be observed that the collected charge is about 25 % larger in the Cz sample, because the depletion depth is only limited by the thickness of the sensor (300 µm) which is by far not fully depleted, but more depleted than the 30 µm thick epitaxial layer in the other chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
94
+ page_content=' The average cluster size is significantly larger in the Cz sample as well which results in a high spatial resolution due to charge-weighted clustering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
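The charge-weighted clustering mentioned here can be sketched as a charge-weighted centroid; function and variable names below are illustrative, and only the 33.04 µm pitch comes from the text:

```python
# Sketch of charge-weighted clustering: the cluster position is the centroid
# of the hit pixels weighted by their charge. Names are illustrative.
import numpy as np

PITCH_UM = 33.04  # pixel pitch quoted in the text

def cluster_position_um(cols, rows, charges):
    w = np.asarray(charges, dtype=float)
    x = np.average(np.asarray(cols, dtype=float), weights=w) * PITCH_UM
    y = np.average(np.asarray(rows, dtype=float), weights=w) * PITCH_UM
    return x, y

# A two-pixel cluster sharing its charge 60/40 is reconstructed between the
# two pixel centers, closer to the one with more charge:
print(cluster_position_um([10, 11], [20, 20], [600.0, 400.0]))
```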
95
+ page_content=' The cluster size distributions for the same samples as above are depicted in figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
96
+ page_content=' 4c and 4d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
97
+ page_content=' While cluster size 1 is predominant in the epitaxial [page header and figure 4 axis ticks omitted] (a) Cluster charge distribution for an epitaxial silicon chip, MPV charge: 2579 e−; (b) Cluster charge for a Czochralski silicon chip, MPV charge: 3235 e−.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' (c) Cluster size distribution for an epitaxial silicon chip, mean cluster size: 1.51; (d) Cluster size distribution for a Czochralski silicon chip, mean cluster size: 1.95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
103
+ page_content=' Figure 4: Cluster charge and size distributions for a chip with 30 µm epitaxial silicon (left) and 300 µm Czochralski silicon (right) at −6 V bias voltage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
104
+ page_content=' The latter can be depleted further than 30 µm resulting in a larger cluster charge and size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
105
+ page_content=' Both chips were operated at a threshold of 200 e−.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
106
+ page_content=' sample, the Cz sample has mainly clusters of size 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
107
+ page_content=' The corresponding average cluster size is 1.55 and 1.95, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
110
+ page_content=' Taking the pointing resolution of the beam telescope into account, an intrinsic spatial resolution of 8.6 µm could be achieved in a Czochralski silicon sample.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
112
+ page_content=' The hit detection efficiency was measured with a beam telescope with six Mimosa26 planes and a FE-I4 time reference plane which are all connected to a trigger logic unit to synchronize individual detector hits time-wise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
113
+ page_content=' The efficiency for all three modules is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
114
+ page_content=' 5 where the result for every pixel was mapped onto a two by two pixel cell to increase the statistics to see possible effects or efficiency losses within a single pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
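The two-by-two pixel mapping described here amounts to folding each track intercept modulo two pixel pitches; a minimal sketch, with the micrometer coordinate convention assumed:

```python
# Minimal sketch of folding track intercepts into a 2 x 2 pixel super-cell
# for in-pixel efficiency maps. Coordinates in micrometers (assumed).
import numpy as np

PITCH_UM = 33.04

def fold_to_2x2_cell(x_um, y_um):
    cell = 2.0 * PITCH_UM               # 66.08 um super-cell edge
    return np.mod(x_um, cell), np.mod(y_um, cell)

print(fold_to_2x2_cell(123.4, 456.7))   # -> in-cell coordinates
```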
115
+ page_content=' All samples were running at a threshold of about 200 e− and achieve a hit detection efficiency around 99.80 % with slight deviations within the error (estimated < 0.1 %).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
118
+ page_content=' There are no losses observable in the pixel corners or between pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
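One common way to quote such an efficiency with a sub-0.1 % uncertainty is a binomial (Clopper-Pearson) interval over matched telescope tracks; the paper does not state its method, and the counts below are hypothetical:

```python
# Sketch: hit-detection efficiency with a Clopper-Pearson binomial interval.
# Counts are hypothetical; edge cases (0 or all matched) are not handled.
from scipy.stats import beta

def efficiency_cp(n_matched, n_tracks, cl=0.6827):
    eff = n_matched / n_tracks
    alpha = 1.0 - cl
    lo = beta.ppf(alpha / 2, n_matched, n_tracks - n_matched + 1)
    hi = beta.ppf(1 - alpha / 2, n_matched + 1, n_tracks - n_matched)
    return eff, lo, hi

print(efficiency_cp(99_800, 100_000))   # ~ (0.9980, 0.9979, 0.9981)
```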
119
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
120
+ page_content='2 AC-coupled pixel flavor Another pixel variation with different analog front-end was tested as well to determine its performance in a particle beam.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
121
+ page_content=' In this design the (positive) bias voltage is applied via a diode on the top side of the chip and connected to the charge collection n-well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
122
+ page_content=' To avoid breakdown of the front-end electronics due to the high voltage (≤ 50 V) on that well, the input signal is AC coupled to [page header and figure 5 in-pixel efficiency maps omitted; axes: column/row in µm, color scale 90–100 %].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' (a) (99.80 ± 0.10) % efficiency for a chip built on epitaxial silicon with gap in n-layer modification from fig. 1a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' (b) (99.79 ± 0.10) % efficiency for a chip built on Cz silicon with gap in n-layer modification from fig. 1a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
+ page_content=' (c) (99.85 ± 0.10) % efficiency for a chip built on epitaxial silicon with additional p-well modification from fig. 1b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
161
+ page_content=' Figure 5: Hit detection efficiencies for different substrate materials with different thicknesses and sensor geometries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
162
+ page_content=' Results were mapped onto a 2 x 2 pixel array for higher statistics and in-pixel resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
163
+ page_content=' The chips were operated with −6 V bias voltage and at a 200 e− threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
164
+ page_content=' the amplifier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
165
+ page_content=' This approach can potentially deplete the substrate further due to the higher voltage than what can be applied in the standard pixel design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
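The scaling behind that statement can be illustrated with the textbook one-sided abrupt-junction approximation, w = sqrt(2·ε·V/(q·N_eff)); the effective doping used below is an assumption for illustration, not a number from the paper:

```python
# Illustrative depletion-depth estimate, w = sqrt(2*eps*V / (q*N_eff)); the
# effective doping is assumed for illustration only.
import math

EPS_SI = 11.7 * 8.854e-12   # permittivity of silicon, F/m
Q_E = 1.602e-19             # elementary charge, C

def depletion_depth_um(v_bias, n_eff_cm3=1.0e13):
    n_eff = n_eff_cm3 * 1.0e6                      # cm^-3 -> m^-3
    return math.sqrt(2 * EPS_SI * v_bias / (Q_E * n_eff)) * 1.0e6

for v in (5, 25, 50):
    print(f"{v:>2} V -> ~{depletion_depth_um(v):.0f} um")
```

Under this assumption the depth grows with the square root of the bias, which is why the larger applicable voltage of the AC-coupled flavor can pay off after irradiation.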
166
+ page_content=' The hit detection efficiency was measured for different bias voltages and is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
167
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
168
+ page_content=' At 5 V the efficiency is already above 99 % and reaches the same value as for the DC coupled pixel flavor of 99.85 % at or before 25 V bias voltage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
170
+ page_content=' This is, taking the slightly higher threshold into account, in agreement with the expectation that there should be no noticeable difference in hit detection efficiency before irradiation between the two pixel flavors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
171
+ page_content=' The larger applicable bias voltage could prove superior after irradiation to achieve more depletion and therefore higher charge signal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
172
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
173
+ page_content=' Conclusion In summary, the performance of TJ-Monopix2 shows a significant improvement in threshold value and dispersion compared to TJ-Monopix1, although the former is higher than its design value 6 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin 0 10 20 30 40 50 60 column [ m] 0 10 20 30 40 50 60 row [ m] Region 1 (HV CASC): In-pixel efficiency for DUT 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
174
+ page_content='00 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
175
+ page_content='25 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
176
+ page_content='50 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
177
+ page_content='75 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
178
+ page_content='00 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
179
+ page_content='25 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
180
+ page_content='50 98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
181
+ page_content='75 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
182
+ page_content='00 (a) (99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
183
+ page_content='21 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
184
+ page_content='10) % efficiency of an AC-coupled pixel flavor at 5 V bias voltage and 250 e− threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
185
+ page_content=' 0 10 20 30 40 50 60 column [ m] 0 10 20 30 40 50 60 row [ m] Region 1 (HV CASC): In-pixel efficiency for DUT 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
186
+ page_content='00 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
187
+ page_content='25 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
188
+ page_content='50 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
189
+ page_content='75 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
190
+ page_content='00 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
191
+ page_content='25 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
192
+ page_content='50 98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
193
+ page_content='75 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
194
+ page_content='00 (b) (99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
195
+ page_content='85 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
196
+ page_content='10) % efficiency of an AC-coupled pixel flavor at 25 V bias voltage and 200 e− threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
197
+ page_content=' Figure 6: Hit detection efficiency of an AC-coupled pixel flavor at (6a) 5 V and (6b) 25 V bias voltage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
198
+ page_content=' (120 e−).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
199
+ page_content=' With the measured amount of charge in the sensor the threshold is still small enough to detect a majority of hits even from large clusters before irradiation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
200
+ page_content=' The large signal compared to the small pixel leads to a large cluster size and therefore high spatial resolution, where chips on Czochralski substrate perform slightly better due to the larger sensor volume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
201
+ page_content=' For the tested sensor materials with different thicknesses and sensor geometries, the hit detection efficiency is around 99.8 % or better in all cases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
203
+ page_content=' The modified front-end version with bias applied on the charge collection node achieves similar values for the hit detection efficiency while providing a larger headroom in bias voltage to achieve efficient performance after radiation damage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
204
+ page_content=' The results for irradiated modules will be presented in a forthcoming publication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
205
+ page_content=' Acknowledgments This project has received funding from the Deutsche Forschungsgemeinschaft DFG (grant WE 976/4-1), the German Federal Ministry of Education and Research BMBF (grant 05H15PDCA9), and the European Union’s Horizon 2020 research and innovation program under grant agreements no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
206
+ page_content=' 675587 (Maria Sklodowska-Curie ITN STREAM), 654168 (AIDA-2020), and 101004761 (AIDAinnova).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
207
+ page_content=' The measurements leading to these results have been performed at the Test Beam Facility at DESY Hamburg (Germany), a member of the Helmholtz Association (HGF).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
208
+ page_content=' References [1] I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
209
+ page_content=' Perić, A novel monolithic pixelated particle detector implemented in high-voltage CMOS technology, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 582 (2007) 876.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
210
+ page_content=' [2] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
211
+ page_content=' Moustakas, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
212
+ page_content=' Barbero, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
213
+ page_content=' Berdalovic, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
214
+ page_content=' Bespin, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
215
+ page_content=' Breugnon, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
216
+ page_content=' Caicedo et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
217
+ page_content=', CMOS monolithic pixel sensors based on the column-drain architecture for the HL-LHC upgrade, 7 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 936 (2019) 604.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
218
+ page_content=' [3] Konstantinos Moustakas, Design and Development of Depleted Monolithic Active Pixel Sensors with Small Collection Electrode for High-Radiation Applications, Ph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
219
+ page_content='D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
220
+ page_content=' thesis, Rheinische Friedrich-Wilhelms-Universität Bonn, Sept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
221
+ page_content=', 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
222
+ page_content=' [4] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
223
+ page_content=' Mager, ALPIDE, the Monolithic Active Pixel Sensor for the ALICE ITS upgrade, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 824 (2016) 434 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
224
+ page_content=' [5] I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
225
+ page_content=' Caicedo, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
226
+ page_content=' Barbero, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
227
+ page_content=' Barrillon, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
228
+ page_content=' Berdalovic, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
229
+ page_content=' Bhat, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
230
+ page_content=' Bespin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
231
+ page_content=', The Monopix chips: depleted monolithic active pixel sensors with a column-drain read-out architecture for the ATLAS Inner Tracker upgrade, Journal of Instrumentation 14 (2019) C06006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
232
+ page_content=' [6] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
233
+ page_content=' Bespin, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
234
+ page_content=' Barbero, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
235
+ page_content=' Barrillon, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
236
+ page_content=' Berdalovic, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
237
+ page_content=' Bhat, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
238
+ page_content=' Breugnon et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
239
+ page_content=', DMAPS Monopix developments in large and small electrode designs, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 978 (2020) 164460.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
240
+ page_content=' [7] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
241
+ page_content=' Dyndal, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
242
+ page_content=' Dao, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
243
+ page_content=' Allport, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
244
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
245
+ page_content=' Tortajada, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
246
+ page_content=' Barbero, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
247
+ page_content=' Bhat et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
248
+ page_content=', Mini-MALTA: radiation hard pixel designs for small-electrode monolithic CMOS sensors for the High Luminosity LHC, Journal of Instrumentation 15 (2020) P02005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
249
+ page_content=' [8] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
250
+ page_content=' Bespin, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
251
+ page_content=' Berdalovic, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
252
+ page_content=' Caicedo, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
253
+ page_content=' Cardella, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
254
+ page_content=' Dingfelder, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
255
+ page_content=' Flores et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
256
+ page_content=', Development and characterization of a DMAPS chip in TowerJazz 180 nm technology for high radiation environments, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 1040 (2022) 167189.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
257
+ page_content=' [9] I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
258
+ page_content=' Perić, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
259
+ page_content=' Blanquart, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
260
+ page_content=' Comes, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
261
+ page_content=' Denes, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
262
+ page_content=' Einsweiler, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
263
+ page_content=' Fischer et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
264
+ page_content=', The FEI3 readout chip for the ATLAS pixel detector, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 565 (2006) 178.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
265
+ page_content=' [10] RD53 collaboration, RD53B Manual, Tech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
266
+ page_content=' Rep.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
267
+ page_content=' CERN-RD53-PUB-19-002, CERN, Geneva (Mar, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
268
+ page_content=' [11] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
269
+ page_content=' Diener, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
270
+ page_content=' Dreyling-Eschweiler, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
271
+ page_content=' Ehrlichmann, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
272
+ page_content=' Gregor, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
273
+ page_content=' Kötz, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
274
+ page_content=' Krämer et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
275
+ page_content=', The DESY II test beam facility, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 922 (2019) 265.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
276
+ page_content=' 8' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'}
DNE0T4oBgHgl3EQfygIs/content/tmp_files/2301.02659v1.pdf.txt ADDED
@@ -0,0 +1,1969 @@
1
+ Bayesian Modelling
2
+ of
3
+ Visual Discrimination Learning in Mice
4
+ Pouya Baniasadi, PhD
5
+ Department of Physiology, Development and Neuroscience
6
+ UNIVERSITY OF CAMBRIDGE
7
+ August 2020
8
+ This project report is written in partial fulfilment of the requirement for the
9
+ Master of Philosophy in Basic and Translational Neuroscience
10
+ Supervised by
11
+ Dr. Jasper Poort
12
+ Prof. Máté Lengyel
13
+ Selective Vision Laboratory
14
+ Computational and Biological
15
+ Learning Laboratory
16
+ Department of Psychology
17
+ Department of Engineering
18
+ arXiv:2301.02659v1 [q-bio.NC] 15 Nov 2022
19
+
20
+ Dedication
22
+ For my parents Mahin and Ghasem, who taught me about pursuing dreams,
23
+ for their endless love, support and sacrifices
24
25
+
26
+ Declaration
27
+ This report describes work carried out at Cambridge University from Jan 2020 to Jul 2020
28
+ under the supervision of Dr Jasper Poort (Selective Vision Laboratory at the Department
29
+ of Psychology) and Prof. Máté Lengyel (Computational and Biological Learning Lab at
30
+ the Department of Engineering) as a part of the MPhil program in Basic and Translational
31
+ Neuroscience. I confirm that the material in this report is not copied from any published
32
+ material, nor is it a paraphrase or abstract of any published material unless it is identified
33
+ as such and a full source reference is given. I confirm that, other than where indicated
34
+ above, this document is my own work.
35
+ Pouya Baniasadi
36
+ August 2020
37
38
+
39
+ Abstract
40
+ The brain constantly turns large flows of sensory information into selective representations
41
+ of the environment. It, therefore, needs to learn to process those sensory inputs that
42
+ are most relevant for behaviour. It is not well understood how learning changes neural
43
+ circuits in visual and decision-making brain areas to adjust and improve its visually guided
44
+ decision-making. To address this question, head-fixed mice were trained to move through
45
+ virtual reality environments and learn visual discrimination while neural activity was
46
+ recorded with two-photon calcium imaging. Previously, descriptive models of neuronal
47
+ activity were fitted to the data, which was used to compare the activity of excitatory and
48
+ different inhibitory cell types. However, the previous models did not take the internal
49
+ representations and learning dynamics into account. Here, I present a framework to infer
50
+ a model of internal representations that are used to generate the behaviour during the
51
+ task. We model the learning process from untrained mice to trained mice within the
52
+ normative framework of the ideal Bayesian observer and provide a Markov model for
53
+ generating the movement and licking. The framework provides a space of models where
54
+ a range of hypotheses about the internal representations could be compared for a given
55
+ data set.
56
+ Contents
+ Declaration
+ Abstract
+ 1 Introduction
+   1.1 Mathematical preliminaries
+ 2 The experiment
+   2.1 Experimental setup
+   2.2 Behavioral data and observations
+ 3 Behavioral model part 1: internal representations
+   3.1 Structure of spatial states
+   3.2 Space of models for spatial states
+   3.3 Bayesian learning model
+     3.3.1 Learning reward probability within a state
+     3.3.2 Learning state transitions
+ 4 Behavioral model part 2: the generative model
+   4.1 Spatial state parameter ˜λk: licking rate
+   4.2 Parameter ˜νk: target speed within the current spatial state
+   4.3 Generative model of licking and speed
+   4.4 Estimation of model parameters
+ 5 Discussion
+   5.1 Limitations
+   5.2 Implications
+ Bibliography
130
+
131
+ Chapter 1
132
+ Introduction
133
+ Learning modifies neural representations of behaviourally relevant information. While
134
+ changes in response selectivity to behaviourally relevant stimuli have been observed in
135
+ many studies across different species (Yang & Maunsell 2004, Yan et al. 2014, Poort
136
+ et al. 2015), there has been growing evidence that different cell types, classified using
137
+ molecular and cellular properties (Kepecs & Fishell 2014), have specific roles in learning
138
+ (Khan et al. 2018, Fishell & Kepecs 2019). However, the nature of these changes and
139
+ how they relate to sensory coding is not well understood (Yap & Greenberg 2018).
140
+ Probabilistic models of behavioural learning are an important approach to link the
141
+ changes in neural representations to internal representation of the environment and
142
+ decision-making (Fiser et al. 2010, Berkes et al. 2011, Heeger 2017). Given the non-
143
+ deterministic nature of events in the real world, human and animal learning must involve
144
+ at least some internal representations of the uncertainties in the environment (Barlow
145
+ et al. 1961). There has been an extensive body of research on how the nervous system
146
+ represents uncertainty about the environment (Pouget et al. 2003, Beck et al. 2008, Fiser
147
+ et al. 2010, Kriegeskorte & Douglas 2018).
148
+ Bayesian learning theory provides a normative framework of learning that represents
149
+ uncertainty in probabilistic outcomes (Bishop 2006).
150
+ In particular, the ideal observer
151
+ analysis uses the Bayesian learning theory for achieving optimal learning performance in
152
+ a given task (Geisler 2003, 2011). Learning can be conceptualised as the incorporation
153
+ of sensory information to update and improve performance on a given task. The ideal
154
+ observer performs at the theoretical limits of information processing to update their
155
159
+ beliefs. However, it is important to note that optimality in this context refers to the
160
+ optimal incorporation of information, which is not equivalent to achieving the optimal
161
+ solution in all trials. While the nervous system may or may not have representations
162
+ similar to an ideal observer, the ideal observer analysis provides a systematic framework to
163
+ formulate hypotheses about the internal representations and learning dynamics (Maloney
164
+ & Mamassian 2009, Orbán et al. 2008).
165
+ In this thesis, we describe a Bayesian learning model using the framework of ideal observer
166
+ learning. Our goal is to develop a model of internal representations of reward and space
167
+ that are used for learning and adjusting behaviour in the visual discrimination task. This
168
+ model will allow us in future work to relate the neuronal activity measurements (Poort
169
+ et al. 2015, Khan et al. 2018) to the internal representations that guide behaviour. We
170
+ continue this chapter with a brief overview of the basic mathematical ideas used to develop
171
+ the model. Then, in Chapter 2, we explain the experimental setup and describe the
172
+ behavioural data. A space of models (for the structure of Markov models) is introduced
173
+ in Chapter 3 which defines the internal representations of reward and state transitions.
174
+ Then, a Bayesian model of learning reward probabilities and state transitions is described
175
+ that uses the ideal observer framework. In Chapter 4, we introduce a generative Markov
176
+ model that uses internal representations to generate behaviour. We also discuss the use
177
+ of maximum likelihood estimation to estimate the model parameters. Finally, in Chapter
178
+ 5, we discuss the potential applications and limitations of the model and set out a path
179
+ for the continuation of the research.
180
+ 1.1
181
+ Mathematical preliminaries
182
+ In this section, I briefly introduce the concepts that provide the mathematical foundation
183
+ of the behavioural model.
184
+ Markov chain model
185
+ A system has the Markov property if the predictions about future events only require the
186
+ knowledge of the system’s present state. In other words, given the present state of the
187
+ system, future events are conditionally independent of past events. A Markov chain is a
188
+ stochastic model of a sequence of events with the Markov property.
189
+ Let S = {s1, s2, ..., sr} be a set of states for a Markov chain. The process starts in one of
190
+ these states and moves sequentially from one state to another. Each move is called a step.
191
+ Let Xn be the current step. We denote by pij = P(Xn+1 = sj|Xn = si), the transition
192
196
+ probability of visiting state sj after visiting si. Note that by Markov property, given Xn,
197
+ Xn+1 is conditionally independent of the past states. A transition from si to sj can be
198
+ represented as a directed edge (si, sj) with a corresponding transition probability pij. The
199
+ sum of transition probabilities of the outgoing edges from a state should add up to 1.
200
+ Figure 1.1 illustrates a Markov chain with 4 states and transition probabilities.
201
+ Figure 1.1: A Markov chain with four states S1–S4 and its transition probabilities.
202
+ Let T = [pij] be the transition probability matrix for the Markov chain and let u be
203
+ the probability vector which represents the starting distribution (i.e., Xk ∼ u). Then
204
+ the probability that the chain is in state si after m steps is the i-th entry in the vector
205
+ u(m) := u T^m. That is,
+ P(Xk+m = si | Xk ∼ u) = ui(m). (1.1)
209
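+ As a minimal sketch of Equation (1.1), the snippet below propagates a starting distribution
+ through a transition matrix. The exact edge structure of Figure 1.1 is not recoverable from the
+ text, so the matrix here is an illustrative assumption:
+ ```python
+ import numpy as np
+ 
+ # Illustrative transition matrix for a 4-state chain (each row sums to 1).
+ # The edges are assumed for the example, not taken from the report.
+ T = np.array([
+     [0.0, 0.7, 0.0, 0.3],   # s1 -> s2 (0.7), s1 -> s4 (0.3)
+     [0.0, 0.0, 0.6, 0.4],   # s2 -> s3 (0.6), s2 -> s4 (0.4)
+     [0.5, 0.0, 0.5, 0.0],   # s3 -> s1 (0.5), s3 -> s3 (0.5)
+     [0.0, 0.0, 0.0, 1.0],   # s4 -> s4 (absorbing)
+ ])
+ u = np.array([1.0, 0.0, 0.0, 0.0])      # start in s1 with probability 1
+ u_m = u @ np.linalg.matrix_power(T, 3)  # u(m) = u T^m with m = 3
+ print(u_m)                              # distribution over states after 3 steps
+ ```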
+ Bayesian learning
210
+ The probability of an event A is denoted by P(A). Consider another event B and its
211
+ corresponding probability P(B). The conditional probability P(A|B) is the conditional
212
+ probability of A given B. Bayes Theorem states that
213
+ P(A|B) = P(B|A) P(A) / P(B).
215
+ Consider a system that generates data and a space of possible models for describing
216
+ the behaviour of the system.
217
+ The probability distribution over the space of models
218
+ P(Model) represents the prior knowledge about the system. Suppose that a set of data
219
+ D is observed from the system. Then P(D | Model) is called the likelihood and P(D)
220
234
+ is called the model evidence or marginal likelihood. The posterior distribution over the
235
+ models P(Model | D) represents our beliefs about the system after observing the data
236
+ D. Bayes rule provides a principled way of updating our beliefs about the system after
237
+ observing data. Formally,
238
+ P(Model | D) = P(D | Model) P(Model) / P(D). (1.2)
242
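+ For concreteness, a toy numeric instance of Equation (1.2); the coin and its two candidate
+ models are illustrative and not from the experiment:
+ ```python
+ # Two candidate models for a coin: P(heads) = 0.5 ("fair") vs 0.8 ("biased"),
+ # uniform prior over models, data D = three heads in a row.
+ priors = {"fair": 0.5, "biased": 0.5}
+ likelihood = {"fair": 0.5**3, "biased": 0.8**3}            # P(D | Model)
+ evidence = sum(priors[m] * likelihood[m] for m in priors)  # P(D)
+ posterior = {m: priors[m] * likelihood[m] / evidence for m in priors}
+ print(posterior)  # {'fair': 0.196..., 'biased': 0.803...}
+ ```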
+ Dirichlet distribution learning of categorical probability values
243
+ Consider a random variable which can take on K possible categories. The categorical
244
+ distribution is a discrete probability distribution for the random variable, where the
245
+ probability of each category is separately specified.
246
+ The categorical distribution is a
247
+ generalisation of the Bernoulli distribution for a discrete variable with more than two
248
+ outcomes, such as the probability of outcomes for a 6-sided die. It is also a special case
249
+ of the multinomial distribution where the number of trials is one.
250
+ If the probabilities of each outcome for a categorical distribution are unknown, using
251
+ Bayesian learning, we can update prior probability distributions of probability values.
252
+ The Dirichlet distribution is a conjugate before the multinomial (and categorical) distri-
253
+ bution, meaning starting with a Dirichlet prior and multinomial likelihood, the resulting
254
+ posterior is also a Dirichlet distribution. The probability mass function for the Dirichlet
255
+ distribution DirK(α) with K categories is
256
+ f(p|α) = (1 / B(α)) ∏_{i=1}^{K} pi^{α(i)−1}, (1.3)
266
+ where α = (α(1), . . . , α(K)) is the vector of parameters. Furthermore,
267
+ B(α) = ∏_{i=1}^{K} Γ(α(i)) / Γ(∑_{i=1}^{K} α(i)),
+ where for a positive real number n,
+ Γ(n) = ∫_0^∞ x^{n−1} e^{−x} dx.
+ For positive integers n, Γ(n) = (n − 1)! .
281
+ To learn probabilities for a categorical distribution, given a prior distribution DirK(α)
282
286
+ over the probability vector p = (p1, . . . , pK), and data c = (c1, . . . , cK) representing the
+ number of observations in each category, the posterior distribution is
+ p | c ∼ DirK(c + α). (1.4)
291
+ Finally, the Beta distribution is a special case of the Dirichlet distribution where the
292
+ outcomes are binary (true or false). To distinguish this special case, we may use the
293
+ notation Beta(β(1), β(2)) ≡ Dir2(α) where α = {β(1), β(2)}.
294
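+ A minimal sketch of the conjugate update in Equation (1.4); the prior, counts and number of
+ categories are made-up values:
+ ```python
+ import numpy as np
+ 
+ def dirichlet_update(alpha, counts):
+     """Posterior parameters Dir(alpha + c) for a Dirichlet prior Dir(alpha)
+     and categorical observation counts c (Equation 1.4)."""
+     return np.asarray(alpha, float) + np.asarray(counts, float)
+ 
+ alpha_post = dirichlet_update(np.ones(3), [5, 1, 0])  # uniform Dir(1,1,1) prior
+ print(alpha_post / alpha_post.sum())  # posterior means: [0.667, 0.222, 0.111]
+ ```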
295
+
296
+ Chapter 2
297
+ The experiment
298
+ In this chapter, I describe the experimental setup in Khan et al. (2018) and Poort et al.
299
+ (2015) for which we have developed a behavioural model in the later chapters. A summary
300
+ of previous findings and a description of the behavioural data accompanied by figures are
301
+ also included.
302
+ 2.1
303
+ Experimental setup
304
+ The experimental setup involves the placement of the mouse on a cylindrical treadmill
305
+ where its head is fixed to enable imaging of neural activity. The mouse can move forward
306
+ (and backward). A screen in front of the mouse displays visual feedback coupled to the
+ treadmill movement, simulating movement of the subject through a virtual environment.
+ By controlling the layout of the space and the visual stimuli while allowing
309
+ imaging, the VR setup has been extensively used for studying the visual cortex and
310
+ hippocampus in mice in recent years (Harvey et al. 2009, Dombeck et al. 2010, Khan
311
+ et al. 2018, Poort et al. 2015, Saleem et al. 2018). Figure 2.1 illustrates the VR setup.
312
+ Figure 2.1: Movement in a corridor simulated in the VR environment.
313
317
+ Specifics of the corridor space and reward administration
318
+ We specifically consider the experimental setup described in Khan et al. (2018), Poort
319
+ et al. (2015). In these two studies, the activity of populations of neurons in V1 was mea-
320
+ sured with two-photon calcium imaging (Chen et al. 2013) during a visual discrimination
321
+ task in a virtual reality (VR) environment. Head-fixed mice ran through a simulated
322
+ corridor where different types of visual stimuli were displayed on the walls. Three types
323
+ of wall patterns characterise the different corridors. The grey corridor displayed a short
+ stretch of circle patterns followed by grey walls for a random distance, before the pattern
+ on the walls abruptly changed to one of the grating patterns. The grating corridors either
326
+ displayed vertical gratings (illustrated in Figure 2.1) or angled gratings for a fixed length
327
+ (60 VR length units), before returning to the grey corridor. An illustration of the corridor space is
328
+ displayed in Figure 2.2.
329
+ Figure 2.2: Illustration of the corridor space.
330
+ A milk dispenser was placed in front of the mouse to administer rewards. Mice received a
331
+ reward for licking the dispenser in a reward zone starting halfway in the vertical grating
332
+ corridor and extending for around 10 VR length units. If the mouse licked the dispenser in
333
+ the reward zone, it would trigger the opening of the reward valve and a drop of soy milk
334
+ would appear at the dispenser. No punishment was given for licking in the corridors with
335
+ grey and angled grating walls. All mice learnt to discriminate the two stimuli, starting at
336
+ the chance performance (behavioural d′ close to zero) and reaching the threshold criterion
337
+ of d′ > 2.0 within 5-9 days.
338
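+ The report does not restate the definition of d′; presumably it is the standard signal detection
+ measure, with H and F as the hit and false-alarm rates for licking in rewarded vs. unrewarded
+ corridors (an assumption here):
+ ```latex
+ % behavioural sensitivity; \Phi^{-1} is the inverse standard normal CDF
+ d' = \Phi^{-1}(H) - \Phi^{-1}(F)
+ ```
+ Under this definition, d′ ≈ 0 corresponds to chance performance and d′ > 2.0 to reliable
+ discrimination.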
342
+ Summary of previous findings
343
+ The motivation behind developing a behavioural model is to take advantage of the be-
344
+ havioural data for the future analysis of experiments similar to Khan et al. (2018). A
345
+ summary of results in Khan et al. (2018) is as follows. After learning the visual discrim-
346
+ ination task, neurons showed increased stimulus selectivity for the angled and vertical
347
+ gratings. Interestingly, this effect depended on the cell types. In particular, stimulus se-
348
+ lectivity for populations of pyramidal cells (PYR) along with parvalbumin (PV), somato-
349
+ statin (SOM), and vasoactive intestinal peptide-expressing (VIP) inhibitory interneurons
350
+ in layer 2/3 (L2/3) of the primary visual cortex (V1) were compared. Selectivity was
351
+ increased for PYR and PV cells. PV neurons became as selective as the PYR cells, and
352
+ showed changes in functional interactions, particularly with PYR cells. On the other
353
+ hand, SOM neurons became decorrelated from the network and PYR–SOM coupling
354
+ before learning predicted selectivity increases in individual PYR cells. While SOM inhi-
355
+ bition seemed to gate changes in selectivity, PV cells provided strong stimulus selective
356
+ inhibition after learning. A multivariate autoregressive linear model (MVAR model) was
+ fitted to the activity of the neurons and further supported the statistical analysis results.
358
+ However, the MVAR model arguably neglects potentially important information in the
359
+ behavioural data. Even though speed is taken into account, its contribution to the be-
360
+ haviour of the MVAR model is negligible. Accordingly, one of the primary motivations of
361
+ the behavioural model proposed in this report is potential improvement of the MVAR
+ model. This is discussed in more detail in Chapter 5.
363
+ 2.2
364
+ Behavioral data and observations
365
+ Behavioural data were collected during the experiment. The distance travelled from the
366
+ onset of the current corridor and the corridor type (determined by the wall patterns) are
367
+ continuously recorded. The observed variables of spatial location and visual stimuli at
368
+ each time are marked by a pair (x, y) ∈ (xLoc × Cor), where x is the distance travelled
369
+ from the onset of the current corridor pattern, and y is the corridor pattern. Set xLoc =
370
+ [0, max(x)] is the interval from 0 to the maximal corridor length max(x), and set
371
+ Cor = {grey, vertical, angled} is the set of corridor types. The speed of the subject at
372
+ each time is also recorded. A list of licking times and valve opening times (indicating
373
+ reward administration) is also given by the data.
374
+ For the generative behavioural model in Chapter 4, we discretize the data into time
375
+ intervals of ∆τ seconds, each identified by an index t ∈ {1, 2, . . . , N}.
376
+ The value of
377
+ ∆τ determines the time resolution of the behavioural data. Since the imaging data of
+ Khan et al. (2018) is taken in 1/8-second intervals, time resolutions finer than 1/8 s
+ are not useful. Coarser time resolutions (larger ∆τ) may be desirable because they
+ decrease the computational cost of the analysis, but the cost of losing temporal
+ resolution must be considered. However, unless explicitly discussed, we can assume
+ ∆τ = 1/8 s for the data analysis. Table 2.1 describes the notation used to describe
+ the data. Note that some
392
+ of the records are behavioural, while others specify the values that are observed by the
393
+ subject.
394
+ Table 2.1: Behavioral and observational records for t ∈ {1, 2, . . . , N}.
+ Data | Type | Description
+ xt | Observation | True value of the distance from the onset of the current corridor at time step t.
+ yt | Observation | yt ∈ Cor = {grey, vertical, angled}, the true corridor type, which determines the visual stimuli at time step t.
+ ot | Observation | Binary value indicating whether the reward valve opened during the time step.
+ vt | Behavior | Average speed at time step t.
+ lt | Behavior | Number of licks at time step t.
417
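+ A minimal sketch of this discretization, assuming the raw records are lick event times and a
+ sampled speed trace (the exact raw-data layout is an assumption):
+ ```python
+ import numpy as np
+ 
+ def discretize(t_end, lick_times, speed_times, speed_values, dt=1/8):
+     """Bin raw behavioural records into time steps of length dt (= Δτ).
+ 
+     lick_times: lick event times (s); speed_times/speed_values: sampled
+     speed trace. Returns per-bin lick counts l_t and mean speeds v_t,
+     following the notation of Table 2.1.
+     """
+     edges = np.arange(0.0, t_end + dt, dt)
+     l_t, _ = np.histogram(lick_times, bins=edges)            # licks per bin
+     bin_idx = np.clip(np.digitize(speed_times, edges) - 1, 0, len(edges) - 2)
+     sums = np.bincount(bin_idx, weights=speed_values, minlength=len(edges) - 1)
+     counts = np.bincount(bin_idx, minlength=len(edges) - 1)
+     v_t = np.divide(sums, counts, out=np.zeros_like(sums), where=counts > 0)
+     return l_t, v_t
+ ```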
+ Instance of data visualisations
418
+ The Figures below are instances of behavioural data visualizations from the experimental
419
+ data. Figures 2.3 and 2.4 (Poort et al. 2015) illustrate the licking behaviour at different
420
+ positions in different corridors, and Figures 2.5 (Poort et al. 2015) and 2.6 (Khan et al.
421
+ 2018) give a colour map of speed at the different positions in the different corridors. For
422
+ all Figures, the horizontal axis represents the position relative to the onset of the grating
+ corridor 1, and the vertical axis is the trial index. Higher trial numbers are later. The
+ black or red labels are data labels associated with the experimental sessions.
425
+ The following observations about the licking behaviour have influenced parameter defini-
426
+ tions and assumptions about prior beliefs of the animal in Chapter 3. These observations
427
+ are consistent among all subjects.
428
+ Reward association prior: The mice do not know the reward associations at the
+ start of training. However, the mice know that moving forward and licking the dispenser may
430
+ lead to a reward. Initially, the licking behaviour is frequent to explore the space
431
+ and discover reward associations. A uniformly random prior for reward probability
432
+ may be appropriate.
433
+ 1 Note that for the grey corridor this is obtained by shifting xt by the length of the grey corridor.
434
438
+ Change of visual discrimination: The behaviour of the mice in the grating
439
+ area and the grey area starts to diverge immediately, and the behaviour of the
440
+ mouse in angled and vertical grating corridors seems to be similar at first; the
441
+ differences in licking behaviour seem to emerge only after the reward is present in the
+ vertical grating corridor. The dissociation of the reward from the angled grating is
443
+ realised substantially later than the dissociation of the reward from the grey area.
444
+ It seems that at different points in the trial, the set of visually discriminated stimuli
445
+ is different.
446
+ Location is also taken into account: As the learning progresses, the licking
447
+ concentrates close to the reward zone. It seems that the mice associate a spatial
448
+ region, characterised by both visual stimuli and spatial positioning, with the reward
449
+ area.
450
+ The following observations about the speed have influenced our generative model of speed
451
+ in Section 4.3. These observations are consistent among all subjects.
452
+ Reward association influences speed: the graphs suggest that the dissociation
453
+ of reward in upcoming regions is associated with higher speed while anticipation of
454
+ reward in upcoming regions is associated with reduction of speed.
455
+ Evidence for change in the internal model: while speed behaviour in the
456
+ grey corridor diverges from the grating corridor quickly, the divergence of speed
457
+ behaviour for the angled and vertical gratings happens at a later point.
458
+ This
459
+ suggests that the mice initially correlate the grating areas with the reward, and
460
+ then learn to differentiate between the grating areas to dissociate the angled grating
+ from the reward.
462
+ Change of visual discrimination: Similar to the licking behaviour, initially
463
+ speed behaviour seems to discriminate between the angled and vertical gratings
464
+ only after the reward is present in the vertical grating corridor.
465
+ This suggests
466
+ that the mice initially correlate the grating areas with reward, and then learn to
467
+ discriminate between the vertical and angled grating areas.
468
472
+ Figure 2.3: Lick locations for M27. See the figure descriptions below.
473
+ Figure 2.4: Lick locations for M31 in all trials. The horizontal axis represents the location in
474
+ a corridor, with 0 being set at the onset of a grating corridor. Negative values are in the grey
475
+ corridors and positive values are in the grating corridors. The licking locations are marked by
476
+ coloured points. Red dots represent licking within 1 length unit before a valve opening, and
477
+ yellow indicates the licking after the opening of the reward valve, in a grating corridor. All
478
+ other lick locations are marked in black. The trial number on the vertical axis shows the
479
+ sequential order of the trials in each plot. The right plot shows all trials, where each trial is
480
+ passing through one grey corridor followed by a grating corridor. The middle and the left plots
481
+ show a closer look at the vertical and angled grating corridors. The red labels are labels for the
482
+ experimental sessions.
483
+ [Figures 2.3 and 2.4: plotted lick-location data omitted. Legend: grey walls / vertical gratings /
+ angled gratings; unrewarded licks (black), licks after valve opening (yellow), licks within 1 unit
+ before valve opening (red). Axes: x (VR length unit) vs. trial number; session labels omitted.]
657
+ Figure 2.5: Speed vs location for M31. See the figure descriptions below.
658
+ Figure 2.6: Speed and licks vs location for M70. The horizontal axis represents the location in
659
+ the corridor, with 0 being set at the onset of a grating corridor. Negative values are in the grey
660
+ corridors and positive values are in the grating corridors. The trial number on the vertical axis
661
+ shows the sequential order of the trials in each plot. The right plot shows all trials, where each
662
+ trial is passing through one grey corridor followed by a grating corridor. The middle and the
663
+ left plots show a closer look at the vertical and angled grating corridors. The colour for each
664
+ location of each trial represents the speed of the animal at that point according to the colour
665
+ scale; warmer colours represent higher speeds and cooler colours represent lower speeds. Note
666
+ that for Figure 2.5, the speed is averaged over 5 unit intervals due to virtual memory limits.
667
+ The white points show the lick locations for M70, and the small black star indicates a valve
668
+ opening location during a trial. The black labels are data labels associated with experimental
669
+ sessions.
670
+ [Figures 2.5 and 2.6: speed colour-map data omitted. Colour scale: speed (units per second),
+ 0–50; axes: x (VR length unit) vs. trial number; session labels omitted.]
+ Chapter 3
820
+ x (VR length unit)Chapter 3
821
+ Behavioral model part 1: internal
822
+ representations
823
+ The behavioural model presented here provides a framework for inferring an internal
824
+ model that can predict the animal’s behaviour at a given time. Before getting into the
825
+ specifics, consider a broad perspective on inferring a model that generates the current be-
826
+ haviour by incorporating past experiences. Figure 3.1 is a graphical model of the big-picture
+ relation between the history of the animal’s observations H, the internal model M that in-
828
+ corporates experience into internal representations, and the observed behaviour B. This
829
+ chapter discusses the relationship between the history of observations and behaviorally
830
+ relevant representations in the internal model (H → M in the graphical model of Figure
831
+ 3.1). I introduce a space of models where a range of hypotheses about the internal model
832
+ can be systematically examined. The internal representations about reward and space
833
+ are then used in the next chapter to construct a generative model of behaviour (M → B
834
+ in the graphical model of Figure 3.1). Then using a systematic approach, an internal
835
+ model is inferred that best describes the data (H and B).
836
+ Figure 3.1: Relation between history of experimental observations H, internal model M, and
837
+ behavior B. H and B are observed in the experimental data, but the internal model M is
838
+ unobserved.
839
845
+ By exploring and experiencing the environment, the brain uses experience to update its
846
+ beliefs (i.e., learning) about the environment using its internal representations. In this
847
+ learning model, the normative framework of Bayesian ideal observer analysis (Geisler
848
+ 2003, 2011) is used to learn behaviorally relevant internal representations. These include
849
+ learning about the probability of reward in different regions of the VR corridor, and
850
+ expectations about upcoming spatial regions when moving forward1.
851
+ The model of spatial states in Section 3.1 describes how the space (VR corridor) is divided into
852
+ states corresponding to spatial segments, where the representation of reward probability
853
+ within a state only depends on the information (history of reward outcomes) obtained at
854
+ that state. The structure of these states is a Markov chain. The space of models in Section
855
+ 3.2 prescribes a range of Markov chain structures of spatial states within which a model is
856
+ selected. For given states of a model, the dynamics for learning reward associations and
857
+ state transitions are considered within the normative framework of the Bayesian ideal
858
+ observer model in Section 3.3.
859
+ 3.1
860
+ Structure of spatial states
861
+ Animals’ observation of visual stimuli and spatial positioning is an observation of the
862
+ current (x, y) ∈ (xLoc × Cor).
863
+ Observations about reward association at the current
864
+ location (x, y) may be relevant to reward association at some other locations.
865
+ It is
866
+ therefore necessary to define spatial regions where reward observations are relevant to
867
+ the entire region but explicitly irrelevant to other regions. To formalise this concept,
868
+ the objective of this section is to associate the segments of space with states where the
869
+ information about reward association is relevant to the current state and no other state.
870
+ A reasonable way to define such states is to group areas that are spatially close by, visually
871
+ similar, or both.
872
+ Defining states associated with spatial segments
873
+ Taking into account both spatial proximity and visual similarity, consider sectioning xLoc
874
+ into a finite set of mutually exclusive spatial segments each identified by a fixed y, and an
875
+ interval Ix for x values. We illustrate an example of spatial segmentation in Figure 3.2.
876
+ Denote by S a set of states and associate each segment with only one state (note that
877
+ multiple segments may be associated with the same state). Then we say that the mouse
878
+ is in state s if its position (x, y) is inside a segment that is associated with s. We associate
879
+ all positions in all corridors with only one state with the function f : (xLoc × Cor) → S.
880
+ 1 The subject can only move forward due to the experimental setup.
885
+ The mouse may map locations onto states in multiple ways. By considering various ways
886
+ to map between locations and states, we can infer the mapping that best matches the
887
+ behavioural data (see 4.4).
888
+ Spatial state transition event and structural properties
889
+ Let Xk be the random variable describing the k-th visited spatial state, where a spatial
890
+ state transition event (i.e., transition to the next spatial step) happens when the subject
891
+ crosses the initial point of a segment associated with a state2. Given the current position,
892
+ the future positions do not depend on the history of visited positions, so given Xk, state
893
+ Xk+1 is conditionally independent of Xn for n < k. It follows that the state structure as
894
+ defined above satisfies the Markov property.
895
+ We assume that the spatial states are fully observable. In other words, given a state
896
+ structure, we assume that the subject always knows which state is the current state.
897
+ Observations of the animal may be noisy and inaccurate, so assuming fully observable
898
+ states is a simplification that may be contended with in a more sophisticated future
899
+ model. However, states are associated with intervals of space rather than precise points
900
+ in space, and they already incorporate some approximation about the spatial awareness
901
+ of the subject.
902
+ We assume that the mouse learns two things from the visual stimuli and licking in state
903
+ s. First, it learns the reward association in that state. Second, it learns the transition
904
+ from that state to other states. Let r(s) be the probability that licking in state s leads to
905
+ reward in state s. Also, denote by p(s,s′) = P(Xk+1 = s′|Xk = s) the transition probability
906
+ of visiting any state s′ after s. These parameters are initially unknown to the mouse and
+ should be learned. In Section 3.3, I discuss a semi-normative model of learning for these
+ parameters using the ideal observer framework.
+ Figure 3.2: An example of dividing the corridor space into mutually exclusive spatial
+ segments. Each segment is then associated with exactly one state.
+ 2 Note that the time spent in each state is not fixed in this Markov model.
916
+ It is worth noting that the state transitions of the Markov chain are sparse. To understand
917
+ the sparsity of state transitions, first note that x is a positive real value, which ranges
918
+ from 0 to the maximal length of a corridor with the same patterns, and y is a discrete
919
+ value with three possible entries. From the onset of a corridor, until the onset of the next
920
+ corridor, the spatial location is a continuous function of time. Within the period between
921
+ two consecutive onsets, if a state transition happens, it can only be to the state associated
922
+ with the next interval of x, with the same y. Moreover, when passing the onset of the
923
+ next corridor, there is a discrete change in the value of y, and x = 0 at the onset of the
924
+ new corridor. This event can only be a state transition to the start of a new corridor (a
925
+ state that starts at x = 0) so there are at most three such possible transitions. It follows
926
+ that the structure of states is a sparse Markov chain.
927
+ 3.2
928
+ Space of models for spatial states
929
+ To define a space of models M , we use two parameters for identifying a model in the
930
+ model space; one for the set of discriminated patterns (V), and one for the length of
931
+ segments (d).
932
+ Spatial model parameter V: set of discriminated visual stimuli
933
+ Let V be the set of visual stimuli that are discriminated in the spatial state model. The
934
+ set of possible choices for V is {V1, V2, V3} which are described below.
935
+ • V1 = {u := undifferentiated}, where the grey and grating are not discriminated.
936
+ • V2 = {g := grey, va := angled or vertical grating}, where the grey corridor is dis-
937
+ criminated from the grating corridors, but where angled and vertical grating corri-
938
+ dors are not discriminated.
939
+ • V3 = {g := grey, v := vertical, a := angled}, where the grey corridor, the angled and
940
+ vertical grating corridor are discriminated.
941
+ While set Cor contains the types of visual stimuli on the corridors, set V refers to subjec-
942
+ tive visual discrimination (or classification) between corridors by the mouse. Also note
943
+ that the choices for set V implicitly contain a mapping from Cor to V.
944
948
+ Spatial model parameter d: length of states
949
+ Denote by d a value in the interval (0, max(x)] for the length of spatial segments. Value d
950
+ uniquely defines a sequence of intervals of x values. For example, the associated sequence
951
+ of intervals to d = 30 is {[0, 30), [30, 60), . . .}. Then state sij is associated with the j-th
952
+ interval of x, which is [(j−1)d, jd), and i ∈ V identifies the visual stimuli. For example,
+ for V = V2 = {g, va} and d = 30, the state sva,2 refers to intervals of x ∈ [30, 60) for both the
954
+ vertical and angled grating corridors.
955
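+ A hypothetical sketch of the induced mapping f : (xLoc × Cor) → S for a model MV,d; the
+ dictionary encoding of V and all names are illustrative:
+ ```python
+ import math
+ 
+ # Encode V as a map from corridor type in Cor to its discriminated label.
+ V2 = {"grey": "g", "vertical": "va", "angled": "va"}   # V = V2 = {g, va}
+ V3 = {"grey": "g", "vertical": "v", "angled": "a"}     # V = V3 = {g, v, a}
+ 
+ def f(x, y, V, d):
+     """Map a position (x, y) to its spatial state s_{i,j}.
+ 
+     x: distance from the onset of the current corridor pattern;
+     y: corridor type in Cor = {'grey', 'vertical', 'angled'}.
+     Returns (i, j) with i in V and j the 1-based interval index,
+     so that x lies in [(j-1)d, jd).
+     """
+     i = V[y]                      # subjective visual label
+     j = math.floor(x / d) + 1     # j-th interval of length d
+     return (i, j)
+ 
+ print(f(45.0, "angled", V2, 30))  # -> ('va', 2), i.e. state s_{va,2}
+ ```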
+ Model space
956
+ Now it is possible to introduce a Markov model MV,d ∈ M with the set of states S that
957
+ are associated with the spatial intervals induced by V and d. Since the length of each
+ corridor is bounded by max(x), model MV,d is a finite-state Markov model. For example,
+ MV1,max(x) and MV3,max(x) have exactly one and three states, respectively.
+ Figure 3.3: Nine instances of Markov chain models MV,d for choices of V and selected instances
+ of d. For d = max(x), there is only one state per element of V, and a state transition event only
+ occurs when the corridor type changes. The length of the angled and vertical grating corridors
+ is exactly 60 (VR length units) in the experiment, so for d = 60 and d = 20 there are exactly 1
+ and 3 states associated with the relevant element in V. Note that the figure illustrates only
+ selected instances of the model space M. [Panel grid omitted: columns V = V1, V2, V3; rows
+ d = max(x), d = 60, d = 20.]
+ Table 3.1: Parameters for the model of spatial states.
+ Parameter | Type | Description
+ V | Spatial model parameter | Set of discriminated visual stimuli on the corridors in model
+ MV,d; possible options are V1 = {u}, V2 = {g, va} and V3 = {g, v, a}.
+ d | Spatial model parameter | A constant length in (0, max(x)] for the length of the spatial
+ segments in model MV,d.
995
+ Figure 3.3 illustrates the states of Markov chain models MV,d for example cases of V and d.
997
+ Parameters V and d are free parameters that will be set during the model selection, which
998
+ will be further discussed in Section 4.4. The fit for parameter V, selected from V1, V2
999
+ or V3, is determined by which stimuli the animal discriminates. The true value for d is
1000
+ the length of spatial segments where information about reward associations and state
1001
+ transitions in the current segment is reasonably independent of segments associated with
1002
+ other states. For the sake of simplicity, it is assumed that d is a fixed value, and it is
1003
+ the same across different visual stimuli. However, relaxing this assumption is possible by
1004
+ having more free parameters, for example, by introducing a free parameter of distance
1005
+ for each element of V. For example, suppose V = V3. Then instead of a free parameter d,
1006
+ we could use three parameters in D = {dg, da, dv} which contains one free parameter of
1007
+ distance for every element of V. In the initial implementation of the model, one parameter
1008
+ d is considered.
1009
+ In summary, parameters V and d for a model MV,d determine the structure of the states
1010
+ in the Markov chain, where for each state the learning dynamics about reward association
1011
+ and state transitions are only dependent on the observations in that state. The learning
1012
+ dynamics are discussed in the next section.
1013
+ 3.3
1014
+ Bayesian learning model
1015
+ As first noted in Section 3.1, in any state s, the subject uses sensory information to
1016
+ learn r(s), the probability that licking in s leads to the administration of reward in s, or
1017
+ reward probability of s for short. Furthermore, state transition probability p(s,s′), which
1018
+ is the probability of visiting state s′ after visiting s, is also unknown to the subject and
1019
+ it is learned.
1020
+ Here, we use the ideal observer framework (Geisler 2003) to develop a
1021
+ semi-normative model for learning both reward associations and state transitions. In this
1022
1026
+ section, the learning dynamics are discussed for a given model M ∈ M . Therefore, states
1027
+ S and their associated spatial intervals are unambiguous.
1028
+ 3.3.1
1029
+ Learning reward probability within a state
1030
+ Recall that reward is given to the subject immediately after the subject licks the dispenser
1031
+ in the reward zone (see Section 2.1 for details of the experimental setup). The reward is
1032
+ a fixed amount of milk administered via the dispenser. We noticed that even in trained
1033
+ animals, licking started before the reward zone (see example mice in Figures 2.3 and 2.4).
1034
+ This suggests that the mouse associates an extended region with the reward delivery
1035
+ which starts before the reward zone set by the experimenters.
1036
+ Reward outcome Rk of current spatial step k
1037
+ If the mouse licks the dispenser in state s, it collects some information about the unknown
1038
+ parameter r(s). If the subject does not lick the dispenser, it obtains no information about
1039
+ r(s). Let the random variable Rk = (Rk^(T), Rk^(F)) be the reward outcome of spatial step
+ k, where Rk^(T) counts the number of positive outcomes, and Rk^(F) counts the number of
1047
+ negative outcomes in spatial step k. As a consequence of the experimental setup, the
1048
+ amount of reward and the frequency of licking in the experiment do not provide any
1049
+ additional information about a reward region. Furthermore, spatial states are defined to
1050
+ be regions where licking at different points within the region does not provide additional
1051
+ information about the reward. Therefore, each visit to a state provides only three possible
1052
+ reward outcomes:
1053
+ • Rk = (1, 0) for subject licking the dispenser in spatial step k followed by reward
1054
+ becoming available in spatial step k,
1055
+ • Rk = (0, 1) for subject licking the dispenser in spatial step k followed by no reward
1056
+ in spatial step k, and
1057
+ • Rk = (0, 0) for subject not licking the dispenser in spatial step k.
1058
+ Normative model for updating internal reward representations (Bayesian)
1059
+ Let us first discuss how an ideal observer updates its prior beliefs about r(s) after visiting
1060
+ state s in spatial step k. The ideal observer provides a theoretical upper limit of perfor-
1061
+ mance, given the collected data. It is therefore a normative framework for updating the
1062
+ beliefs about reward association. Let prior beliefs about r(s) right before visiting spatial
1063
1067
+ step k be a Beta distribution
1068
+ Beta(βk^(1)(s), βk^(2)(s))
+ over the interval [0, 1]. The reward outcome Rk = (Rk^(T), Rk^(F)) is the data that is newly
+ collected about the reward. By Equation 1.4, the posterior is
+ r(s) | Rk ∼ Beta(Rk^(T) + βk^(1)(s), Rk^(F) + βk^(2)(s)).
1082
+ Reward learning rate ηr
1083
+ The above is a theoretical bound on learning from observations in state s, assuming a
1084
+ prior Beta distribution over [0, 1] for the reward probability r(s). Some mice learn faster
1085
+ than others, and all of them will perform no better than the ideal observer model above.
1086
+ To allow for individual differences, and different learning rates, we introduce a model
1087
+ parameter ηr ∈ [0, 1], which dials the amount of data required for the same amount of
1088
+ learning as an ideal observer. The update rule (i.e., posterior) is
1089
+ r(s) | Rk ∼ Beta(ηr Rk^(T) + βk^(1)(s), ηr Rk^(F) + βk^(2)(s)).
1096
+ To keep track of learning parameters, let Bk = { βk(s) := (βk^(1)(s), βk^(2)(s)) : s ∈ S } be
+ the beta parameters for beliefs about reward probabilities of all states in spatial step k.
+ Note that after visiting state s in spatial step k,
+ βk+1(s) = ηr Rk + βk(s) for s = Xk, and (3.1)
+ βk+1(s′) = βk(s′) for s′ ≠ Xk.
1114
+ Note that ηr is defined to have the same value across all states. If ηr = 1, the mouse
1115
+ performs as well as the normative ideal observer, and if ηr = 0, the mouse never learns
1116
+ reward associations. For the values in between 0 and 1, the mouse requires extra data
1117
+ points for updating its beliefs to the same extent as an ideal observer model. The model
1118
+ parameter ηr can be interpreted as the data efficiency of learning. It could be used to
1119
+ compare individual learning differences among subjects. Furthermore, it is interesting
1120
+ to assess whether differences of ηr across individuals are predictive of comparative learning
1121
+ rates on other learning tasks. It also provides a qualitative way to assess the model. For
1122
+ example, if the value is unreasonably high, it may indicate a flaw in the state structure
1123
1127
+ or an incorrect choice of prior.
+ Table 3.2: Guide for variables (Var) and parameters (Par) relevant to internal reward representations.
+ Var/Par | Type | Description
+ Rk | observed | A binary pair representing the reward outcome of step k: (1, 0) lick and reward within step k; (0, 1) lick but no reward within step k; (0, 0) no lick within step k.
+ Bk | inferred | List of (βk^(1)(s), βk^(2)(s)) for all s ∈ S, where Beta(βk^(1)(s), βk^(2)(s)) represents the beliefs about r(s) at spatial step k.
+ ηr | model parameter | A constant in the [0, 1] interval for the learning rate of reward association.
1161
+ Implementation notes
1162
+ To simplify model implementation, we can derive the posterior distribution at step k by
1163
+ merely keeping a list record of the total count of positive and negative reward outcomes
1164
+ in state s. In particular, at step k, for state s, let ck(s) = (ck^(T)(s), ck^(F)(s)) be the total
+ count of positive and negative outcomes in state s, from step 1 up to the start of step k.
+ That is,
+ ck(s) = ∑_{n=1}^{k} 1[Xn = s] Rn.
+ For the current spatial step k, a list of numbers can store the values of ck(s). Assuming a
+ uniform prior at the start of the experiment, or β1^(1)(s) = β1^(2)(s) = 1, the prior probability
+ distribution of r(s) at step k is
+ r(s) ∼ Beta(ηr ck^(T)(s) + 1, ηr ck^(F)(s) + 1),
+ for which,
+ βk(s) = ηr ck(s) + 1. (3.2)
1191
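+ A minimal sketch of this bookkeeping (Equations 3.1 and 3.2); the state keys and all values
+ are illustrative:
+ ```python
+ def update_reward_counts(counts, state, R):
+     """Accumulate the reward outcome R = (R_T, R_F) of a visit to `state`,
+     i.e. the counts c_k(s) behind Equation 3.1."""
+     c_T, c_F = counts.get(state, (0.0, 0.0))
+     counts[state] = (c_T + R[0], c_F + R[1])
+     return counts
+ 
+ def reward_posterior(counts, state, eta_r=1.0):
+     """Beta parameters for r(s) under a uniform Beta(1, 1) prior:
+     beta_k(s) = eta_r * c_k(s) + 1 (Equation 3.2); eta_r = 1 recovers
+     the ideal observer."""
+     c_T, c_F = counts.get(state, (0.0, 0.0))
+     return (eta_r * c_T + 1.0, eta_r * c_F + 1.0)
+ 
+ counts = {}
+ update_reward_counts(counts, ("va", 2), (1, 0))  # lick followed by reward
+ update_reward_counts(counts, ("va", 2), (0, 1))  # lick, no reward
+ a, b = reward_posterior(counts, ("va", 2), eta_r=0.5)
+ print(a, b, a / (a + b))  # 1.5 1.5 0.5 -> posterior mean of r(s)
+ ```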
3.3.2 Learning state transitions

The learning dynamics for the state transitions $p_{(s, s')}$ are defined similarly to the reward associations. Let $E$ be the set of (directed) transition edges, and let $\mathrm{Adj}(s) = \{s' : (s, s') \in E\}$ be the set of states such that, for $X_k = s$, the outcome of $X_{k+1}$ is in $\mathrm{Adj}(s)$. Therefore, the transition probabilities from $s$, $P(X_{k+1} \mid X_k = s)$, form a distribution of outcomes over $\mathrm{Adj}(s)$. Assuming fixed transition probabilities, $P(X_{k+1} \mid X_k = s)$ can be represented by a list of probabilities $p(s) := \left(p_{(s, s')} : s' \in \mathrm{Adj}(s)\right)$. Note that if the subject is not familiar with the space, the true distribution is unknown, and the subject learns about these probabilities through experience.

Normative model for updating internal transition representations (Bayesian)

Every time the subject leaves state $s$ and the next step is observed, one observation is made about the outcome of $X_{k+1}$ given $X_k = s$. Because the outcome is a multinomial random variable, whose possible outcomes are the states in $\mathrm{Adj}(s)$, we use a Dirichlet prior distribution to represent the uncertainty about $p(s)$. Specifically, at spatial step $k$,

$$p(s) \sim \mathrm{Dir}\left(\alpha_k(s)\right),$$

where the list of parameters $\alpha_k(s)$ contains an element corresponding to each possible outcome. In particular,

$$\alpha_k(s) = \left(\alpha_k(s, s') : s' \in \mathrm{Adj}(s)\right).$$

Suppose $X_k = s$, and consider an ideal observer whose prior beliefs about $p(s)$ at spatial step $k$ are described by $\mathrm{Dir}(\alpha_k(s))$. Also suppose the ideal observer visits the next state and makes the observation $X_{k+1} = \breve{s}$. Then, by Equation 1.4, the posterior distribution is

$$p(s) \mid (X_{k+1} = \breve{s},\, X_k = s) \sim \mathrm{Dir}\left(\alpha_{k+1}(s)\right),$$

where each element $\alpha_k(s, s')$ of $\alpha_{k+1}(s)$ is updated as follows:

$$\alpha_{k+1}(s, s') = 1 + \alpha_k(s, s') \quad \text{for } s' = \breve{s},$$
$$\alpha_{k+1}(s, s') = \alpha_k(s, s') \quad \text{for } s' \neq \breve{s}.$$

Furthermore, for any other state $s'' \neq s$, the beliefs are clearly not updated, i.e., $\alpha_{k+1}(s'') = \alpha_k(s'')$.
Table 3.3: Parameter guide for learning transition probabilities.

| Parameter(s) | Type | Description |
| $(X_{k+1} \mid X_k)$ | observed | Transition outcome from a given state $X_k$. |
| $A_k$ | inferred | List of $\alpha_k(s)$ for all $s \in S$, where $\mathrm{Dir}(\alpha_k(s))$ represents the beliefs about $p(s)$ at step $k$ (the list of state transition probabilities from $s$ to its adjacent states). |
| $\eta_p$ | free parameter | A constant in the $[0, 1]$ interval: the learning rate of transition probabilities. |

Transition learning rate $\eta_p$

Just as we introduced a learning rate for the reward associations, we introduce $\eta_p \in [0, 1]$ to account for data inefficiency compared to the ideal observer. Denote by $A_k$ the list of all learning parameters of the state transition probabilities, $A_k = \left\{\alpha_k(s) : s \in S\right\}$. The update rule (posterior distribution) is now

$$p(s) \mid (X_{k+1}, X_k) \sim \mathrm{Dir}\left(\alpha_{k+1}(s)\right),$$

where each element $\alpha_k(s, s')$ of a list of parameters in $A_k$ is updated as follows:

$$\alpha_{k+1}(s, s') = \eta_p + \alpha_k(s, s') \quad \text{for } s = X_k \text{ and } s' = X_{k+1}, \tag{3.3}$$
$$\alpha_{k+1}(s, s') = \alpha_k(s, s') \quad \text{otherwise.}$$

For an ideal observer, $\eta_p = 1$. The lower the value of $\eta_p$, the slower the learning becomes, because the subject requires more data for similar updates in beliefs. If $\eta_p = 0$, the subject never learns from observing consecutive states. Note that the same parameter $\eta_p$ is used for learning all transition probabilities.
Implementation notes

For the prior beliefs about state transitions, a uniform prior ensures that no probability value is privileged over any other. We therefore assume that $\alpha_1(s, s') = 1$ for every entry $\alpha_1(s, s')$ of $\alpha_1(s)$. Then, at spatial step $k$, for entry $\alpha_k(s, s')$ of $\alpha_k(s)$,

$$\alpha_k(s, s') = \eta_p c_{(s, s')}(k) + 1, \tag{3.4}$$

where $c_{(s, s')}(k)$ is the total number of observed transitions from $s$ to $s'$ from step 1 to step $k$. By keeping track of $c_{(s, s')}(k)$ in a matrix, any parameter in $A_k$ can be calculated on demand using Equation 3.4 for the current state.
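The transition counterpart of the previous sketch is equally compact. Again, the class and its interface are illustrative rather than taken from the thesis; a uniform Dirichlet prior (all ones) is assumed, matching Equation 3.4.

```python
import numpy as np

class DirichletTransitionLearner:
    """Per-state Dirichlet beliefs about transition probabilities (Equation 3.4)."""

    def __init__(self, n_states, eta_p=1.0):
        self.eta_p = eta_p                            # learning rate, in [0, 1]
        self.counts = np.zeros((n_states, n_states))  # c_(s, s')(k)

    def update(self, s, s_next):
        self.counts[s, s_next] += 1                   # one observed transition

    def alpha(self, s, adj):
        # alpha_k(s, s') = eta_p * c_(s, s')(k) + 1, over adjacent states s' only
        return self.eta_p * self.counts[s, list(adj)] + 1.0

    def expected_transitions(self, s, adj):
        # E[p_(s, s')] = alpha_k(s, s') / sum_{s''} alpha_k(s, s''), used in Chapter 4
        a = self.alpha(s, adj)
        return a / a.sum()
```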
Chapter 4

Behavioral model part 2: the generative model

In the previous chapter, I discussed the internal representations of spatial regions and the reward probabilities within those regions. This chapter describes a model that uses those internal representations to generate behaviour. The learning model for updating beliefs about reward probabilities and state transitions was a normative model of Bayesian learning. In contrast, here we present a descriptive model of behaviour that does not explicitly enforce any optimal decision-making criterion. Before making normative assumptions about behaviour, it is important to have a descriptive framework for systematically assessing such assumptions.

Recall that the location, visual stimulus, licking and speed of the mouse are recorded in the experimental data (see Chapter 2.2). To improve readability, Table 4.1 lists the notation used to represent the behavioural data.

Table 4.1: Behavioral and observational records for $t \in \{1, 2, \ldots, N\}$.

| Data | Type | Description |
| $x_t$ | Observation | The true value of the distance from the onset of the current corridor at time step $t$. |
| $y_t$ | Observation | $y_t \in \mathrm{Cor} = \{\text{grey}, \text{vertical}, \text{angled}\}$ is the true value of the corridor type, which determines the visual stimuli at time step $t$. |
| $o_t$ | Observation | A binary value indicating whether the reward valve has opened during the time step. |
| $v_t$ | Behavior | Speed (average) at time step $t$. |
| $l_t$ | Behavior | Number of licks at time step $t$. |

A spatial state transition event triggers an update of the internal representations of reward probabilities and spatial transitions. During the period between two transition events, the parameters associated with the internal representations (specified by the elements of $B_k$ and $A_k$) are unchanged. Assuming that the internal representations guide the behaviour, we define behavioural parameters for speed and licking rate that are derived from the parameters of the internal representations. Figure 4.1 describes the conditional dependence structure of the parameters associated with a spatial state. In this model, the internal representations are used to derive two parameters that guide the licking and speed behaviour. These parameters are the target speed $\tilde{\nu}_k$ and the licking rate $\tilde{\lambda}_k$, discussed in detail in Section 4.2 and Section 4.1 respectively.

Table 4.2: Description of updating the internal representations at a given step, using the graphical model of Figure 4.1. Variables (Var.) and their parents (Par(.)) are included in the first and second columns respectively. The third column (Type) indicates whether the outcome of the variable given its parents is stochastic (Stoch.) or deterministic (Deter.). The conditional dependence of each variable on its parents is described in the last column.

| Var. | Par(.) | Type | Update description |
| $X_{k+1}$ | $X_k$ | Stoch. | Stochastic outcome of the state immediately following $X_k$. |
| $B_{k+1}$ | $B_k, R_k, X_k$ | Deter. | Updates the reward probability distribution of the previous state, giving $B_{k+1}$, using Equation 3.1. |
| $A_{k+1}$ | $A_k, X_{k+1}, X_k$ | Deter. | Updates the transition probability distribution for the last transition using Equation 3.3. |
| $r_k$ | $B_k, X_k$ | Deter. | Reward distribution of the current state. |
| $\gamma_k(\rho)$ | $B_k, A_k, X_k$ | Deter. | Discounted reward probability of present and future states, given by Equation 4.6 with discount factor $\rho$. |
| $\tilde{\nu}_k$ | $\gamma_k(\rho)$ | Deter. | Value of the target speed at spatial step $k$, adjusted by the value of $\gamma_k(\rho)$. |
| $\tilde{\lambda}_k$ | $r_k$ | Deter. | Licking rate at step $k$, given by Equation 4.3. |
| $R_k$ | $\tilde{\lambda}_k$ | Stoch. | Reward outcome of spatial step $k$. |
4.1 Spatial state parameter $\tilde{\lambda}_k$: licking rate

Consider the relevance of the reward probability distribution $r_k$ to the licking behaviour. First, it is reasonable to assume that the mouse regulates its licking rate using its perception of the expected reward probability in the current state. The expected value of the reward probability in the current state (at step $k$) is the mean of $\mathrm{Beta}\left(\beta_k^{(1)}(s), \beta_k^{(2)}(s)\right)$, which is

$$\mu(r_k) = \frac{\beta_k^{(1)}}{\beta_k^{(1)} + \beta_k^{(2)}}. \tag{4.1}$$

Figure 4.1: Graphical model of updating the internal representations at a given spatial step, the associated learning parameters (green), and the associated behavioural parameters (blue). The dotted squares indicate internal representations that are not observed in the data. Variables inside circles have stochastic outcomes given their parents, and variables inside squares have deterministic outcomes given their parents. State transitions trigger the update of these variables for the new step $k + 1$. Note that the model satisfies the Markov property. A description of the conditional dependencies is included in Table 4.2.
Second, independently of the expectation of reward, the degree of uncertainty about the true probability of reward may also be relevant to behaviour (Zhao & Warren 2015), and in particular to the rate of licking in the current state. A larger variance of the reward probability may mean that the current state should be explored further by licking, to decrease the uncertainty about the reward values. The variance of the reward probability beliefs can also be calculated from the Beta distribution:

$$\sigma^2(r_k) = \frac{\beta_k^{(1)} \beta_k^{(2)}}{\left(\beta_k^{(1)} + \beta_k^{(2)}\right)^2 \left(\beta_k^{(1)} + \beta_k^{(2)} + 1\right)}. \tag{4.2}$$

Let $L_t$ be a random variable for the number of licks at time step $t$. We assume that the licking behaviour is generated by a Poisson distribution,

$$L_t \sim \mathrm{Pois}(\tilde{\lambda}_k),$$

where, for model parameters $\omega_1$, $\omega_2$ and $\omega_3$,

$$\tilde{\lambda}_k = \omega_1 \mu(r_k) + \omega_2 \sigma(r_k) + \omega_3 \tag{4.3}$$

is the licking rate at a time step spent within the current spatial step. The probability that $L_t = l_t$, for a number of licks $l_t$, is given by

$$P(L_t = l_t) = \frac{\tilde{\lambda}_k^{l_t} e^{-\tilde{\lambda}_k}}{l_t!}. \tag{4.4}$$
Table 4.3: Parameters relevant to the licking behaviour.

| Parameter | Type | Description |
| $\tilde{\lambda}_k$ | Spatial state parameter | Rate of the Poisson distribution generating the licking behaviour within a time step spent in spatial step $k$. |
| $\omega_1$ | Model parameter | Weight of the expected reward probability of the current reward distribution in the spatial state parameter $\tilde{\lambda}_k$. |
| $\omega_2$ | Model parameter | Weight of the standard deviation of the current reward distribution in the spatial state parameter $\tilde{\lambda}_k$. |
| $\omega_3$ | Model parameter | Base licking rate in $\tilde{\lambda}_k$. |
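To make Equations 4.1-4.4 concrete, the sketch below computes the licking rate from the Beta parameters of the current state and samples lick counts. It is illustrative only; the parameter values are hypothetical, not fitted values from the experiment.

```python
import numpy as np

def licking_rate(b1, b2, w1, w2, w3):
    """lambda~_k = w1 * mu(r_k) + w2 * sigma(r_k) + w3 (Equation 4.3)."""
    mu = b1 / (b1 + b2)                                  # Equation 4.1
    var = b1 * b2 / ((b1 + b2) ** 2 * (b1 + b2 + 1.0))   # Equation 4.2
    return w1 * mu + w2 * np.sqrt(var) + w3

rng = np.random.default_rng(0)
lam = licking_rate(b1=3.0, b2=2.0, w1=4.0, w2=2.0, w3=0.1)  # hypothetical values
licks = rng.poisson(lam, size=10)   # L_t ~ Pois(lambda~_k), Equation 4.4
```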
4.2 Parameter $\tilde{\nu}_k$: target speed within the current spatial state

We noticed that the mouse tends to speed up when it does not expect a reward in upcoming states (see, for example, Figures 2.5 and 2.6). We model this behaviour using a discounted measure of future rewards.

Discounted future reward

The expected average reward probability $m$ steps after the current state $s$ can be formulated as follows:

$$\sum_{s' \in S} \mathbb{E}[r(s')] \, P(X_{k+m} = s' \mid X_k = s). \tag{4.5}$$

The value of $P(X_{k+m} \mid X_k)$ can be estimated from the transition probability matrix obtained from the expected values of the transition probabilities together with the standard Markov chain transition properties (Equation 1.1) (Häggström et al. 2002). To estimate the entries of the transition probability matrix, we use the expected value of the transition probability $p_{(s, s')}$ under the Dirichlet distributions in $A_k$:

$$\mathbb{E}[p_{(s, s')}] = \frac{\alpha_k(s, s')}{\sum_{s'' \in \mathrm{Adj}(s)} \alpha_k(s, s'')}$$

is the estimated probability value for the $(s, s')$ entry of the transition probability matrix. To conclude the discussion of the calculation of Expression 4.5, note that $\mathbb{E}[r(s')] = \beta_k^{(1)}(s') / \left(\beta_k^{(1)}(s') + \beta_k^{(2)}(s')\right)$.

Now, let us define the discounted future reward $\gamma_k(\rho)$, for a fixed value of $\rho$, at the current step $k$ as

$$\gamma_k(\rho) := \frac{\displaystyle\sum_{m=0}^{\infty} \rho^m \sum_{s' \in S} \mathbb{E}[r(s')] \, P(X_{k+m} = s' \mid X_k)}{\displaystyle\sum_{m=0}^{\infty} \rho^m}. \tag{4.6}$$

Note that $\gamma_k(\rho)$ is a normalised sum of discounted present and future expected reward probability values. Similar to the value function in reinforcement learning (Sutton & Barto 2018), or the concept of discounted cash flow in financial asset valuation (Damodaran 2012), it incorporates all future reward values by iteratively giving less weight to future rewards that are further away.
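In practice, $\gamma_k(\rho)$ can be approximated by truncating the geometric sums in Equation 4.6 at a fixed horizon. The following sketch makes the assumptions explicit (a known expected transition matrix and expected reward vector); the function name and the horizon of 50 steps are illustrative choices.

```python
import numpy as np

def discounted_future_reward(P, r_mean, s, rho, m_max=50):
    """Truncated evaluation of gamma_k(rho) from Equation 4.6.

    P      : (n, n) expected transition matrix, entries E[p_(s, s')]
    r_mean : (n,) expected reward probabilities E[r(s')]
    s      : index of the current state
    """
    occupancy = np.zeros(P.shape[0])
    occupancy[s] = 1.0                        # P(X_k = s' | X_k = s)
    num = den = 0.0
    for m in range(m_max + 1):
        num += rho ** m * occupancy @ r_mean  # discounted expected reward at lag m
        den += rho ** m                       # normalising geometric series
        occupancy = occupancy @ P             # advance one spatial step
    return num / den
```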
When transitioning from one state to another, a lower discounted future reward $\gamma_k(\rho)$ likely indicates that the next reward is further away. In this case, the mouse may choose to adjust its behaviour (Kleinfeld et al. 2006) by speeding up to pass the unrewarded regions more quickly. Since the discounted value of future reward does not change as long as the mouse remains in the same spatial state, the desired speed at the current spatial step can be modeled as a spatial state parameter. Let the target speed $\tilde{\nu}_k$ for the current state be

$$\tilde{\nu}_k := v_{\max}\left(1 - \gamma_k(\rho)\right), \tag{4.7}$$

where $v_{\max}$ is a model parameter that puts an upper bound on the target speed. A simple model of the speed at time step $t$ is then

$$v_t \sim \mathcal{N}(\tilde{\nu}_k, \sigma_{\tilde{\nu}}^2). \tag{4.8}$$

However, the physical constraints on movement do not permit an instant jump in speed when the spatial state changes. An alternative model of speed that takes the physical constraints into consideration (by adding more parameters) is

$$v_{t+1} \sim \mathcal{N}\left(\mathbb{E}[v_{t+1}], \mathrm{Var}[v_{t+1}]\right), \tag{4.9}$$

where

$$\left(\mathbb{E}[v_{t+1}], \mathrm{Var}[v_{t+1}]\right) =
\begin{cases}
(v_t + \delta_v^{+},\, \sigma_v^2) & \text{for } v_t < \tilde{\nu}_k - \epsilon, \\
(v_t + \delta_v^{-},\, \sigma_v^2) & \text{for } v_t > \tilde{\nu}_k + \epsilon, \\
(v_t,\, \sigma_v^2) & \text{otherwise, i.e., for } v_t \in [\tilde{\nu}_k - \epsilon, \tilde{\nu}_k + \epsilon],
\end{cases} \tag{4.10}$$

where the model parameters $\delta_v^{+}$ and $\delta_v^{-}$ are constant values for acceleration and deceleration, $\sigma_v^2$ is the variance of the speed outcome at the next time step, and the model parameter $\epsilon$ determines the range in which no deterministic acceleration or deceleration is enforced.
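A minimal sketch of the second speed model (Equations 4.9-4.10) follows. It assumes $\delta_v^{-}$ is supplied as a negative quantity, so that both drift cases add a constant as in Equation 4.10; the function name and the example values are illustrative.

```python
import numpy as np

def next_speed(v_t, v_target, delta_plus, delta_minus, eps, sigma_v, rng):
    """One draw of v_{t+1} under the constrained speed model (Equations 4.9-4.10)."""
    if v_t < v_target - eps:
        mean = v_t + delta_plus    # below the target band: accelerate
    elif v_t > v_target + eps:
        mean = v_t + delta_minus   # above the target band: decelerate (delta_minus < 0)
    else:
        mean = v_t                 # inside the band: purely random fluctuation
    return rng.normal(mean, sigma_v)

rng = np.random.default_rng(0)
v_next = next_speed(v_t=10.0, v_target=15.0, delta_plus=0.5,
                    delta_minus=-0.5, eps=1.0, sigma_v=0.2, rng=rng)
```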
4.3 Generative model of licking and speed

For a given spatial state structure (obtained by fixing the parameters $V$ and $d$), there exists a function $f_{V,d} : (x_{\mathrm{Loc}} \times \mathrm{Cor}) \to S$ that associates each position with a state. It is then possible to determine the time steps associated with state transitions. In Chapter 3.1, we assumed that the states are fully observable to the subject. Therefore, the subject knows the value of $f_{V,d}$ at any current time step.

Table 4.4: Parameters relevant to the speed behaviour.

| Parameter | Type | Description |
| $\rho$ | Model parameter | Discount rate of future reward (Expression 4.6). |
| $\tilde{\nu}_k$ | Spatial state parameter | Target speed (Expression 4.7). |
| $\sigma_{\tilde{\nu}}^2$ | Model parameter | Variance of speed in the first model (Expression 4.8). |
| $\sigma_v^2$ | Model parameter | Variance of the speed change in the second model (Expression 4.9). |
| $\delta_v^{+}, \delta_v^{-}$ | Model parameter | Acceleration and deceleration rates (second model). |
| $\epsilon$ | Model parameter | Range of purely random speed change (second model). |
Binary variable $K_t$: indicator of a spatial state transition event

For the current time step $t$, let $K_{t+1}$ be a binary variable such that

$$K_{t+1} =
\begin{cases}
0 & \text{for } f_{V,d}(x_t, y_t) = f_{V,d}(x_{t+1}, y_{t+1}), \\
1 & \text{for } f_{V,d}(x_t, y_t) \neq f_{V,d}(x_{t+1}, y_{t+1}).
\end{cases} \tag{4.11}$$

That is to say, $K_{t+1} = 1$ if $(x_t, y_t)$ and $(x_{t+1}, y_{t+1})$ are not in the same state, and so a state transition has occurred. Note that a spatial state transition triggers an update of the beliefs about the environment (the reward probabilities within states and the state transitions). The internal representations in the graphical model of Figure 4.1 are then updated to the next spatial step, and the behavioural parameters $\tilde{\lambda}_{k_{t+1}}$ and $\tilde{\nu}_{k_{t+1}}$ correspond to the new spatial step. For $K_{t+1} = 0$, the behavioural parameters $\tilde{\lambda}_{k_{t+1}}$ and $\tilde{\nu}_{k_{t+1}}$ remain unchanged from the previous time step.

Figure 4.2 shows the graphical model of the generative model of behaviour within time steps. The model assumes that the spatial state associated with $(x_t, y_t)$ is unambiguously determined by the subject (fully observable spatial states). Therefore, the value of $K_{t+1}$, which indicates a state transition, is also observed by the subject. Furthermore, $K_{t+1}$ can be deterministically inferred from the experimental data using Equation 4.11. Hence, it is also observed in the behavioural data. If $K_{t+1} = 1$, the graphical model of updating the internal representations is used to find the new behavioural parameters (indicated by green arrows). If $K_{t+1} = 0$, the behavioural parameters remain unchanged from the previous step. A description of the relationships is included in Table 4.5.
Table 4.5: Description of the relationships in the generative model of behaviour in the graphical model of Figure 4.2. Variables (Var.) and their parents (Par(.)) are included in the first and second columns respectively. The third column (Type) indicates whether the outcome of the variable given its parents is stochastic (Stoch.) or deterministic (Deter.). The conditional dependence of each variable on its parents is described in the last column.

| Var. | Par(.) | Type | Update description |
| $K_t$ | $(x_t, y_t)$, $(x_{t+1}, y_{t+1})$ | Deter. | Transition event indicator (Expression 4.11). |
| $\tilde{\nu}_{k_{t+1}}$ | $\tilde{\nu}_{k_t}, K_t$ | Deter. | For $K_t = 0$, $\tilde{\nu}_{k_{t+1}} = \tilde{\nu}_{k_t}$. Otherwise, the spatial state changes, and the graphical model of Figure 4.1 updates the value. |
| $\tilde{\lambda}_{k_{t+1}}$ | $\tilde{\lambda}_{k_t}, K_t$ | Deter. | For $K_t = 0$, $\tilde{\lambda}_{k_{t+1}} = \tilde{\lambda}_{k_t}$. Otherwise, the spatial state changes, and the graphical model of Figure 4.1 updates the value. |
| $l_t$ | $\tilde{\lambda}_k$ | Stoch. | Poisson-distributed value with rate $\tilde{\lambda}_k$ (Expression 4.3). |
| $v_t$ | $\tilde{\nu}_k$ | Stoch. | Speed at time step $t$ by the first model (Expression 4.8) or the second model (Expression 4.9). |

Figure 4.2: Graphical model of the generative model of behaviour. Note that the variables and relationships drawn in yellow and brown are not part of the internal model; they describe the conditional dependence of the observed values on the model variables. See Table 4.5 for a description of the relationships.
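The pieces above compose into a compact simulation loop. The sketch below generates lick counts and speeds per time step, refreshing the behavioural parameters only on transition events ($K_{t+1} = 1$); it uses the first, simpler speed model, and all inputs (state sequence, belief summaries, parameter values) are assumed given rather than taken from the thesis code.

```python
import numpy as np

def simulate_session(states, r_mean, r_sd, P, params, rng):
    """Generate licks and speeds per time step (Figure 4.2, first speed model).

    states : spatial state index per time step, i.e. f_{V,d}(x_t, y_t)
    r_mean, r_sd : per-state mean and sd of the current reward beliefs
    P      : expected transition matrix with entries E[p_(s, s')]
    """
    w1, w2, w3 = params["w1"], params["w2"], params["w3"]
    rho, v_max, sigma = params["rho"], params["v_max"], params["sigma_nu"]
    lam = v_target = 0.0
    prev_s, licks, speeds = None, [], []
    for s in states:
        if s != prev_s:                                 # K_{t+1} = 1 (Eq. 4.11)
            lam = w1 * r_mean[s] + w2 * r_sd[s] + w3    # Equation 4.3
            occ, num, den = np.eye(len(P))[s], 0.0, 0.0
            for m in range(50):                         # truncated Equation 4.6
                num += rho ** m * occ @ r_mean
                den += rho ** m
                occ = occ @ P
            v_target = v_max * (1.0 - num / den)        # Equation 4.7
            prev_s = s
        licks.append(rng.poisson(lam))                  # Equation 4.4
        speeds.append(rng.normal(v_target, sigma))      # Equation 4.8
    return np.array(licks), np.array(speeds)
```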
4.4 Estimation of model parameters

Below, the general framework for estimating the model parameters is discussed. For a fixed spatial model of space $M_{V,d}$, let $\theta$ be the list of model parameters,

$$\theta := (V, d, \eta_r, \eta_p, \omega_1, \omega_2, \omega_3, \sigma_{\tilde{\nu}}^2)$$

(using the first speed model), or

$$\theta := (V, d, \eta_r, \eta_p, \omega_1, \omega_2, \omega_3, \sigma_v^2, \delta_v^{+}, \delta_v^{-}, \epsilon)$$

(using the second speed model).

Given the model parameters and the observational data, the parents of $v_t$ and $l_t$ are deterministically set at each time point (see the graphical model of Figure 4.2). Therefore, speed and licking are conditionally independent, and the likelihood of the generative model of behaviour at time step $t$ is

$$\mathcal{L}\left(\theta \mid (v_t, l_t)\right) = P(v_t, l_t \mid \theta) = P(v_t \mid \theta)\, P(l_t \mid \theta) = f\left(v_t; \mu_t(\theta), \sigma_t(\theta)\right)\, g\left(l_t; \lambda_t(\theta)\right),$$

where $f$ and $g$ are the probability density and mass functions of the Gaussian and Poisson distributions respectively. Note that their distribution parameters are deterministically fixed at each time point given the model parameters (see Equations 4.3, 4.8 and 4.9). The model evidence of the generative model up to time step $N$ is then

$$\mathcal{L}\left(\theta \mid \{(v_t, l_t) : t = 1, \ldots, N\}\right) = \prod_{t=1}^{N} f\left(v_t; \mu_t(\theta), \sigma_t(\theta)\right)\, g\left(l_t; \lambda_t(\theta)\right). \tag{4.12}$$

We can then use maximum likelihood estimation (MLE) to obtain the fitted model parameters:

$$\theta^* = \operatorname*{argmax}_{\theta} \sum_{t=1}^{N} \ln\left[ f\left(v_t; \mu_t(\theta), \sigma_t(\theta)\right)\, g\left(l_t; \lambda_t(\theta)\right) \right]. \tag{4.13}$$

Note that for each spatial step, the graphical model is used to calculate the parameters $\mu_t(\theta)$, $\sigma_t(\theta)$ and $\lambda_t(\theta)$.
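Equation 4.13 can be optimised numerically once the graphical model has been run forward to produce the per-time-step parameters. The sketch below assumes user-supplied functions mapping $\theta$ to $\mu_t$, $\sigma_t$ and $\lambda_t$; the names and the choice of the Nelder-Mead optimiser are illustrative.

```python
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm, poisson

def negative_log_likelihood(theta, v, l, mu_fn, sigma_fn, lam_fn):
    """-log L(theta | {(v_t, l_t)}) from Equations 4.12-4.13.

    mu_fn, sigma_fn, lam_fn run the graphical model forward and return
    the per-time-step Gaussian and Poisson parameters for a given theta.
    """
    mu, sigma, lam = mu_fn(theta), sigma_fn(theta), lam_fn(theta)
    log_lik = norm.logpdf(v, mu, sigma).sum() + poisson.logpmf(l, lam).sum()
    return -log_lik

# theta_star = minimize(negative_log_likelihood, theta_init,
#                       args=(v, l, mu_fn, sigma_fn, lam_fn),
#                       method="Nelder-Mead").x
```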
Chapter 5

Discussion

The next step in the project is to complete the model validation on synthetic data. Before applying the model to real data, it is important to scrutinise the behaviour of the generative model. We plan to do so by pre-determining values for the model parameters and generating synthetic behavioural data. The generated behaviour is then treated as a given data set: if the model is well-behaved, the model parameters should be recoverable from the synthetic data. As different spatial state structures radically alter the learning dynamics, we will conduct the parameter recovery for the spatial model parameters particularly diligently. By considering various alternative hypotheses (different values for $d$ and $V$), the model evidence (Equation 4.12) of the alternative hypotheses will be compared. For a well-behaved model, the model evidence is expected to be highest for the parameters used to generate the data.
5.1 Limitations

While our model assumes fully observable Markov states, noisy observations of the location and visual stimuli introduce uncertainty about the true current state of the system. Indeed, observations of the environment are often noisy, and some behavioural models take this into account (Kang et al. n.d., Kersten & Mamassian 2009). While the learning rates for the reward and transition probabilities capture some aspects of noisy observation, they are not based on normative assumptions, and alternatives should be considered in future research (Laquitaine & Gardner 2018). Fortunately, there is an extensive body of research on partially observable Markov decision processes (Monahan 1982, Kaelbling et al. 1996) that would provide a clear path for improving the current model.

An alternative to estimating the model parameters by MLE, as in Chapter 4.4, is Bayesian estimation such as maximum a posteriori (MAP) estimation (Murphy 2012, Griffiths & Yuille 2008). In contrast to MLE, which gives a single estimated value for each parameter, the Bayesian approach yields a posterior distribution over each parameter, characterising the level of uncertainty about it. Since some of the model parameters are qualitatively interpretable, this may be particularly relevant. In particular, a distribution over the possible options for $V$, the set of discriminated visual stimuli, is highly relevant to the imaged activity of the visual cortex. The potential challenge is that the computational difficulty of the calculation may introduce implementation challenges that are hard to resolve. Nonetheless, its estimates of the model parameters are potentially more meaningful for studying visual perception.
5.2 Implications

During the experiments, two-photon calcium imaging and optogenetics were performed to determine changes in the inputs and activity of individual excitatory and inhibitory cells within the primary visual cortex. Previously, a multivariate autoregressive linear model (MVAR) was fitted to the neuronal data (Khan et al. 2018):

$$q_{t+1} = q_t + A q_t + u_t + \xi v_t,$$

where $q_t$ is the vector of response levels at time step $t$ for all $n$ imaged neurons, $A$ is an $n \times n$ matrix that includes the fitted interaction parameters, $u_t$ is a fitted vector for the stimulus-related input, and $\xi$ is a fitted parameter for the contribution of the current speed $v_t$. The MVAR model was used to compare the activity of populations of different inhibitory and excitatory cell types. The only behavioural term included was the speed $v_t$, which did not make a significant contribution. An immediate application of the behavioural model presented in this report is therefore to improve the MVAR model by including parameters related to the internal representations. In particular, the learned parameters that are likely to be relevant to behaviour, namely the expected reward probability $\mu(r_k)$, its variance $\sigma^2(r_k)$, and the discounted future reward $\gamma_k(\rho)$, could potentially improve the predictive power of the MVAR model.
If the internal representation terms from the behavioural model improve the predictive power of the MVAR model, this will give new insights into the information encoded by neurons in the primary visual cortex. Future experiments can then be designed to systematically manipulate these internal terms in order to understand the precise representations (Heilbron et al. 2020). This will help us understand how the structure of the environment changes learning dynamics and internal representations.
+
1875
+ Bibliography
1876
+ Bibliography
1877
+ Barlow, H. B. et al. (1961), ‘Possible principles underlying the transformation of sensory
1878
+ messages’, Sensory communication 1, 217–234.
1879
+ Beck, J. M., Ma, W. J., Kiani, R., Hanks, T., Churchland, A. K., Roitman, J., Shadlen,
1880
+ M. N., Latham, P. E. & Pouget, A. (2008), ‘Probabilistic population codes for bayesian
1881
+ decision making’, Neuron 60(6), 1142–1152.
1882
+ Berkes, P., Orbán, G., Lengyel, M. & Fiser, J. (2011), ‘Spontaneous cortical activ-
1883
+ ity reveals hallmarks of an optimal internal model of the environment’, Science
1884
+ 331(6013), 83–87.
1885
+ Bishop, C. M. (2006), Pattern recognition and machine learning, springer.
1886
+ Chen, T.-W., Wardill, T. J., Sun, Y., Pulver, S. R., Renninger, S. L., Baohan, A.,
1887
+ Schreiter, E. R., Kerr, R. A., Orger, M. B., Jayaraman, V. et al. (2013), ‘Ultrasensitive
1888
+ fluorescent proteins for imaging neuronal activity’, Nature 499(7458), 295–300.
1889
+ Damodaran, A. (2012), Investment valuation: Tools and techniques for determining the
1890
+ value of any asset, Vol. 666, John Wiley & Sons.
1891
+ Dombeck, D. A., Harvey, C. D., Tian, L., Looger, L. L. & Tank, D. W. (2010), ‘Functional
1892
+ imaging of hippocampal place cells at cellular resolution during virtual navigation’,
1893
+ Nature neuroscience 13(11), 1433–1440.
1894
+ Fiser, J., Berkes, P., Orbán, G. & Lengyel, M. (2010), ‘Statistically optimal perception
1895
+ and learning: from behavior to neural representations’, Trends in cognitive sciences
1896
+ 14(3), 119–130.
1897
+ Fishell, G. & Kepecs, A. (2019), ‘Interneuron types as attractors and controllers’, Annual
1898
+ 37
1899
+
1900
+ Bibliography
1901
+ review of neuroscience 43.
1902
+ Geisler, W. S. (2003), ‘Ideal observer analysis’, The visual neurosciences 10(7), 12–12.
1903
+ Geisler, W. S. (2011), ‘Contributions of ideal observer theory to vision research’, Vision
1904
+ research 51(7), 771–781.
1905
+ Griffiths, T. & Yuille, A. (2008), ‘A primer on probabilistic inference’, The probabilistic
1906
+ mind: Prospects for Bayesian cognitive science pp. 33–57.
1907
+ Häggström, O. et al. (2002), Finite Markov chains and algorithmic applications, Vol. 52,
1908
+ Cambridge University Press.
1909
+ Harvey, C. D., Collman, F., Dombeck, D. A. & Tank, D. W. (2009), ‘Intracellular dynam-
1910
+ ics of hippocampal place cells during virtual navigation’, Nature 461(7266), 941–946.
1911
+ Heeger, D. J. (2017), ‘Theory of cortical function’, Proceedings of the National Academy
1912
+ of Sciences 114(8), 1773–1782.
1913
+ Heilbron, M., Richter, D., Ekman, M., Hagoort, P. & De Lange, F. P. (2020), ‘Word
1914
+ contexts enhance the neural representation of individual letters in early visual cortex’,
1915
+ Nature communications 11(1), 1–11.
1916
+ Kaelbling, L. P., Littman, M. L. & Moore, A. W. (1996), ‘Reinforcement learning: A
1917
+ survey’, Journal of artificial intelligence research 4, 237–285.
1918
+ Kang, Y. H., Mahr, J., Nagy, M., Andrási, K., Csibra, G. & Lengyel, M. (n.d.), ‘Eye
1919
+ movements reflect causal inference during episodic memory retrieval’.
1920
+ Kepecs, A. & Fishell, G. (2014), ‘Interneuron cell types are fit to function’, Nature
1921
+ 505(7483), 318–326.
1922
+ Kersten, D. & Mamassian, P. (2009), ‘Ideal observer theory’, Encyclopedia of neuroscience
1923
+ 5, 89–95.
1924
+ Khan, A. G., Poort, J., Chadwick, A., Blot, A., Sahani, M., Mrsic-Flogel, T. D. & Hofer,
1925
+ S. B. (2018), ‘Distinct learning-induced changes in stimulus selectivity and interactions
1926
+ of gabaergic interneuron classes in visual cortex’, Nature neuroscience 21(6), 851–859.
1927
+ Kleinfeld, D., Ahissar, E. & Diamond, M. E. (2006), ‘Active sensation: insights from the
1928
+ 38
1929
+
1930
+ Bibliography
1931
+ rodent vibrissa sensorimotor system’, Current opinion in neurobiology 16(4), 435–444.
1932
+ Kriegeskorte, N. & Douglas, P. K. (2018), ‘Cognitive computational neuroscience’, Nature
1933
+ neuroscience 21(9), 1148–1160.
1934
+ Laquitaine, S. & Gardner, J. L. (2018), ‘A switching observer for human perceptual
1935
+ estimation’, Neuron 97(2), 462–474.
1936
+ Maloney, L. T. & Mamassian, P. (2009), ‘Bayesian decision theory as a model of human
1937
+ visual perception: Testing bayesian transfer’, Visual neuroscience 26(1), 147–155.
1938
+ Monahan, G. E. (1982), ‘State of the art—a survey of partially observable markov decision
1939
+ processes: theory, models, and algorithms’, Management science 28(1), 1–16.
1940
+ Murphy, K. P. (2012), Machine learning: a probabilistic perspective, MIT press.
1941
+ Orbán, G., Fiser, J., Aslin, R. N. & Lengyel, M. (2008), ‘Bayesian learning of vi-
1942
+ sual chunks by human observers’, Proceedings of the National Academy of Sciences
1943
+ 105(7), 2745–2750.
1944
+ Poort, J., Khan, A. G., Pachitariu, M., Nemri, A., Orsolic, I., Krupic, J., Bauza, M.,
1945
+ Sahani, M., Keller, G. B., Mrsic-Flogel, T. D. et al. (2015), ‘Learning enhances
1946
+ sensory and multiple non-sensory representations in primary visual cortex’, Neuron
1947
+ 86(6), 1478–1490.
1948
+ Pouget, A., Dayan, P. & Zemel, R. S. (2003), ‘Inference and computation with population
1949
+ codes’, Annual review of neuroscience 26(1), 381–410.
1950
+ Saleem, A. B., Diamanti, E. M., Fournier, J., Harris, K. D. & Carandini, M. (2018),
1951
+ ‘Coherent encoding of subjective spatial position in visual cortex and hippocampus’,
1952
+ Nature 562(7725), 124–127.
1953
+ Sutton, R. S. & Barto, A. G. (2018), Reinforcement learning: An introduction, MIT press.
1954
+ Yan, Y., Rasch, M. J., Chen, M., Xiang, X., Huang, M., Wu, S. & Li, W. (2014),
1955
+ ‘Perceptual training continuously refines neuronal population codes in primary visual
1956
+ cortex’, Nature neuroscience 17(10), 1380–1387.
1957
+ Yang, T. & Maunsell, J. H. (2004), ‘The effect of perceptual learning on neuronal re-
1958
+ 39
1959
+
1960
+ Bibliography
1961
+ sponses in monkey visual area v4’, Journal of Neuroscience 24(7), 1617–1626.
1962
+ Yap, E.-L. & Greenberg, M. E. (2018), ‘Activity-regulated transcription: bridging the
1963
+ gap between neural activity and behavior’, Neuron 100(2), 330–348.
1964
+ Zhao, M. & Warren, W. H. (2015), ‘How you get there from here:
1965
+ Interaction of
1966
+ visual landmarks and path integration in human navigation’, Psychological science
1967
+ 26(6), 915–924.
1968
+ 40
1969
+