shaurya0512 committed on
Commit
ae46214
1 Parent(s): 9f7b537

deleted folder and created tar

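As a minimal sketch of the operation the commit message describes, assuming the folder is the Full_text_JSON tree listed under "Files changed" and that a gzip-compressed archive was produced (neither the archive name nor the exact commands are recorded in this commit):

```python
import shutil
import tarfile

# Hypothetical reconstruction: pack the Full_text_JSON tree into one
# compressed tarball, then remove the original folder, as the commit
# message ("deleted folder and created tar") describes. The archive
# name and compression choice are assumptions.
with tarfile.open("Full_text_JSON.tar.gz", "w:gz") as tar:
    tar.add("Full_text_JSON")
shutil.rmtree("Full_text_JSON")
```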
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. Full_text_JSON/prefixA/json/A00/A00-1000.json +0 -22
  2. Full_text_JSON/prefixA/json/A00/A00-1001.json +0 -847
  3. Full_text_JSON/prefixA/json/A00/A00-1002.json +0 -519
  4. Full_text_JSON/prefixA/json/A00/A00-1003.json +0 -903
  5. Full_text_JSON/prefixA/json/A00/A00-1004.json +0 -1181
  6. Full_text_JSON/prefixA/json/A00/A00-1005.json +0 -993
  7. Full_text_JSON/prefixA/json/A00/A00-1006.json +0 -925
  8. Full_text_JSON/prefixA/json/A00/A00-1007.json +0 -1155
  9. Full_text_JSON/prefixA/json/A00/A00-1008.json +0 -1403
  10. Full_text_JSON/prefixA/json/A00/A00-1009.json +0 -1257
  11. Full_text_JSON/prefixA/json/A00/A00-1010.json +0 -1055
  12. Full_text_JSON/prefixA/json/A00/A00-1011.json +0 -739
  13. Full_text_JSON/prefixA/json/A00/A00-1012.json +0 -1015
  14. Full_text_JSON/prefixA/json/A00/A00-1013.json +0 -894
  15. Full_text_JSON/prefixA/json/A00/A00-1014.json +0 -1542
  16. Full_text_JSON/prefixA/json/A00/A00-1015.json +0 -844
  17. Full_text_JSON/prefixA/json/A00/A00-1016.json +0 -1336
  18. Full_text_JSON/prefixA/json/A00/A00-1017.json +0 -1033
  19. Full_text_JSON/prefixA/json/A00/A00-1018.json +0 -989
  20. Full_text_JSON/prefixA/json/A00/A00-1019.json +0 -1240
  21. Full_text_JSON/prefixA/json/A00/A00-1020.json +0 -1300
  22. Full_text_JSON/prefixA/json/A00/A00-1021.json +0 -1151
  23. Full_text_JSON/prefixA/json/A00/A00-1022.json +0 -1359
  24. Full_text_JSON/prefixA/json/A00/A00-1023.json +0 -777
  25. Full_text_JSON/prefixA/json/A00/A00-1024.json +0 -1190
  26. Full_text_JSON/prefixA/json/A00/A00-1025.json +0 -1768
  27. Full_text_JSON/prefixA/json/A00/A00-1026.json +0 -1524
  28. Full_text_JSON/prefixA/json/A00/A00-1027.json +0 -1184
  29. Full_text_JSON/prefixA/json/A00/A00-1028.json +0 -905
  30. Full_text_JSON/prefixA/json/A00/A00-1029.json +0 -821
  31. Full_text_JSON/prefixA/json/A00/A00-1030.json +0 -663
  32. Full_text_JSON/prefixA/json/A00/A00-1031.json +0 -1184
  33. Full_text_JSON/prefixA/json/A00/A00-1032.json +0 -1209
  34. Full_text_JSON/prefixA/json/A00/A00-1033.json +0 -964
  35. Full_text_JSON/prefixA/json/A00/A00-1034.json +0 -719
  36. Full_text_JSON/prefixA/json/A00/A00-1035.json +0 -1010
  37. Full_text_JSON/prefixA/json/A00/A00-1036.json +0 -806
  38. Full_text_JSON/prefixA/json/A00/A00-1037.json +0 -1123
  39. Full_text_JSON/prefixA/json/A00/A00-1038.json +0 -540
  40. Full_text_JSON/prefixA/json/A00/A00-1039.json +0 -1166
  41. Full_text_JSON/prefixA/json/A00/A00-1040.json +0 -882
  42. Full_text_JSON/prefixA/json/A00/A00-1041.json +0 -759
  43. Full_text_JSON/prefixA/json/A00/A00-1042.json +0 -1041
  44. Full_text_JSON/prefixA/json/A00/A00-1043.json +0 -1028
  45. Full_text_JSON/prefixA/json/A00/A00-1044.json +0 -952
  46. Full_text_JSON/prefixA/json/A00/A00-1045.json +0 -963
  47. Full_text_JSON/prefixA/json/A00/A00-1046.json +0 -1548
  48. Full_text_JSON/prefixA/json/A00/A00-2000.json +0 -578
  49. Full_text_JSON/prefixA/json/A00/A00-2001.json +0 -1425
  50. Full_text_JSON/prefixA/json/A00/A00-2002.json +0 -1299
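Each deleted file below follows the S2ORC JSON layout visible in the diffs (paper_id, header, title, authors, abstract, and a pdf_parse block with body_text paragraphs). As an illustrative sketch only, not part of the commit, one such file could have been read like this before deletion:

```python
import json

# Illustrative only: load one of the S2ORC files this commit deletes
# and print the fields visible in the diffs below.
with open("Full_text_JSON/prefixA/json/A00/A00-1001.json", encoding="utf-8") as f:
    paper = json.load(f)

print(paper["paper_id"], paper["title"])
for para in paper["pdf_parse"]["body_text"]:
    # Each body_text entry carries its section label and paragraph text.
    print(para["section"], ":", para["text"][:80])
```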
Full_text_JSON/prefixA/json/A00/A00-1000.json DELETED
@@ -1,22 +0,0 @@
1
- {
2
- "paper_id": "A00-1000",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:49.160434Z"
6
- },
7
- "title": "",
8
- "authors": [],
9
- "year": "",
10
- "venue": null,
11
- "identifiers": {},
12
- "abstract": "",
13
- "pdf_parse": {
14
- "paper_id": "A00-1000",
15
- "_pdf_hash": "",
16
- "abstract": [],
17
- "body_text": [],
18
- "back_matter": [],
19
- "bib_entries": {},
20
- "ref_entries": {}
21
- }
22
- }
Full_text_JSON/prefixA/json/A00/A00-1001.json DELETED
@@ -1,847 +0,0 @@
1
- {
2
- "paper_id": "A00-1001",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:11:58.688182Z"
6
- },
7
- "title": "BusTUC -A natural language bus route oracle",
8
- "authors": [
9
- {
10
- "first": "Tore",
11
- "middle": [],
12
- "last": "Amble",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Trondheim Norway",
17
- "location": {
18
- "postCode": "N-7491"
19
- }
20
- },
21
- "email": "amble@[email protected]"
22
- }
23
- ],
24
- "year": "",
25
- "venue": null,
26
- "identifiers": {},
27
- "abstract": "The paper describes a natural language based expert system route advisor for the public bus transport in Trondheim, Norway. The system is available on the Internet,and has been intstalled at the bus company's web server since the beginning of 1999. The system is bilingual, relying on an internal language independent logic representation.",
28
- "pdf_parse": {
29
- "paper_id": "A00-1001",
30
- "_pdf_hash": "",
31
- "abstract": [
32
- {
33
- "text": "The paper describes a natural language based expert system route advisor for the public bus transport in Trondheim, Norway. The system is available on the Internet,and has been intstalled at the bus company's web server since the beginning of 1999. The system is bilingual, relying on an internal language independent logic representation.",
34
- "cite_spans": [],
35
- "ref_spans": [],
36
- "eq_spans": [],
37
- "section": "Abstract",
38
- "sec_num": null
39
- }
40
- ],
41
- "body_text": [
42
- {
43
- "text": "A natural language interface to a computer database provides users with the capability of obtaining information stored in the database by querying the system in a natural language (NL) . With a natural language as a means of communication with a computer system, the users can make a question or a statement in the way they normally think about the information being discussed, freeing them from having to know how the computer stores or processes the information.",
44
- "cite_spans": [
45
- {
46
- "start": 180,
47
- "end": 184,
48
- "text": "(NL)",
49
- "ref_id": null
50
- }
51
- ],
52
- "ref_spans": [],
53
- "eq_spans": [],
54
- "section": "Introduction",
55
- "sec_num": "1"
56
- },
57
- {
58
- "text": "The present implementation represents a a major effort in bringing natural language into practical use. A system is developed that can answer queries about bus routes, stated as natural language texts, and made public through the Internet World Wide Web ( http : //www. idi. ntnu. no/bustuc/).",
59
- "cite_spans": [],
60
- "ref_spans": [],
61
- "eq_spans": [],
62
- "section": "Introduction",
63
- "sec_num": "1"
64
- },
65
- {
66
- "text": "Trondheim is a small city with a university and 140000 inhabitants. Its central bus systems has 42 bus lines, serving 590 stations, with 1900 departures per day (in average). That gives approximately 60000 scheduled bus station passings per day, which is somehow represented in the route data base.",
67
- "cite_spans": [],
68
- "ref_spans": [],
69
- "eq_spans": [],
70
- "section": "Introduction",
71
- "sec_num": "1"
72
- },
73
- {
74
- "text": "The starting point is to automate the function of a route information agent. The following example of a system response is using an actual request over telephone to the local route information company:",
75
- "cite_spans": [],
76
- "ref_spans": [],
77
- "eq_spans": [],
78
- "section": "Introduction",
79
- "sec_num": "1"
80
- },
81
- {
82
- "text": "Hi, I live in Nidarvoll and tonight i must reach a train to Oslo at 6 oclock.",
83
- "cite_spans": [],
84
- "ref_spans": [],
85
- "eq_spans": [],
86
- "section": "Introduction",
87
- "sec_num": "1"
88
- },
89
- {
90
- "text": "and a typical answer would follow quickly: In between the question and the answer is a process of lexical analysis, syntax analysis, semantic analysis, pragmatic reasoning and database query processing.",
91
- "cite_spans": [],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "Introduction",
95
- "sec_num": "1"
96
- },
97
- {
98
- "text": "One could argue that the information content could be solved by an interrogation, whereby the customer is asked to produce 4 items: station of departure, station of arrival, earliest departure timeand/or latest arrival time. It is a myth that natural language is a better way of communication because it is \"natural language\". The challenge is to prove by demonstration that an NL system can be made that will be preferred to the interrogative mode. To do that, the system has to be correct, user friendly and almost complete within the actual domain.",
99
- "cite_spans": [],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "Introduction",
103
- "sec_num": "1"
104
- },
105
- {
106
- "text": "The system, called BusTUC is built upon the classical system CHAT-80 (Warren and Pereira, 1982) . CHAT-80 was a state of the art natural language system that was impressive on its own merits, but also established Prolog as a viable and competitive language for Artificial Intelligence in general. The system was a brilliant masterpiece of software, efficient and sophisticated. The natural language system was connected to a small query system for international geography. The following query could be analysed and answered in a split second:",
107
- "cite_spans": [
108
- {
109
- "start": 61,
110
- "end": 95,
111
- "text": "CHAT-80 (Warren and Pereira, 1982)",
112
- "ref_id": null
113
- }
114
- ],
115
- "ref_spans": [],
116
- "eq_spans": [],
117
- "section": "Previous Efforts, CHAT-80, PRAT-89 and HSQL",
118
- "sec_num": "2"
119
- },
120
- {
121
- "text": "Which country bordering the Mediterranean borders a country that is bordered by a country whose population exceeds the population of India?",
122
- "cite_spans": [],
123
- "ref_spans": [],
124
- "eq_spans": [],
125
- "section": "Previous Efforts, CHAT-80, PRAT-89 and HSQL",
126
- "sec_num": "2"
127
- },
128
- {
129
- "text": "(The answer 'Turkey' has become incorrect as time has passed. The irony is that Geography was chosen as a domain without time.)",
130
- "cite_spans": [],
131
- "ref_spans": [],
132
- "eq_spans": [],
133
- "section": "Previous Efforts, CHAT-80, PRAT-89 and HSQL",
134
- "sec_num": "2"
135
- },
136
- {
137
- "text": "The abi!ity to answer ridiculously long queries is of course not the main goal. The main lesson is that complex sentences are analysed with a proper understanding without sacrificing efficiency. Any superfi-cial pattern matching technique would prove futile sooner or later.",
138
- "cite_spans": [],
139
- "ref_spans": [],
140
- "eq_spans": [],
141
- "section": "Previous Efforts, CHAT-80, PRAT-89 and HSQL",
142
- "sec_num": "2"
143
- },
144
- {
145
- "text": "Making a Norwegian CHAT-80, PRAT-89 At the University of Trondheim (NTNU), two students made a Norwegian version of CHAT-80,called PRAT-89 (Teigen and Vetland, 1988) , (Teigen and Vetland, 1989) . (Also, a similar Swedish project SNACK-85 was reported).",
146
- "cite_spans": [
147
- {
148
- "start": 131,
149
- "end": 165,
150
- "text": "PRAT-89 (Teigen and Vetland, 1988)",
151
- "ref_id": null
152
- },
153
- {
154
- "start": 168,
155
- "end": 194,
156
- "text": "(Teigen and Vetland, 1989)",
157
- "ref_id": null
158
- }
159
- ],
160
- "ref_spans": [],
161
- "eq_spans": [],
162
- "section": "2.1",
163
- "sec_num": null
164
- },
165
- {
166
- "text": "The dictionary was changed from English to Norwegian together with new rules for morphological analysis. The change of grammar from English to Norwegian proved to be amazingly easy. It showed that the langauges were more similar than one would believe, given that the languages are incomprehensible to each other's communities.",
167
- "cite_spans": [],
168
- "ref_spans": [],
169
- "eq_spans": [],
170
- "section": "2.1",
171
- "sec_num": null
172
- },
173
- {
174
- "text": "After changing the dictionary and graramar, the following Norwegian query about the same domain could be answered correctly in a few seconds.",
175
- "cite_spans": [],
176
- "ref_spans": [],
177
- "eq_spans": [],
178
- "section": "2.1",
179
- "sec_num": null
180
- },
181
- {
182
- "text": "Hvilke afrikanske land som hat en befolkning stoerre enn 3 millioner og mindre enn 50 millioner og er nord for Botswana og oest for Libya hat en hovedstad som hat en befolkning stoerre enn 100 tusen.",
183
- "cite_spans": [],
184
- "ref_spans": [],
185
- "eq_spans": [],
186
- "section": "2.1",
187
- "sec_num": null
188
- },
189
- {
190
- "text": "( A translation is beside the point o.f being a long query in Norwegian.)",
191
- "cite_spans": [],
192
- "ref_spans": [],
193
- "eq_spans": [],
194
- "section": "2.1",
195
- "sec_num": null
196
- },
197
- {
198
- "text": "A Nordic project HSQL (Help System for SQL) was accomplished in 1988-89 to make a joint Nordic effort interfaces to databases.",
199
- "cite_spans": [],
200
- "ref_spans": [],
201
- "eq_spans": [],
202
- "section": "HSQL -Help System for SQL",
203
- "sec_num": "2.2"
204
- },
205
- {
206
- "text": "The HSQL project was led by the Swedish State Bureau (Statskontoret), with participants from Sweden, Denmark, Finland and Norway (Amble et al., 1990) . The aim of HSQL was to build a natural language interface to SQL databases for the Scandinavian languages Swedish, Danish and Norwegian. These languages are very similar, and the Norwegian version of CHAT-80 was easily extended to the other Scandinavian languages. Instead of Geography, a more typical application area was chosen to be a query system for hospital administration. We decided to target an SQL database of a hospital administration which had been developed already.",
207
- "cite_spans": [
208
- {
209
- "start": 129,
210
- "end": 149,
211
- "text": "(Amble et al., 1990)",
212
- "ref_id": "BIBREF0"
213
- }
214
- ],
215
- "ref_spans": [],
216
- "eq_spans": [],
217
- "section": "HSQL -Help System for SQL",
218
- "sec_num": "2.2"
219
- },
220
- {
221
- "text": "The next step was then to change the domain of discourse from Geography to hospital administration, using the same knowledge representation techniques used in CHAT-80. A semantic model of this domain was made, and then implemented in the CHAT-80 framework.",
222
- "cite_spans": [],
223
- "ref_spans": [],
224
- "eq_spans": [],
225
- "section": "HSQL -Help System for SQL",
226
- "sec_num": "2.2"
227
- },
228
- {
229
- "text": "The modelling technique that proved adequate was to use an extended Entity Relationship (ER) model with a class (type) hierarchy, attributes belonging to each class, single inheritance of attributes and relationships.",
230
- "cite_spans": [],
231
- "ref_spans": [],
232
- "eq_spans": [],
233
- "section": "HSQL -Help System for SQL",
234
- "sec_num": "2.2"
235
- },
236
- {
237
- "text": "After the remodelling, the system could answer queries in \"Scandinavian\" to an internal hospital database as well as CHAT-80 could answer Geography questions. HSQL produced a Prolog-like code FOL (First Order Logic) for execution. A mapping from FOL to the data base Schema was defined, and a translator from FOL to SQL was implemented. ",
238
- "cite_spans": [],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "Coupling the system to an SQL database.",
242
- "sec_num": null
243
- },
244
- {
245
- "text": "The HSQL was a valuable experience in the effort to make transportable natural language interfaces. However, the underlying system CHAT-80 restricted the further development. After the HSQL Project was finished, an internal reseach project TUC (the Understanding Computer) was initiated at NTNU to carry on the results from HSQL. The project goals differed from those of HSQL in a number of ways, and would not be concerned with multimedia interfaces . On the other hand, portability and versatility were made central issues concerning the generality of the language and its applications. The research goals could be summarised as to",
246
- "cite_spans": [],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "The The Understanding Computer",
250
- "sec_num": "2.3"
251
- },
252
- {
253
- "text": "\u2022 Give computers an operational understanding of natural language.",
254
- "cite_spans": [],
255
- "ref_spans": [],
256
- "eq_spans": [],
257
- "section": "The The Understanding Computer",
258
- "sec_num": "2.3"
259
- },
260
- {
261
- "text": "\u2022 Build intelligent systems with natural language capabilities.",
262
- "cite_spans": [],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "The The Understanding Computer",
266
- "sec_num": "2.3"
267
- },
268
- {
269
- "text": "\u2022 Study common sense reasoning in natural language.",
270
- "cite_spans": [],
271
- "ref_spans": [],
272
- "eq_spans": [],
273
- "section": "The The Understanding Computer",
274
- "sec_num": "2.3"
275
- },
276
- {
277
- "text": "A test criterion for the understanding capacity is that after a set of definitions in a Naturally Readable Logic, NRL, the system's answer to queries in NRL should conform to the answers of an idealised rational agent. NRL is defined in a closed context. Thus interfaces to other systems are in principle defined through simulating the environment as a dialogue partner.",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "The The Understanding Computer",
282
- "sec_num": "2.3"
283
- },
284
- {
285
- "text": "TUC is a prototypical natural language processor for English written in Prolog. It is designed to be a general purpose easily adaptable natural language processor. It consists of a general grammar for a subset of English, a semantic knowledge base, and modules for interfaces to other interfaces like UNIX, SQL-databases and general textual information sources.",
286
- "cite_spans": [],
287
- "ref_spans": [],
288
- "eq_spans": [],
289
- "section": "The The Understanding Computer",
290
- "sec_num": "2.3"
291
- },
292
- {
293
- "text": "It so happened that a Universtity Project was starteded in 1996, called TABOR ( \" Speech based user interfaces and reasoning systems \"), with the aim of building an automatic public transport route oracle, available over the public telephone. At the onset of the project, the World Wide Web was fresh, and not as widespread as today, and the telephone was still regarded as the main source of information for the public.",
294
- "cite_spans": [],
295
- "ref_spans": [],
296
- "eq_spans": [],
297
- "section": "The TABOR Project",
298
- "sec_num": "2.4"
299
- },
300
- {
301
- "text": "Since then, the Internet became the dominant medium, and it is as likeley to find a computer with Internet connection, as to find a local busroute table. ( The consequtive wide spreading of cellular phones changed the picture in favour of the telephone, but that is another story).",
302
- "cite_spans": [],
303
- "ref_spans": [],
304
- "eq_spans": [],
305
- "section": "The TABOR Project",
306
- "sec_num": "2.4"
307
- },
308
- {
309
- "text": "It was decided that a text based information system should be built, regardless of the status of the speech rocgnition and speech synthesis effort, which proved to lag behind after a while.",
310
- "cite_spans": [],
311
- "ref_spans": [],
312
- "eq_spans": [],
313
- "section": "The TABOR Project",
314
- "sec_num": "2.4"
315
- },
316
- {
317
- "text": "The resulting system BusTUC grew out as a natural application of TUC, and an English prototype could be built within a few months (Bratseth, 1997) .",
318
- "cite_spans": [
319
- {
320
- "start": 130,
321
- "end": 146,
322
- "text": "(Bratseth, 1997)",
323
- "ref_id": null
324
- }
325
- ],
326
- "ref_spans": [],
327
- "eq_spans": [],
328
- "section": "The BusTUC system",
329
- "sec_num": null
330
- },
331
- {
332
- "text": "Since the summer 1996, the prototype was put onto the Internet, and been developed and tested more or less continually until today. The most important extension was that the system was made bilingual (Norwegian and English) during the fall 1996.",
333
- "cite_spans": [],
334
- "ref_spans": [],
335
- "eq_spans": [],
336
- "section": "The BusTUC system",
337
- "sec_num": null
338
- },
339
- {
340
- "text": "In spring 1999, the BusTUC was finally adopted by the local bus company in Trondheim ( A/S Trondheim Trafikkselskap), which set up a server ( a 300 MHz PC with Linux).",
341
- "cite_spans": [],
342
- "ref_spans": [],
343
- "eq_spans": [],
344
- "section": "The BusTUC system",
345
- "sec_num": null
346
- },
347
- {
348
- "text": "Until today, over 150.000 questions have been answered, and BusTUC seems to stabilize and grow increasingly popular.",
349
- "cite_spans": [],
350
- "ref_spans": [],
351
- "eq_spans": [],
352
- "section": "The BusTUC system",
353
- "sec_num": null
354
- },
355
- {
356
- "text": "Anatomy of the bus route oracle The main components of the bus route information systems are:",
357
- "cite_spans": [],
358
- "ref_spans": [],
359
- "eq_spans": [],
360
- "section": "3",
361
- "sec_num": null
362
- },
363
- {
364
- "text": "\u2022 A parser system, consisting of a dictionary, a lexical processor, a grammar and a parser.",
365
- "cite_spans": [],
366
- "ref_spans": [],
367
- "eq_spans": [],
368
- "section": "3",
369
- "sec_num": null
370
- },
371
- {
372
- "text": "\u2022 A knowledge base (KB), divided into a semantic KB and an application KB",
373
- "cite_spans": [],
374
- "ref_spans": [],
375
- "eq_spans": [],
376
- "section": "3",
377
- "sec_num": null
378
- },
379
- {
380
- "text": "\u2022 A query processor, contalng a routing logic system, and a route data base.",
381
- "cite_spans": [],
382
- "ref_spans": [],
383
- "eq_spans": [],
384
- "section": "3",
385
- "sec_num": null
386
- },
387
- {
388
- "text": "The system is bilingual and contains a double set of dictionary, morphology and grammar. Actually, it detects which language is most probable by counting the number of unknown words related to each language, and acts accordingly. The grammars are surprisingly similar, but no effort is made to coalesce them. The Norwegian grammar is slightly bigger than the English grammar, mostly because it is more elaborated but also because Norwegian allows a freer word order.",
389
- "cite_spans": [],
390
- "ref_spans": [],
391
- "eq_spans": [],
392
- "section": "3",
393
- "sec_num": null
394
- },
395
- {
396
- "text": "Features of BusTUC For the Norwegian systems, the figures give an indication of the size of the domain: 420 nouns, 150 verbs, 165 adjectives, 60 prepositions, etc.",
397
- "cite_spans": [],
398
- "ref_spans": [],
399
- "eq_spans": [],
400
- "section": "3.1",
401
- "sec_num": null
402
- },
403
- {
404
- "text": "There are 1300 grammar rules ( 810 for English) although half of the rules are very low level.",
405
- "cite_spans": [],
406
- "ref_spans": [],
407
- "eq_spans": [],
408
- "section": "3.1",
409
- "sec_num": null
410
- },
411
- {
412
- "text": "The semantic net described below contains about 4000 entries.",
413
- "cite_spans": [],
414
- "ref_spans": [],
415
- "eq_spans": [],
416
- "section": "3.1",
417
- "sec_num": null
418
- },
419
- {
420
- "text": "A big name table of 3050 names in addition to the official station names, is required to capture the variety of naming. A simple spell correction is a part of the system ( essentially 1 character errors).",
421
- "cite_spans": [],
422
- "ref_spans": [],
423
- "eq_spans": [],
424
- "section": "3.1",
425
- "sec_num": null
426
- },
427
- {
428
- "text": "The pragmatic reasoning is needed to translate the output from the parser to a route database query language . This is done by a production system called Pragma, which acts like an advanced rewriting system with 580 rules.",
429
- "cite_spans": [],
430
- "ref_spans": [],
431
- "eq_spans": [],
432
- "section": "3.1",
433
- "sec_num": null
434
- },
435
- {
436
- "text": "In addition, there is another rule base for actually generating the natural language answers (120 rules).",
437
- "cite_spans": [],
438
- "ref_spans": [],
439
- "eq_spans": [],
440
- "section": "3.1",
441
- "sec_num": null
442
- },
443
- {
444
- "text": "The system is mainly written in Prolog (Sicstus Prolog 3.7), with some Perl programs for the communication and CGI-scripts.",
445
- "cite_spans": [],
446
- "ref_spans": [],
447
- "eq_spans": [],
448
- "section": "3.1",
449
- "sec_num": null
450
- },
451
- {
452
- "text": "At the moment, there are about 35000 lines of programmed Prolog code (in addition to route tables which are also in Prolog).",
453
- "cite_spans": [],
454
- "ref_spans": [],
455
- "eq_spans": [],
456
- "section": "3.1",
457
- "sec_num": null
458
- },
459
- {
460
- "text": "Average response time is usually less than 2 seconds, but there are queries that demand up to 10 seconds.",
461
- "cite_spans": [],
462
- "ref_spans": [],
463
- "eq_spans": [],
464
- "section": "3.1",
465
- "sec_num": null
466
- },
467
- {
468
- "text": "The error rate for single, correct, complete and relevant questions is about 2 percent.",
469
- "cite_spans": [],
470
- "ref_spans": [],
471
- "eq_spans": [],
472
- "section": "3.1",
473
- "sec_num": null
474
- },
475
- {
476
- "text": "The grammar is based on a simple grammar for statements, while questions and commands are derived by the use of movements. The grammar formalism which is called Consensical Grammar, (CONtext SENSitive CompositionAL Grammar) is an easy to use variant of Extraposition Grammar (Pereira and Warren, 1980) , which is a generalisation of Definite Clause Grammars. Compositional grammar means that the semantics of a a phrase is composed of the semantics of the subphrases; the basic constituents being a form of verb complements. As for Extraposition grammars, a grammar is translated to Definite Clause Grammars, and executed as such.",
477
- "cite_spans": [
478
- {
479
- "start": 275,
480
- "end": 301,
481
- "text": "(Pereira and Warren, 1980)",
482
- "ref_id": null
483
- }
484
- ],
485
- "ref_spans": [],
486
- "eq_spans": [],
487
- "section": "The Parser System The Grammar System",
488
- "sec_num": "3.2"
489
- },
490
- {
491
- "text": "A characteristic syntactic expression in Consensical Grammar may define an incomplete construct in terms of a \"difference \" between complete constructs. When possible, the parser will use the subtracted part in stead of reading from the input, after a gap if necessary. The effect is the same as for Extraposition grammars, but the this format is more intuitive.",
492
- "cite_spans": [],
493
- "ref_spans": [],
494
- "eq_spans": [],
495
- "section": "The Parser System The Grammar System",
496
- "sec_num": "3.2"
497
- },
498
- {
499
- "text": "Examples of grammar rules.",
500
- "cite_spans": [],
501
- "ref_spans": [],
502
- "eq_spans": [],
503
- "section": "The Parser System The Grammar System",
504
- "sec_num": "3.2"
505
- },
506
- {
507
- "text": "which is analysed as for which X is it true that the (X) person has a dog that barked?",
508
- "cite_spans": [],
509
- "ref_spans": [],
510
- "eq_spans": [],
511
- "section": "The Parser System The Grammar System",
512
- "sec_num": "3.2"
513
- },
514
- {
515
- "text": "where the last line is analysed as a statement. Movement is easily handled in Consensical Grammar without making special phrase rules for each kind of movement. The following example shows how TUC manages a variety of analyses using movements: whichq(which(X)::P) ---> [which], statement(P) -the(X).",
516
- "cite_spans": [],
517
- "ref_spans": [],
518
- "eq_spans": [],
519
- "section": "The Parser System The Grammar System",
520
- "sec_num": "3.2"
521
- },
522
- {
523
- "text": "Max",
524
- "cite_spans": [],
525
- "ref_spans": [],
526
- "eq_spans": [],
527
- "section": "The Parser System The Grammar System",
528
- "sec_num": "3.2"
529
- },
530
- {
531
- "text": "Whose dog barked?",
532
- "cite_spans": [],
533
- "ref_spans": [],
534
- "eq_spans": [],
535
- "section": "Example:",
536
- "sec_num": null
537
- },
538
- {
539
- "text": "is analysed as if the sentence had been",
540
- "cite_spans": [],
541
- "ref_spans": [],
542
- "eq_spans": [],
543
- "section": "Example:",
544
- "sec_num": null
545
- },
546
- {
547
- "text": "Who has a dog that barked?",
548
- "cite_spans": [],
549
- "ref_spans": [],
550
- "eq_spans": [],
551
- "section": "Example:",
552
- "sec_num": null
553
- },
554
- {
555
- "text": "which is analysed as Which person has a dog that barked?",
556
- "cite_spans": [],
557
- "ref_spans": [],
558
- "eq_spans": [],
559
- "section": "Example:",
560
- "sec_num": null
561
- },
562
- {
563
- "text": "Who did Max say Bill thought believed Fido barked? ==> Joe",
564
- "cite_spans": [],
565
- "ref_spans": [],
566
- "eq_spans": [],
567
- "section": "Example:",
568
- "sec_num": null
569
- },
570
- {
571
- "text": "The experiences with Consensical grammars are a bit mixed however. The main problem is the parsing method itself, which is top down with backtracking. Many principles that would prove elegant for small domains turned out to be too costly for larger domains, due to the wide variety of modes of expressions, incredible ambiguities and the sheer size of the covered language. The disambiguation is a major problem for small grammars and large languages, and was solved by the following guidelines:",
572
- "cite_spans": [],
573
- "ref_spans": [],
574
- "eq_spans": [],
575
- "section": "The parser",
576
- "sec_num": null
577
- },
578
- {
579
- "text": "\u2022 a semantic type checking was integrated into the parser, and would help to discard sematica/ly wrong parses from the start.",
580
- "cite_spans": [],
581
- "ref_spans": [],
582
- "eq_spans": [],
583
- "section": "The parser",
584
- "sec_num": null
585
- },
586
- {
587
- "text": "\u2022 a heuristics was followed that proved almost irreproachable: The longest possible phrase of a category that is semantically correct is in most cases the preferred interpretation.",
588
- "cite_spans": [],
589
- "ref_spans": [],
590
- "eq_spans": [],
591
- "section": "The parser",
592
- "sec_num": null
593
- },
594
- {
595
- "text": "\u2022 due to the perplexity of the language, some committed choices (cuts) had to be inserted into the grammar at strategic places. As one could fear however, this implied that wrong choices being made at some point in the parsing could not be recovered by backtracking.",
596
- "cite_spans": [],
597
- "ref_spans": [],
598
- "eq_spans": [],
599
- "section": "The parser",
600
- "sec_num": null
601
- },
602
- {
603
- "text": "These problems also made it imperative to introduce a timeout on the parsing process of embarassing 10 seconds. Although most sentences, would be parsed within a second, some legal sentences of moderate size actually need this time.",
604
- "cite_spans": [],
605
- "ref_spans": [],
606
- "eq_spans": [],
607
- "section": "The parser",
608
- "sec_num": null
609
- },
610
- {
611
- "text": "Adaptability means that the system does not need to be reprogrammed for each new application.",
612
- "cite_spans": [],
613
- "ref_spans": [],
614
- "eq_spans": [],
615
- "section": "The semantic knowledge base",
616
- "sec_num": "3.3"
617
- },
618
- {
619
- "text": "The design principle of TUC is that most of the changes are made in a tabular semantic knowledge base, while there is one general grammar and dictionary. In general, the logic is generated automatically from the semantic knowledge base.",
620
- "cite_spans": [],
621
- "ref_spans": [],
622
- "eq_spans": [],
623
- "section": "The semantic knowledge base",
624
- "sec_num": "3.3"
625
- },
626
- {
627
- "text": "The nouns play a key role in the understanding part as they constitute the class or type hierarchy. Nouns are defined in an a-kind-of hierarchy. The hierarchy is tree-structured with single inheritance. The top level also constitute the top level ontology of TUC's world.",
628
- "cite_spans": [],
629
- "ref_spans": [],
630
- "eq_spans": [],
631
- "section": "The semantic knowledge base",
632
- "sec_num": "3.3"
633
- },
634
- {
635
- "text": "In fact, a type check of the compliances of verbs, nouns adjectives and prepositions is not only necessary for the semantic processing but is essential for the syntax analysis for the disambiguation as well. In TUC, the legal combinations are carefully assembled in the semantic network, which then serves a dual purpose.",
636
- "cite_spans": [],
637
- "ref_spans": [],
638
- "eq_spans": [],
639
- "section": "The semantic knowledge base",
640
- "sec_num": "3.3"
641
- },
642
- {
643
- "text": "These semantic definitions are necessary to allow for instance the following sentences",
644
- "cite_spans": [],
645
- "ref_spans": [],
646
- "eq_spans": [],
647
- "section": "The semantic knowledge base",
648
- "sec_num": "3.3"
649
- },
650
- {
651
- "text": "The dog saw a man with a telescope. The man saw a dog with a telescope.",
652
- "cite_spans": [],
653
- "ref_spans": [],
654
- "eq_spans": [],
655
- "section": "The semantic knowledge base",
656
- "sec_num": "3.3"
657
- },
658
- {
659
- "text": "to be treated differently because with telescope may modify the noun man but not the noun dog, while with telescope modifies the verb see, restricted to person.",
660
- "cite_spans": [],
661
- "ref_spans": [],
662
- "eq_spans": [],
663
- "section": "The semantic knowledge base",
664
- "sec_num": "3.3"
665
- },
666
- {
667
- "text": "The semantics of the phrases are built up by a kind of verb complements, where the event play a central role.",
668
- "cite_spans": [],
669
- "ref_spans": [],
670
- "eq_spans": [],
671
- "section": "The Query Processor Event Calculus",
672
- "sec_num": "3.4"
673
- },
674
- {
675
- "text": "The text is translated from Natural language into a form called TQL (Temporal Query Language/ TUC Query Language) which is a first order event calculus expression, a self contained expression containing the literal meaning of an utterance.",
676
- "cite_spans": [],
677
- "ref_spans": [],
678
- "eq_spans": [],
679
- "section": "The Query Processor Event Calculus",
680
- "sec_num": "3.4"
681
- },
682
- {
683
- "text": "A formalism TQL that was defined, inspired by the Event Calculus by Kowalski and Sergot (Kowalski and Sergot, 1986) .",
684
- "cite_spans": [
685
- {
686
- "start": 68,
687
- "end": 115,
688
- "text": "Kowalski and Sergot (Kowalski and Sergot, 1986)",
689
- "ref_id": null
690
- }
691
- ],
692
- "ref_spans": [],
693
- "eq_spans": [],
694
- "section": "The Query Processor Event Calculus",
695
- "sec_num": "3.4"
696
- },
697
- {
698
- "text": "The TQL expressions consist of predicates, functions, constants and variables. The textual words of nouns and verbs are translated to generic predicates using the selected interpretation. The following question The event parameter plays an important role in the semantics. It is used for various purposes. The most salient role is to identify a subset of time and space in which an action or event occured. Both the actual time and space coordinates are connected to the actions through the event parameter.",
699
- "cite_spans": [],
700
- "ref_spans": [],
701
- "eq_spans": [],
702
- "section": "The Query Processor Event Calculus",
703
- "sec_num": "3.4"
704
- },
705
- {
706
- "text": "The TQL is translated to a route database query language (BusLOG) which is actually a Prolog program. This is done by a production system called Pragma, which acts like an advanced rewriting system with 580 rules.",
707
- "cite_spans": [],
708
- "ref_spans": [],
709
- "eq_spans": [],
710
- "section": "Pragmatic reasoning",
711
- "sec_num": null
712
- },
713
- {
714
- "text": "In addition, there is another rule base for actually generating the natural language answers (120 rules).",
715
- "cite_spans": [],
716
- "ref_spans": [],
717
- "eq_spans": [],
718
- "section": "Pragmatic reasoning",
719
- "sec_num": null
720
- },
721
- {
722
- "text": "Conclusions The TUC approach has as its goal to automate the creation of new natural language interfaces for a well defined subset of the language and with a minimum of explicit programming.",
723
- "cite_spans": [],
724
- "ref_spans": [],
725
- "eq_spans": [],
726
- "section": "4",
727
- "sec_num": null
728
- },
729
- {
730
- "text": "The implemented system has proved its worth, and is interesting if for no other reason. There is also an increasing interest from other bus companies and route information companies alike to get a similar system for their customers.",
731
- "cite_spans": [],
732
- "ref_spans": [],
733
- "eq_spans": [],
734
- "section": "4",
735
- "sec_num": null
736
- },
737
- {
738
- "text": "Further work remains to make the parser really efficient, and much work remains to make the language coverage complete within reasonable limits.",
739
- "cite_spans": [],
740
- "ref_spans": [],
741
- "eq_spans": [],
742
- "section": "4",
743
- "sec_num": null
744
- },
745
- {
746
- "text": "It is an open question whether the system of this kind will be a preferred way of offering information to the public.",
747
- "cite_spans": [],
748
- "ref_spans": [],
749
- "eq_spans": [],
750
- "section": "4",
751
- "sec_num": null
752
- },
753
- {
754
- "text": "If it is, it is a fair amount of work to make it a portable system that can be implemented elsewhere, also connecting various travelling agencies.",
755
- "cite_spans": [],
756
- "ref_spans": [],
757
- "eq_spans": [],
758
- "section": "4",
759
- "sec_num": null
760
- },
761
- {
762
- "text": "If not, it will remain a curiosity. But anyway, a system like this will be a contribution to the development of intelligent systems.",
763
- "cite_spans": [],
764
- "ref_spans": [],
765
- "eq_spans": [],
766
- "section": "4",
767
- "sec_num": null
768
- }
769
- ],
770
- "back_matter": [],
771
- "bib_entries": {
772
- "BIBREF0": {
773
- "ref_id": "b0",
774
- "title": "Naturlig Spr~k och Grafik -nya vSgar inn i databaser",
775
- "authors": [
776
- {
777
- "first": "Tore",
778
- "middle": [],
779
- "last": "Amble",
780
- "suffix": ""
781
- },
782
- {
783
- "first": "Erik",
784
- "middle": [],
785
- "last": "Knudsen",
786
- "suffix": ""
787
- },
788
- {
789
- "first": "Aarno",
790
- "middle": [],
791
- "last": "Lehtola",
792
- "suffix": ""
793
- },
794
- {
795
- "first": "Jan",
796
- "middle": [],
797
- "last": "Ljungberg",
798
- "suffix": ""
799
- },
800
- {
801
- "first": "Ole",
802
- "middle": [],
803
- "last": "Ravnholt",
804
- "suffix": ""
805
- }
806
- ],
807
- "year": 1990,
808
- "venue": "",
809
- "volume": "",
810
- "issue": "",
811
- "pages": "",
812
- "other_ids": {},
813
- "num": null,
814
- "urls": [],
815
- "raw_text": "Tore Amble, Erik Knudsen, Aarno Lehtola, Jan Ljungberg, and Ole Ravnholt. 1990. Naturlig Spr~k och Grafik -nya vSgar inn i databaser.",
816
- "links": null
817
- }
818
- },
819
- "ref_entries": {
820
- "FIGREF0": {
821
- "uris": null,
822
- "num": null,
823
- "text": "Bus number 54 passes by Nidarvoll skole at 1710 and arrives at Trondheim Railway Station at 1725.",
824
- "type_str": "figure"
825
- },
826
- "FIGREF1": {
827
- "uris": null,
828
- "num": null,
829
- "text": "said Bill thought Joe believed Fido Barked. Who said Bill thought Joe believed Fido barked? ==> Max Who did Max say thought Joe believed Fido barked? ==> Bill statement],[has],[a],noun(N),[that]). whoq(P) ---> [who], whichq(P) -([which],[person]).",
830
- "type_str": "figure"
831
- },
832
- "FIGREF2": {
833
- "uris": null,
834
- "num": null,
835
- "text": "Do you know whether the bus goesto Nidar on Saturday ?would give the TQL expression below. Typically, the Norwegian equivalent Vet du om bussen gaar til Nidar paa soendag ? action of E is Go actor(A,E), Y. the actor of E is A srel(to,place,nidar,E),Y. E is to nidar srel(on,time,B,E), y, E is on the saturday B",
836
- "type_str": "figure"
837
- },
838
- "TABREF1": {
839
- "type_str": "table",
840
- "text": "Every man that lives loves Mary.",
841
- "html": null,
842
- "num": null,
843
- "content": "<table><tr><td>John is a man. John lives.</td></tr><tr><td>Who loves Mary?</td></tr><tr><td>==&gt; John</td></tr></table>"
844
- }
845
- }
846
- }
847
- }
Full_text_JSON/prefixA/json/A00/A00-1002.json DELETED
@@ -1,519 +0,0 @@
1
- {
2
- "paper_id": "A00-1002",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:37.021450Z"
6
- },
7
- "title": "Machine Translation of Very Close Languages",
8
- "authors": [
9
- {
10
- "first": "Jan",
11
- "middle": [],
12
- "last": "Haji",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Johns Hopkins University",
17
- "location": {
18
- "addrLine": "3400 N. Charles St",
19
- "postCode": "21218",
20
- "settlement": "Baltimore",
21
- "region": "MD",
22
- "country": "USA"
23
- }
24
- },
25
- "email": "[email protected]"
26
- },
27
- {
28
- "first": "Jan",
29
- "middle": [],
30
- "last": "Hric",
31
- "suffix": "",
32
- "affiliation": {
33
- "laboratory": "",
34
- "institution": "KTI MFF",
35
- "location": {
36
- "addrLine": "Malostransk6 nfim.25 Praha 1",
37
- "postCode": "11800",
38
- "country": "UK"
39
- }
40
- },
41
- "email": "[email protected]"
42
- },
43
- {
44
- "first": "Vladislav",
45
- "middle": [],
46
- "last": "Kubon",
47
- "suffix": "",
48
- "affiliation": {
49
- "laboratory": "",
50
- "institution": "OFAL MFF",
51
- "location": {
52
- "addrLine": "Malostransk6 mim.25 Praha 1",
53
- "postCode": "11800",
54
- "country": "UK"
55
- }
56
- },
57
- "email": ""
58
- }
59
- ],
60
- "year": "",
61
- "venue": null,
62
- "identifiers": {},
63
- "abstract": "Using examples of the transfer-based MT system between Czech and Russian RUSLAN and the word-for-word MT system with morphological disambiguation between Czech and Slovak (~ESILKO we argue that for really close languages it is possible to obtain better translation quality by means of simpler methods. The problem of translation to a group of typologically similar languages using a pivot language is also discussed here.",
64
- "pdf_parse": {
65
- "paper_id": "A00-1002",
66
- "_pdf_hash": "",
67
- "abstract": [
68
- {
69
- "text": "Using examples of the transfer-based MT system between Czech and Russian RUSLAN and the word-for-word MT system with morphological disambiguation between Czech and Slovak (~ESILKO we argue that for really close languages it is possible to obtain better translation quality by means of simpler methods. The problem of translation to a group of typologically similar languages using a pivot language is also discussed here.",
70
- "cite_spans": [],
71
- "ref_spans": [],
72
- "eq_spans": [],
73
- "section": "Abstract",
74
- "sec_num": null
75
- }
76
- ],
77
- "body_text": [
78
- {
79
- "text": "Although the field of machine translation has a very long history, the number of really successful systems is not very impressive. Most of the funds invested into the development of various MT systems have been wasted and have not stimulated a development of techniques which would allow to translate at least technical texts from a certain limited domain. There were, of course, exceptions, which demonstrated that under certain conditions it is possible to develop a system which will save money and efforts invested into human translation. The main reason why the field of MT has not met the expectations of sci-fi literature, but also the expectations of scientific community, is the complexity of the task itself. A successful automatic translation system requires an application of techniques from several areas of computational linguistics (morphology, syntax, semantics, discourse analysis etc.) as a necessary, but not a sufficient condition. The general opinion is that it is easier to create an MT system for a pair of related languages. In our contribution we would like to demonstrate that this assumption holds only for really very closely related languages.",
80
- "cite_spans": [],
81
- "ref_spans": [],
82
- "eq_spans": [],
83
- "section": "Introduction",
84
- "sec_num": null
85
- },
86
- {
87
- "text": "The first attempt to verify the hypothesis that related languages are easier to translate started in mid 80s at Charles University in Prague. The project was called RUSLAN and aimed at the translation of documentation in the domain of operating systems for mainframe computers. It was developed in cooperation with the Research Institute of Mathematical Machines in Prague. At that time in former COMECON countries it was obligatory to translate any kind of documentation to such systems into Russian. The work on the Czech-to-Russian MT system RUSLAN (cf. Oliva (1989) ) started in 1985. It was terminated in 1990 (with COMECON gone) for the lack of funding.",
88
- "cite_spans": [
89
- {
90
- "start": 545,
91
- "end": 569,
92
- "text": "RUSLAN (cf. Oliva (1989)",
93
- "ref_id": null
94
- }
95
- ],
96
- "ref_spans": [],
97
- "eq_spans": [],
98
- "section": "History",
99
- "sec_num": "1.1"
100
- },
101
- {
102
- "text": "The system was rule-based, implemented in Colmerauer's Q-systems. It contained a fullfledged morphological and syntactic analysis of Czech, a transfer and a syntactic and morphological generation of Russian. There was almost no transfer at the beginning of the project due to the assumption that both languages are similar to the extent that does not require any transfer phase at all. This assumption turned to be wrong and several phenomena were covered by the transfer in the later stage of the project (for example the translation of the Czech verb \"b~\" [to be] into one of the three possible Russian equivalents: empty form, the form \"byt6\" in future tense and the verb \"javljat6sja\"; or the translation of verbal negation). At the time when the work was terminated in 1990, the system had a main translation dictionary of about 8000 words, accompanied by so called transducing dictionary covering another 2000 words. The transducing dictionary was based on the original idea described in Kirschner (1987) . It aimed at the exploitation of the fact that technical terms are based (in a majority of European languages) on Greek or Latin stems, adopted according to the particular derivational rules of the given languages. This fact allows for the \"translation\" of technical terms by means of a direct transcription of productive endings and a slight (regular) adjustment of the spelling of the stem. For example, the English words localization and discrimination can be transcribed into Czech as \"lokalizace\" and \"diskriminace\" with a productive ending -ation being transcribed to -ace. It was generally assumed that for the pair Czech/Russian the transducing dictionary would be able to profit from a substantially greater number of productive rules. This hypothesis proved to be wrong, too (see B6mov~, Kubofi (1990) ). The set of productive endings for both pairs (English/Czech, as developed for an earlier MT system from English to Czech, and Czech/Russian) was very similar. The evaluation of results of RUSLAN showed that roughly 40% of input sentences were translated correctly, about 40% with minor errors correctable by a human post-editor and about 20% of the input required substantial editing or re-translation. There were two main factors that caused a deterioration of the translation. The first factor was the incompleteness of the main dictionary of the system. Even though the system contained a set of so-called fail-soft rules, whose task was to handle such situations, an unknown word typically caused a failure of the module of syntactic analysis, because the dictionary entries contained -besides the translation equivalents and morphological information -very important syntactic information. The second factor was the module of syntactic analysis of Czech. There were several reasons of parsing failures. Apart from the common inability of most rule-based formal grammars to cover a particular natural language to the finest detail of its syntax there were other problems. One of them was the existence of non-projective constructions, which are quite common in Czech even in relatively short sentences. Even though they account only for 1.7\u00b0/'o of syntactic dependencies, every third Czech sentence contains at least one, and in a news corpus, we discovered as much as 15 non-projective dependencies; see also Haji6 et al. (1998) . An example of a non-projective construction is \"Soubor se nepodafilo otev~it.\" . 
The formalism used for the implementation (Q-systems) was not meant to handle non-projective constructions. Another source of trouble was the use of so-called semantic features. These features were based on lexical semantics of individual words. Their main task was to support a semantically plausible analysis and to block the implausible ones. It turned out that the question of implausible combinations of semantic features is also more complex than it was supposed to be. The practical outcome of the use of semantic features was a higher ratio of parsing failures -semantic features often blocked a plausible analysis. For example, human lexicographers assigned the verb 'to run' a semantic feature stating that only a noun with semantic features of a human or other living being may be assigned the role of subject of this verb. The input text was however full of sentences with 'programs' or 'systems' running etc. It was of course very easy to correct the semantic feature in the dictionary, but the problem was that there were far too many corrections required. On the other hand, the fact that both languages allow a high degree of word-order freedom accounted for a certain simplification of the translation process. The grammar relied on the fact that there are only minor word-order differences between Czech and Russian.",
103
- "cite_spans": [
104
- {
105
- "start": 994,
106
- "end": 1010,
107
- "text": "Kirschner (1987)",
108
- "ref_id": "BIBREF5"
109
- },
110
- {
111
- "start": 1810,
112
- "end": 1823,
113
- "text": "Kubofi (1990)",
114
- "ref_id": "BIBREF0"
115
- },
116
- {
117
- "start": 3341,
118
- "end": 3360,
119
- "text": "Haji6 et al. (1998)",
120
- "ref_id": null
121
- }
122
- ],
123
- "ref_spans": [],
124
- "eq_spans": [],
125
- "section": "System description",
126
- "sec_num": "1.2"
127
- },
128
- {
129
- "text": "We have learned several lessons regarding the MT of closely related languages:",
130
- "cite_spans": [],
131
- "ref_spans": [],
132
- "eq_spans": [],
133
- "section": "Lessons learned from RUSLAN",
134
- "sec_num": "1.3"
135
- },
136
- {
137
- "text": "\u2022 The transfer-based approach provides a similar quality of translation both for closely related and typologically different languages \u2022 Two main bottlenecks of full-fledged transfer-based systems are:",
138
- "cite_spans": [],
139
- "ref_spans": [],
140
- "eq_spans": [],
141
- "section": "Lessons learned from RUSLAN",
142
- "sec_num": "1.3"
143
- },
144
- {
145
- "text": "-complexity of the syntactic dictionary -relative unreliability of the syntactic analysis of the source language Even a relatively simple component (transducing dictionary) was equally complex for English-to-Czech and Czech-to-Russian translation Limited text domains do not exist in real life, it is necessary to work with a high coverage dictionary at least for the source language.",
146
- "cite_spans": [],
147
- "ref_spans": [],
148
- "eq_spans": [],
149
- "section": "Lessons learned from RUSLAN",
150
- "sec_num": "1.3"
151
- },
152
- {
153
- "text": "Localization of products and their documentation is a great problem for any company, which wants to strengthen its position on foreign language market, especially for companies producing various kinds of software. The amounts of texts being localized are huge and the localization costs are huge as well. It is quite clear that the localization from one source language to several target languages, which are typologically similar, but different from the source language, is a waste of money and effort. It is of course much easier to translate texts from Czech to Polish or from Russian to Bulgarian than from English or German to any of these languages. There are several reasons, why localization and translation is not being performed through some pivot language, representing a certain group of closely related languages. Apart from political reasons the translation through a pivot language has several drawbacks. The most important one is the problem of the loss of translation quality. Each translation may to a certain extent shift the meaning of the translated text and thus each subsequent translation provides results more and more different from the original. The second most important reason is the lack of translators from the pivot to the target language, while this is usually no problem for the translation from the source directly to the target language.",
154
- "cite_spans": [],
155
- "ref_spans": [],
156
- "eq_spans": [],
157
- "section": "A pivot language",
158
- "sec_num": "2.1"
159
- },
160
- {
161
- "text": "The main goal of this paper is to suggest how to overcome these obstacles by means of a combination of an MT system with commercial MAHT (Machine-aided human translation) systems. We have chosen the TRADOS Translator's Workbench as a representative system of a class of these products, which can be characterized as an example-based translation tools. IBM's Translation Manager and other products also belong to this class. Such systems uses so-called translation memory, which contains pairs of previously translated sentences from a source to a target language. When a human translator starts translating a new sentence, the system tries to match the source with sentences already stored in the translation memory. If it is successful, it suggests the translation and the human translator decides whether to use it, to modify it or to reject it. The segmentation of a translation memory is a key feature for our system. The translation memory may be exported into a text file and thus allows easy manipulation with its content. Let us suppose that we have at our disposal two translation memories -one human made for the source/pivot language pair and the other created by an MT system for the pivot/target language pair. The substitution of segments of a pivot language by the segments of a target language is then only a routine procedure.",
162
- "cite_spans": [],
163
- "ref_spans": [],
164
- "eq_spans": [],
165
- "section": "Translation memory is the key",
166
- "sec_num": "2.2"
167
- },
168
- {
169
- "text": "The human translator translating from the source language to the target language then gets a translation memory for the required pair (source/target). The system of penalties applied in TRADOS Translator's Workbench (or a similar system) guarantees that if there is already a human-made translation present, then it gets higher priority than the translation obtained as a result of the automatic MT. This system solves both problems mentioned abovethe human translators from the pivot to the target language are not needed at all and the machinemade translation memory serves only as a resource supporting the direct human translation from the source to the target language.",
170
- "cite_spans": [],
171
- "ref_spans": [],
172
- "eq_spans": [],
173
- "section": "Translation memory is the key",
174
- "sec_num": "2.2"
175
- },
176
- {
177
- "text": "In the group of Slavic languages, there are more closely related languages than Czech and Russian. Apart from the pair of Serbian and Croatian languages, which are almost identical and were considered one language just a few years ago, the most closely related languages in this group are Czech and Slovak. This fact has led us to an experiment with automatic translation between Czech and Slovak. It was clear that application of a similar method to that one used in the system RUSLAN would lead to similar results. Due to the closeness of both languages we have decided to apply a simpler method. Our new system, (~ESILKO, aims at a maximal exploitation of the similarity of both languages. The system uses the method of direct word-for-word translation, justified by the similarity of syntactic constructions of both languages. Although the system is currently being tested on texts from the domain of documentation to corporate information systems, it is not limited to any specific domain. Its primary task is, however, to provide support for translation and localization of various technical texts.",
178
- "cite_spans": [],
179
- "ref_spans": [],
180
- "eq_spans": [],
181
- "section": "Machine translation of (very) closely related Slavic languages",
182
- "sec_num": "3."
183
- },
184
- {
185
- "text": "The greatest problem of the word-for-word translation approach (for languages with very similar syntax and word order, but different morphological system) is the problem of morphological ambiguity of individual word forms. The type of ambiguity is slightly different in languages with a rich inflection (majority of Slavic languages) and in languages which do not have such a wide variety of forms derived from a single lemma. For example, in Czech there are only rare cases of part-of-speech ambiguities (st~t [to stay/the state], zena [woman/chasing] or tri [three/rub(imperative)]), much more frequent is the ambiguity of gender, number and case (for example, the form of the adjective jam[ [spring] is 27-times ambiguous). The main problem is that even though several Slavic languages have the same property as Czech, the ambiguity is not preserved. It is distributed in a different manner and the \"form-for-form\" translation is not applicable. Without the analysis of at least nominal groups it is often very difficult to solve this problem, because for example the actual morphemic categories of adjectives are in Czech distinguishable only on the basis of gender, number and case agreement between an adjective and its governing noun. An alternative way to the solution of this problem was the application of a stochastically based morphological disambiguator (morphological tagger) for Czech whose success rate is close to 92\u00b0/'0. Our system therefore consists of the following modules:",
186
- "cite_spans": [
187
- {
188
- "start": 694,
189
- "end": 702,
190
- "text": "[spring]",
191
- "ref_id": null
192
- }
193
- ],
194
- "ref_spans": [],
195
- "eq_spans": [],
196
- "section": "System (~ESiLKO",
197
- "sec_num": "3.1"
198
- },
199
- {
200
- "text": "1. Import of the input from so-called 'empty' translation memory 2. Morphological analysis of Czech 3. Morphological disambiguation 4. Domain-related bilingual glossaries (incl. single-and multiword terminology) 5. General bilingual dictionary 6. Morphological synthesis of Slovak 7. Export of the output to the original translation memory",
201
- "cite_spans": [],
202
- "ref_spans": [],
203
- "eq_spans": [],
204
- "section": "System (~ESiLKO",
205
- "sec_num": "3.1"
206
- },
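Since every module maps annotated text to annotated text, the whole system is simply their composition in the order listed above (a sketch; the module names in the usage comment are placeholders for modules 2-6, not real function names from the system):

    # Run the word-for-word pipeline: each stage transforms the sentence
    # annotation produced by the previous one.
    def run_pipeline(sentence, modules):
        for module in modules:
            sentence = module(sentence)
        return sentence

    # e.g. run_pipeline(czech_sentence,
    #                   [analyse, disambiguate, glossaries, dictionary, synthesis])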
207
- {
208
- "text": "Letus now look in a more detail at the individual modules of the system: ad 1. The input text is extracted out of a translation memory previously exported into an ASCII file. The exported translation memory (of TRADOS) has a SGML-Iike notation with a relatively simple structure (cf. the following example):",
209
- "cite_spans": [],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "System (~ESiLKO",
213
- "sec_num": "3.1"
214
- },
215
- {
216
- "text": "Example 1. -A sample of the exported translation memory <RTF Preamble>...</RTF Preamble> <TrU> <CrD>23051999 <CrU>VK <Seg L=CS_01>Pomoci v~kazu ad-hoc m65ete rychle a jednoduge vytv~i~et regerge. <Seg L=SK_01 >n/a </TrU>",
217
- "cite_spans": [],
218
- "ref_spans": [],
219
- "eq_spans": [],
220
- "section": "System (~ESiLKO",
221
- "sec_num": "3.1"
222
- },
223
- {
224
- "text": "Our system uses only the segments marked by <Seg L=CS_01>, which contain one source language sentence each, and <Seg L=SK_01>, which is empty and which will later contain the same sentence translated into the target language by CESiLKO.",
225
- "cite_spans": [],
226
- "ref_spans": [],
227
- "eq_spans": [],
228
- "section": "System (~ESiLKO",
229
- "sec_num": "3.1"
230
- },
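A sketch of the import/export steps (modules 1 and 7) over the exported format shown in Example 1; the function and variable names are ours, not TRADOS's:

    import re

    # Pull each Czech source segment out of an exported memory file and
    # write the machine translation into the empty Slovak segment that
    # follows it.
    SEG_CS = re.compile(r"<Seg L=CS_01>(.*)")
    SEG_SK = re.compile(r"<Seg L=SK_01>")

    def fill_memory(lines, mt_translate):
        out, last_cs = [], None
        for line in lines:
            cs = SEG_CS.match(line.strip())
            if cs:
                last_cs = cs.group(1)
            elif SEG_SK.match(line.strip()) and last_cs is not None:
                line = "<Seg L=SK_01>" + mt_translate(last_cs)
                last_cs = None
            out.append(line)
        return out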
231
- {
232
- "text": "ad 2. The morphological analysis of Czech is based on the morphological dictionary developed by Jan Haji6 and Hana Skoumalov~i in 1988-99 (for latest description, see Haji~ (1998) ). The dictionary contains over 700 000 dictionary entries and its typical coverage varies between 99% (novels) to 95% (technical texts). The morphological analysis uses the system of positional tags with 15 positions (each morphological .category, such as Part-of-speech, Number, Gender, Case, etc. has a fixed, singlesymbol place in the tag). The module of morphological disambiguation is a key to the success of the translation. It gets an average number of 3.58 tags per token (word form in text) as an input. The tagging system is purely statistical, and it uses a log-linear model of probability distribution see Haji~, Hladkfi (1998) . The learning is based on a manually tagged corpus of Czech texts (mostly from the general newspaper domain). The system learns contextual rules (features) automatically and also automatically determines feature weights. The average accuracy of tagging is between 91 and 93% and remains the same even for technical texts (if we disregard the unknown names and foreign-language terms that are not ambiguous anyway). The lemmatization immediately follows tagging; it chooses the first lemma with a possible tag corresponding to the tag selected. Despite this simple lemmatization method, and also thanks to the fact that Czech words are rarely ambiguous in their Part-of-speech, it works with an accuracy exceeding 98%. ad 4. The domain-related bilingual glossaries contain pairs of individual words and pairs of multiple-word terms. The glossaries are organized into a hierarchy specified by the user; typically, the glossaries for the most specific domain are applied first. There is one general matching rule for all levels of glossaries -the longest match wins.",
233
- "cite_spans": [
234
- {
235
- "start": 167,
236
- "end": 179,
237
- "text": "Haji~ (1998)",
238
- "ref_id": "BIBREF1"
239
- },
240
- {
241
- "start": 799,
242
- "end": 820,
243
- "text": "Haji~, Hladkfi (1998)",
244
- "ref_id": null
245
- }
246
- ],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "System (~ESiLKO",
250
- "sec_num": "3.1"
251
- },
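The "longest match wins" rule amounts to a greedy scan over the lemma sequence, trying the longest glossary entries first at each position (a sketch; the glossary layout and the maximum entry length are assumptions):

    # Apply one glossary level to a sequence of lemmas; entries are keyed
    # by tuples of source lemmas, so multiword terms match as units.
    def apply_glossary(lemmas, glossary, max_len=5):
        out, i = [], 0
        while i < len(lemmas):
            for n in range(min(max_len, len(lemmas) - i), 0, -1):
                hit = glossary.get(tuple(lemmas[i:i + n]))
                if hit is not None:      # longest match wins
                    out.append(hit)
                    i += n
                    break
            else:
                out.append(lemmas[i])    # no entry: leave for later modules
                i += 1
        return out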
252
- {
253
- "text": "The multiple-word terms are sequences of lemmas (not word forms). This structure has several advantages, among others it allows to minimize the size of the dictionary and also, due to the simplicity of the structure, it allows modifications of the glossaries by the linguistically naive user. The necessary morphological information is introduced into the domain-related glossary in an off-line preprocessing stage, which does not require user intervention. This makes a big difference when compared to the RUSLAN Czech-to-Russian MT system, when each multiword dictionary entry cost about 30 minutes of linguistic expert's time on average. ad 5. The main bilingual dictionary contains data necessary for the translation of both lemmas and tags. The translation of tags (from the Czech into the Slovak morphological system) is necessary, because due to the morphological differences both systems use close, but slightly different tagsets. Currently the system handles the 1:1 translation of tags (and 2:2, 3:3, etc.) . Different ratio of translation is very rare between Czech and Siovak, but nevertheless an advanced system of dictionary items is under construction (for the translation 1:2, 2:1 etc.). It is quite interesting that the lexically homonymous words often preserve their homonymy even after the translation, so no special treatment of homonyms is deemed necessary. ad 6. The morphological synthesis of Slovak is based on a monolingual dictionary of SIovak, developed by J.Hric (1991-99), covering more than ]00,000 dictionary entries. The coverage of the dictionary is not as high as of the Czech one, but it is still growing. It aims at a similar coverage of Slovak as we enjoy for Czech. ad 7. The export of the output of the system (~ESILKO into the translation memory (of TRADOS Translator's Workbench) amounts mainly to cleaning of all irrelevant SGML markers. The whole resulting Slovak sentence is inserted into the appropriate location in the original translation memory file. The following example also shows that the marker <CrU> contains an information that the target language sentence was created by an MT system. ",
254
- "cite_spans": [
255
- {
256
- "start": 996,
257
- "end": 1016,
258
- "text": "(and 2:2, 3:3, etc.)",
259
- "ref_id": null
260
- }
261
- ],
262
- "ref_spans": [],
263
- "eq_spans": [],
264
- "section": "System (~ESiLKO",
265
- "sec_num": "3.1"
266
- },
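The 1:1 tag translation can be pictured as a position-by-position mapping over the 15-position tags (a sketch; the mapping table below is invented for illustration and is not the real Czech or Slovak tagset):

    # Translate a 15-position Czech tag into a Slovak one, position by
    # position; values absent from the (made-up) table are kept unchanged.
    POSITION_MAP = {
        0: {"N": "N", "A": "A", "V": "V"},   # part-of-speech
        2: {"F": "F", "M": "M", "N": "N"},   # gender
    }

    def translate_tag(cs_tag):
        return "".join(POSITION_MAP.get(i, {}).get(ch, ch)
                       for i, ch in enumerate(cs_tag))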
267
- {
268
- "text": "The problem how to evaluate results of automatic translation is very difficult. For the evaluation of our system we have exploited the close connection between our system and the TRADOS Translator's Workbench. The method is simple -the human translator receives the translation memory created by our system and translates the text using this memory. The translator is free to make any changes to the text proposed by the translation memory. The target text created by a human translator is then compared with the text created by the mechanical application of translation memory to the source text. TRADOS then evaluates the percentage of matching in the same manner as it normally evaluates the percentage of matching of source text with sentences in translation memory. Our system achieved about 90% match (as defined by the TRADOS match module) with the results of human translation, based on a relatively large (more than 10,000 words) test sample.",
269
- "cite_spans": [],
270
- "ref_spans": [],
271
- "eq_spans": [],
272
- "section": "Evaluation of results",
273
- "sec_num": "3.2"
274
- },
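The comparison can be pictured as below (a sketch only: TRADOS computes fuzzy match percentages, while this toy version merely counts segments the translator left unchanged):

    # Fraction of MT-proposed segments kept verbatim by the human
    # translator -- a rough stand-in for the TRADOS match percentage.
    def match_rate(mt_segments, human_segments):
        kept = sum(1 for mt, hum in zip(mt_segments, human_segments)
                   if mt == hum)
        return 100.0 * kept / len(mt_segments)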
275
- {
276
- "text": "The accuracy of the translation achieved by our system justifies the hypothesis that word-forword translation might be a solution for MT of really closely related languages. The remaining problems to be solved are problems with the oneto many or many-to-many translation, where the lack of information in glossaries and dictionaries sometimes causes an unnecessary translation error. The success of the system CESILKO has encouraged the investigation of the possibility to use the same method for other pairs of Slavic languages, namely for Czech-to-Polish translation. Although these languages are not so similar as Czech and Slovak, we hope that an addition of a simple partial noun phrase parsing might provide results with the quality comparable to the fullfledged syntactic analysis based system RUSLAN (this is of course true also for the Czechoto-Slovak translation). The first results of Czech-to Polish translation are quite encouraging in this respect, even though we could not perform as rigorous testing as we did for Slovak.",
277
- "cite_spans": [],
278
- "ref_spans": [],
279
- "eq_spans": [],
280
- "section": "Conclusions",
281
- "sec_num": "4."
282
- }
283
- ],
284
- "back_matter": [
285
- {
286
- "text": "This project was supported by the grant GAt~R 405/96/K214 and partially by the grant GA(~R 201/99/0236 and project of the Ministry of Education No. VS96151.",
287
- "cite_spans": [],
288
- "ref_spans": [],
289
- "eq_spans": [],
290
- "section": "Acknowledgements",
291
- "sec_num": null
292
- }
293
- ],
294
- "bib_entries": {
295
- "BIBREF0": {
296
- "ref_id": "b0",
297
- "title": "Czechto-Russian Transducing Dictionary",
298
- "authors": [
299
- {
300
- "first": "Alevtina",
301
- "middle": [],
302
- "last": "B6movfi",
303
- "suffix": ""
304
- },
305
- {
306
- "first": "Vladislav",
307
- "middle": [],
308
- "last": "Kubofi",
309
- "suffix": ""
310
- }
311
- ],
312
- "year": 1990,
313
- "venue": "Proceedings of the Xlllth COLING conference",
314
- "volume": "",
315
- "issue": "",
316
- "pages": "",
317
- "other_ids": {},
318
- "num": null,
319
- "urls": [],
320
- "raw_text": "B6movfi, Alevtina and Kubofi, Vladislav (1990). Czech- to-Russian Transducing Dictionary; In: Proceedings of the Xlllth COLING conference, Helsinki 1990",
321
- "links": null
322
- },
323
- "BIBREF1": {
324
- "ref_id": "b1",
325
- "title": "Building and Using a Syntactially Annotated Coprus: The Prague Dependency Treebank",
326
- "authors": [
327
- {
328
- "first": "Jan",
329
- "middle": [],
330
- "last": "Haji~",
331
- "suffix": ""
332
- }
333
- ],
334
- "year": 1998,
335
- "venue": "Festschrifi for Jarmila Panevov~i",
336
- "volume": "",
337
- "issue": "",
338
- "pages": "106--132",
339
- "other_ids": {},
340
- "num": null,
341
- "urls": [],
342
- "raw_text": "Haji~, Jan (1998). Building and Using a Syntactially Annotated Coprus: The Prague Dependency Treebank. In: Festschrifi for Jarmila Panevov~i, Karolinum Press, Charles Universitz, Prague. pp. 106---132.",
343
- "links": null
344
- },
345
- "BIBREF2": {
346
- "ref_id": "b2",
347
- "title": "Tagging Inflective Languages. Prediction of Morphological Categories for a Rich",
348
- "authors": [
349
- {
350
- "first": "Jan",
351
- "middle": [],
352
- "last": "Haji~",
353
- "suffix": ""
354
- },
355
- {
356
- "first": "Barbora",
357
- "middle": [],
358
- "last": "Hladk~t",
359
- "suffix": ""
360
- }
361
- ],
362
- "year": 1998,
363
- "venue": "Structured Tagset. ACL-Coling'98",
364
- "volume": "",
365
- "issue": "",
366
- "pages": "483--490",
367
- "other_ids": {},
368
- "num": null,
369
- "urls": [],
370
- "raw_text": "Haji~, Jan and Barbora Hladk~t (1998). Tagging Inflective Languages. Prediction of Morphological Categories for a Rich, Structured Tagset. ACL- Coling'98, Montreal, Canada, August 1998, pp. 483- 490.",
371
- "links": null
372
- },
373
- "BIBREF4": {
374
- "ref_id": "b4",
375
- "title": "Core Natural Language Processing Technology Applicable to Multiple Languages. The Workshop'98 Final Report",
376
- "authors": [
377
- {
378
- "first": "Eric",
379
- "middle": [
380
- ";"
381
- ],
382
- "last": "Brill",
383
- "suffix": ""
384
- },
385
- {
386
- "first": "Michael; Hladk~t",
387
- "middle": [],
388
- "last": "Collins",
389
- "suffix": ""
390
- },
391
- {
392
- "first": ";",
393
- "middle": [],
394
- "last": "Barbora",
395
- "suffix": ""
396
- },
397
- {
398
- "first": "Douglas",
399
- "middle": [
400
- ";"
401
- ],
402
- "last": "Jones",
403
- "suffix": ""
404
- },
405
- {
406
- "first": "Cynthia",
407
- "middle": [
408
- ";"
409
- ],
410
- "last": "Kuo",
411
- "suffix": ""
412
- },
413
- {
414
- "first": "",
415
- "middle": [],
416
- "last": "Ramshaw",
417
- "suffix": ""
418
- },
419
- {
420
- "first": ";",
421
- "middle": [],
422
- "last": "Lance",
423
- "suffix": ""
424
- },
425
- {
426
- "first": "Oren",
427
- "middle": [
428
- ";"
429
- ],
430
- "last": "Schwartz",
431
- "suffix": ""
432
- },
433
- {
434
- "first": "Christoph",
435
- "middle": [
436
- ";"
437
- ],
438
- "last": "Tillman",
439
- "suffix": ""
440
- },
441
- {
442
- "first": "Daniel",
443
- "middle": [],
444
- "last": "Zeman",
445
- "suffix": ""
446
- }
447
- ],
448
- "year": null,
449
- "venue": "",
450
- "volume": "",
451
- "issue": "",
452
- "pages": "",
453
- "other_ids": {},
454
- "num": null,
455
- "urls": [],
456
- "raw_text": "Brill, Eric; Collins, Michael; Hladk~t Barbora; Jones, Douglas; Kuo, Cynthia; Ramshaw, Lance; Schwartz, Oren; Tillman, Christoph; and Zeman, Daniel: Core Natural Language Processing Technology Applicable to Multiple Languages. The Workshop'98 Final Report. CLSP JHU. Also at: http:llwww.clsp.jhu.edulws981projectslnlplreport.",
457
- "links": null
458
- },
459
- "BIBREF5": {
460
- "ref_id": "b5",
461
- "title": "APAC3-2: An English-to-Czech Machine Translation System",
462
- "authors": [
463
- {
464
- "first": "Zden~k",
465
- "middle": [],
466
- "last": "Kirschner",
467
- "suffix": ""
468
- }
469
- ],
470
- "year": 1987,
471
- "venue": "Explizite Beschreibung der Sprache und automatische Textbearbeitung XII1, MFF",
472
- "volume": "",
473
- "issue": "",
474
- "pages": "",
475
- "other_ids": {},
476
- "num": null,
477
- "urls": [],
478
- "raw_text": "Kirschner, Zden~k (1987). APAC3-2: An English-to- Czech Machine Translation System; Explizite Beschreibung der Sprache und automatische Textbearbeitung XII1, MFF UK Prague Oliva, Karel (1989). A Parser for Czech Implemented in Systems Q;",
479
- "links": null
480
- },
481
- "BIBREF6": {
482
- "ref_id": "b6",
483
- "title": "Explizite Beschreibung der Sprache und automatische Textbearbeitung XVI",
484
- "authors": [],
485
- "year": null,
486
- "venue": "",
487
- "volume": "",
488
- "issue": "",
489
- "pages": "",
490
- "other_ids": {},
491
- "num": null,
492
- "urls": [],
493
- "raw_text": "Explizite Beschreibung der Sprache und automatische Textbearbeitung XVI, MFF UK Prague",
494
- "links": null
495
- }
496
- },
497
- "ref_entries": {
498
- "FIGREF0": {
499
- "text": "lit.: File Refl. was_not._possible to_open. -It was not possible to open the file]",
500
- "type_str": "figure",
501
- "num": null,
502
- "uris": null
503
- },
504
- "FIGREF1": {
505
- "text": "tags assigned to the word-form \"pomoci\" (help/by means of) pomoci: NFP2 ...... A .... ]NFS7 ...... A .... I R--2 ........... where : N -noun; R -preposition F -feminine gender S -singular, P -plural 7, 2 -case (7 -instrumental, 2 -genitive) A -affirmative (non negative) ad3.",
506
- "type_str": "figure",
507
- "num": null,
508
- "uris": null
509
- },
510
- "TABREF0": {
511
- "type_str": "table",
512
- "content": "<table><tr><td>&lt;RTF Preamble&gt;...&lt;/RTF Preamble&gt;</td></tr><tr><td>&lt;TrU&gt;</td></tr><tr><td>&lt;CRD&gt;23051999</td></tr><tr><td>&lt;CrU&gt;MT!</td></tr><tr><td>&lt;Seg L=CS_01&gt;Pomoci v~kazu ad-hoc mfi~ete</td></tr><tr><td>rychle a jednodu~e vytv~i~et re,erie.</td></tr><tr><td>&lt;Seg L=SK_01&gt;Pomoci v~kazov ad-hoc m6~ete</td></tr><tr><td>r~chio a jednoducho vytvhrat' re,erie.</td></tr><tr><td>&lt;/TrU&gt;</td></tr></table>",
513
- "num": null,
514
- "text": "Example 3. -A sample of the translation memory containing the results of MT",
515
- "html": null
516
- }
517
- }
518
- }
519
- }
Full_text_JSON/prefixA/json/A00/A00-1003.json DELETED
@@ -1,903 +0,0 @@
1
- {
2
- "paper_id": "A00-1003",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:30.277660Z"
6
- },
7
- "title": "Cross-Language Multimedia Information Retrieval",
8
- "authors": [
9
- {
10
- "first": "Sharon",
11
- "middle": [],
12
- "last": "Flank",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "emotion, Inc",
17
- "location": {
18
- "addrLine": "2600 Park Tower Dr",
19
- "postCode": "22180",
20
- "settlement": "Vienna",
21
- "region": "VA",
22
- "country": "USA"
23
- }
24
- },
25
- "email": "[email protected]"
26
- }
27
- ],
28
- "year": "",
29
- "venue": null,
30
- "identifiers": {},
31
- "abstract": "Simple measures can achieve high-accuracy cross-language retrieval in carefully chosen applications. Image retrieval is one of those applications, with results ranging from 68% of human translator performance for German, to 100% for French.",
32
- "pdf_parse": {
33
- "paper_id": "A00-1003",
34
- "_pdf_hash": "",
35
- "abstract": [
36
- {
37
- "text": "Simple measures can achieve high-accuracy cross-language retrieval in carefully chosen applications. Image retrieval is one of those applications, with results ranging from 68% of human translator performance for German, to 100% for French.",
38
- "cite_spans": [],
39
- "ref_spans": [],
40
- "eq_spans": [],
41
- "section": "Abstract",
42
- "sec_num": null
43
- }
44
- ],
45
- "body_text": [
46
- {
47
- "text": "contain strings of keywords. Typical queries are, as in most Web search applications, two to three words in length. At this point, all of the captions are in English. eMotion hosts a large database of images for sale and for licensing, PictureQuest. At least 10% of PictureQuest's user base is outside the United States. The tests were performed on the PictureQuest database of approximately 400,000 images.",
48
- "cite_spans": [],
49
- "ref_spans": [],
50
- "eq_spans": [],
51
- "section": "Introduction",
52
- "sec_num": "1"
53
- },
54
- {
55
- "text": "Information is increasingly global, and the need to access it crosses language barriers. The topic of this paper, cross-language information retrieval, concerns the automatic retrieval of text in one language via a query in a different language. A considerable body of literature has grown up around cross-language information retrieval (e.g. Grefenstette 1998 , TREC-7 1999 . There are two basic approaches. Either the query can be translated, or each entire document can be translated into the same language as the query. The accuracy of retrieval across languages, however, is generally not good. One of the weaknesses that plagues crosslanguage retrieval is that we do not have a good sense of who the users are, or how best to interact with them.",
56
- "cite_spans": [
57
- {
58
- "start": 343,
59
- "end": 360,
60
- "text": "Grefenstette 1998",
61
- "ref_id": "BIBREF4"
62
- },
63
- {
64
- "start": 361,
65
- "end": 374,
66
- "text": ", TREC-7 1999",
67
- "ref_id": null
68
- }
69
- ],
70
- "ref_spans": [],
71
- "eq_spans": [],
72
- "section": "Introduction",
73
- "sec_num": "1"
74
- },
75
- {
76
- "text": "In this paper we describe a multimedia application for which cross-language information retrieval works particularly well. eMotion, Inc. has developed a natural language information retrieval application that retrieves images, such as photographs, based on short textual descriptions or captions. The captions are typically one to three sentences, although they may also Recent Web utilization data for PictureQuest indicate that of the 10% of users from outside the United States, a significant portion come from Spanish-speaking, French-speaking, and German-speaking countries.",
77
- "cite_spans": [],
78
- "ref_spans": [],
79
- "eq_spans": [],
80
- "section": "Introduction",
81
- "sec_num": "1"
82
- },
83
- {
84
- "text": "It is expected that adding appropriate language interfaces and listing PictureQuest in foreign-language search engines will dramatically increase non-English usage.",
85
- "cite_spans": [],
86
- "ref_spans": [],
87
- "eq_spans": [],
88
- "section": "Introduction",
89
- "sec_num": "1"
90
- },
91
- {
92
- "text": "This paper offers several original contributions to the literature on crosslanguage information retrieval. First, the choice of application is novel, and significant because it simplifies the language problem enough to make it tractable. Because the objects retrieved are images and not text, they are instantly comprehensible to the user regardless of language issues. This fact makes it possible for users to perform a relevance assessment without the need for any kind of translation. More important, users themselves can select objects of interest, without recourse to translation.",
93
- "cite_spans": [],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "The Cross-Language Multimedia Retrieval Application",
97
- "sec_num": null
98
- },
99
- {
100
- "text": "The images are, in fact, associated with caption information, but, even in the monolingual system, few users ever even view the captions. It should be noted that most of the images in PictureQuest are utilized for advertising and publishing, rather than for news applications. Users of history and news photos do tend to check the captions, and often users in publishing will view the captions. For advertising, however, what the image itself conveys is far more important than the circumstances under which it was created.",
101
- "cite_spans": [],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "The Cross-Language Multimedia Retrieval Application",
105
- "sec_num": null
106
- },
107
- {
108
- "text": "Another significant contribution of this paper is the inclusion of a variety of machine translation systems. None of the systems tested is a high-end machine translation system: all are freely available on the Web.",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "The Cross-Language Multimedia Retrieval Application",
113
- "sec_num": null
114
- },
115
- {
116
- "text": "Another key feature of this paper is the careful selection of an accuracy measure appropriate to the circumstances of the application. The standard measure, percent of monolingual performance achieved, is used, with a firm focus on precision. In this application, users are able to evaluate only what they see, and generally have no idea what else is present in the collection. As a result, precision is of far more interest to customers than recall. Recall is, however, of interest to image suppliers, and in any case it would not be prudent to optimize for precision without taking into account the recall tradeoff.",
117
- "cite_spans": [],
118
- "ref_spans": [],
119
- "eq_spans": [],
120
- "section": "The Cross-Language Multimedia Retrieval Application",
121
- "sec_num": null
122
- },
123
- {
124
- "text": "The PictureQuest application avoids several of the major stumbling blocks that stand in the way of high-accuracy cross-language retrieval. Ballesteros and Croft (1997) note several pitfalls common to cross-language information retrieval:",
125
- "cite_spans": [
126
- {
127
- "start": 139,
128
- "end": 167,
129
- "text": "Ballesteros and Croft (1997)",
130
- "ref_id": "BIBREF0"
131
- }
132
- ],
133
- "ref_spans": [],
134
- "eq_spans": [],
135
- "section": "The Cross-Language Multimedia Retrieval Application",
136
- "sec_num": null
137
- },
138
- {
139
- "text": "(1) The dictionary may not contain specialized vocabulary (particularly bilingual dictionaries).",
140
- "cite_spans": [],
141
- "ref_spans": [],
142
- "eq_spans": [],
143
- "section": "The Cross-Language Multimedia Retrieval Application",
144
- "sec_num": null
145
- },
146
- {
147
- "text": "(2) Dictionary translations are inherently ambiguous and add extraneous terms to the query.",
148
- "cite_spans": [],
149
- "ref_spans": [],
150
- "eq_spans": [],
151
- "section": "The Cross-Language Multimedia Retrieval Application",
152
- "sec_num": null
153
- },
154
- {
155
- "text": "(3) Failure to translate multi-term concepts as phrases reduces effectiveness.",
156
- "cite_spans": [],
157
- "ref_spans": [],
158
- "eq_spans": [],
159
- "section": "The Cross-Language Multimedia Retrieval Application",
160
- "sec_num": null
161
- },
162
- {
163
- "text": "In the PictureQuest application, these pitfalls are minimized because the queries are short, not paragraph-long descriptions as in TREC (see, e.g., Voorhees and Harman 1999) . This would be a problem for a statistical approach, since the queries present little context, but, since we are not relying on context (because reducing ambiguity is not our top priority) it makes our task simpler. Assuming that the translation program keeps multi-term concepts intact, or at least that it preserves the modifier-head structure, we can successfully match phrases. The captions (i.e. the documents to be retrieved) are mostly in sentences, and their phrases are intact. The phrase recognizer identifies meaningful phrases (e.g. fire engine) and handles them as a unit. The pattern matcher recognizes core noun phrases and makes it more likely that they will match correctly.",
164
- "cite_spans": [
165
- {
166
- "start": 148,
167
- "end": 173,
168
- "text": "Voorhees and Harman 1999)",
169
- "ref_id": null
170
- }
171
- ],
172
- "ref_spans": [],
173
- "eq_spans": [],
174
- "section": "The Cross-Language Multimedia Retrieval Application",
175
- "sec_num": null
176
- },
177
- {
178
- "text": "Word choice can be a major issue as well for cross-language retrieval systems. Some ambiguity problems can be resolved through the use of a part-of-speech tagger on the captions. As Resnik and Yarowsky (in press) observe, part-of-speech tagging considerably reduces the word sense disambiguation problem. However, some ambiguity remains.",
179
- "cite_spans": [],
180
- "ref_spans": [],
181
- "eq_spans": [],
182
- "section": "The Cross-Language Multimedia Retrieval Application",
183
- "sec_num": null
184
- },
185
- {
186
- "text": "For example, the decision to translate a word as car, automobile, or vehicle, may dramatically affect retrieval accuracy. The PictureQuest system uses a semantic net based on WordNet (Fellbaum 1998) to expand terms. Thus a query for car or automobile will retrieve essentially identical results; vehicle will be less accurate but will still retrieve many of the same images. So while word choice may be a significant consideration for a system like that of Jang et al., 1999 , its impact on PictureQuest is minimal.",
187
- "cite_spans": [
188
- {
189
- "start": 183,
190
- "end": 198,
191
- "text": "(Fellbaum 1998)",
192
- "ref_id": "BIBREF1"
193
- },
194
- {
195
- "start": 457,
196
- "end": 474,
197
- "text": "Jang et al., 1999",
198
- "ref_id": "BIBREF6"
199
- }
200
- ],
201
- "ref_spans": [],
202
- "eq_spans": [],
203
- "section": "The Cross-Language Multimedia Retrieval Application",
204
- "sec_num": null
205
- },
206
- {
207
- "text": "The use of WordNet as an aid to information retrieval is controversial, and some studies indicate it is more hindrance than help (e.g. Voorhees 1993 , 1994 , Smeaton, Kelledy and O'Donnell 1995 . WordNet uses extremely fine-grained distinctions, which can interfere with precision even in monolingual information retrieval. In a cross-language application, the additional senses can add confounding mistranslations. If, on the other hand, WordNet expansion is constrained, the correct translation may be missed, lowering recall. In the PictureQuest application, we have tuned WordNet expansion levels and the corresponding weights attached to them so that WordNet serves to increase recall with minimal impact on precision (Flank 2000) . This tuned expansion appears to be beneficial in the cross-language application as well. Gilarranz, Gonzalo and Verdejo (1997) point out that, for cross-language information retrieval, some precision is lost in any case, and WordNet is more likely to enhance cross-linguistic than monolingual applications.",
208
- "cite_spans": [
209
- {
210
- "start": 135,
211
- "end": 148,
212
- "text": "Voorhees 1993",
213
- "ref_id": "BIBREF12"
214
- },
215
- {
216
- "start": 149,
217
- "end": 155,
218
- "text": ", 1994",
219
- "ref_id": "BIBREF11"
220
- },
221
- {
222
- "start": 156,
223
- "end": 193,
224
- "text": ", Smeaton, Kelledy and O'Donnell 1995",
225
- "ref_id": "BIBREF9"
226
- },
227
- {
228
- "start": 723,
229
- "end": 735,
230
- "text": "(Flank 2000)",
231
- "ref_id": "BIBREF2"
232
- },
233
- {
234
- "start": 827,
235
- "end": 864,
236
- "text": "Gilarranz, Gonzalo and Verdejo (1997)",
237
- "ref_id": "BIBREF3"
238
- }
239
- ],
240
- "ref_spans": [],
241
- "eq_spans": [],
242
- "section": "The Cross-Language Multimedia Retrieval Application",
243
- "sec_num": null
244
- },
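Tuned expansion of the kind described above can be sketched with NLTK's WordNet interface (a sketch only: the expansion levels and weights here are illustrative, not the tuned PictureQuest values, and running it requires the NLTK wordnet corpus to be installed):

    from nltk.corpus import wordnet as wn

    # Depth-limited expansion: the term itself gets full weight, direct
    # synonyms less, hypernym lemmas less still (weights are assumptions).
    def expand(term, weights=(1.0, 0.5, 0.25)):
        expanded = {term: weights[0]}
        synsets = wn.synsets(term)
        for w in weights[1:]:
            next_synsets = []
            for s in synsets:
                for lemma in s.lemmas():
                    expanded.setdefault(lemma.name(), w)
                next_synsets.extend(s.hypernyms())
            synsets = next_synsets
        return expanded

With this scheme a query for car and a query for automobile expand to largely the same weighted term set, while vehicle enters only at a reduced weight, matching the behavior described in the text.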
245
- {
246
- "text": "In fact, Smeaton and Quigley (1996) conclude that WordNet is indeed helpful in image retrieval, in particular because image captions are too short for statistical analysis to be useful. This insight is what led us to develop a proprietary image retrieval engine in the first place: fine-grained linguistic analysis is more useful that a statistical approach in a caption averaging some thirty words. (Our typical captions are longer than those reported in Smeaton and Quigley 1996) .",
247
- "cite_spans": [
248
- {
249
- "start": 9,
250
- "end": 35,
251
- "text": "Smeaton and Quigley (1996)",
252
- "ref_id": "BIBREF10"
253
- },
254
- {
255
- "start": 456,
256
- "end": 481,
257
- "text": "Smeaton and Quigley 1996)",
258
- "ref_id": "BIBREF10"
259
- }
260
- ],
261
- "ref_spans": [],
262
- "eq_spans": [],
263
- "section": "The Cross-Language Multimedia Retrieval Application",
264
- "sec_num": null
265
- },
266
- {
267
- "text": "We performed preliminary testing using two translation methodologies. For the initial tests, we chose European languages: French, Spanish, and German. Certainly this choice simplifies the translation problem, but in our case it also reflects the most pressing business need for translation.",
268
- "cite_spans": [],
269
- "ref_spans": [],
270
- "eq_spans": [],
271
- "section": "Translation Methodology",
272
- "sec_num": "3"
273
- },
274
- {
275
- "text": "For the French, Spanish, and German tests, we used Systran as provided by AltaVista (Babelfish); we also tested several other Web translation programs. We used native speakers to craft queries and then translated those queries either manually or automatically and submitted them to PictureQuest. The resulting image set was evaluated for precision and, in a limited fashion, for recall.",
276
- "cite_spans": [],
277
- "ref_spans": [],
278
- "eq_spans": [],
279
- "section": "Translation Methodology",
280
- "sec_num": "3"
281
- },
282
- {
283
- "text": "The second translation methodology employed was direct dictionary translation, tested only for Spanish. We used the same queries for this test. Using an on-line Spanish-English dictionary, we selected, for each word, the top (top-frequency) translation. We then submitted this wordby-word translation to PictureQuest. (Unlike AltaVista, this method spellcorrected letters entered without the necessary diacritics.) Evaluation proceeded in the same manner. The word-by-word method introduces a weakness in phrase recognition: any phrase recognition capabilities in the retrieval system are defeated if phrases are not retained in the input. We can assume that the non-Englishspeaking user will, however, recognize phrases in her or his own language, and look them up as phrases where possible. Thus we can expect at least those multiword phrases that have a dictionary entry to be correctly understood. We still do lose the noun phrase recognition capabilities in the retrieval system, further confounded by the fact that in Spanish adjectives follow the nouns they modify. In the hombre de negocios example in the data below, both AltaVista and Langenscheidt correctly identify the phrase as multiword, and translate it as businessman rather than man of businesses.",
284
- "cite_spans": [],
285
- "ref_spans": [],
286
- "eq_spans": [],
287
- "section": "Translation Methodology",
288
- "sec_num": "3"
289
- },
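A sketch of the word-by-word procedure (the dictionary layout and the three-word phrase window are assumptions made for illustration):

    # Replace each source word by the first (highest-frequency) entry in a
    # bilingual dictionary, after first trying multiword phrases that have
    # their own entry (e.g. hombre de negocios -> businessman).
    def translate_query(words, dictionary, phrases):
        out, i = [], 0
        while i < len(words):
            if i + 2 < len(words) and tuple(words[i:i + 3]) in phrases:
                out.append(phrases[tuple(words[i:i + 3])])
                i += 3
            else:
                out.append(dictionary.get(words[i], [words[i]])[0])
                i += 1
        return " ".join(out)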
290
- {
291
- "text": "The use of phrase recognition has been shown to be helpful, and, optimally, we would like to include it.",
292
- "cite_spans": [],
293
- "ref_spans": [],
294
- "eq_spans": [],
295
- "section": "Translation Methodology",
296
- "sec_num": "3"
297
- },
298
- {
299
- "text": "Hull and Grefenstette 1996 showed the upper bound of the improvements possible by using lexicalized phrases.",
300
- "cite_spans": [],
301
- "ref_spans": [],
302
- "eq_spans": [],
303
- "section": "Translation Methodology",
304
- "sec_num": "3"
305
- },
306
- {
307
- "text": "Every phrase that appeared was added to the dictionary, and that tactic did aid retrieval. Both statistical co-occurrence and syntactic phrases are also possible approaches.",
308
- "cite_spans": [],
309
- "ref_spans": [],
310
- "eq_spans": [],
311
- "section": "Translation Methodology",
312
- "sec_num": "3"
313
- },
314
- {
315
- "text": "Unfortunately, the extra-system approach we take here relies heavily on the external machine translation to preserve phrases intact. If AltaVista (or, in the case of Langenscheidt, the user) recognizes a phrase and translates it as a unit, the translation is better and retrieval is likely to be better.",
316
- "cite_spans": [],
317
- "ref_spans": [],
318
- "eq_spans": [],
319
- "section": "Translation Methodology",
320
- "sec_num": "3"
321
- },
322
- {
323
- "text": "If, however, the translation mistakenly misses a phrase, retrieval quality is likely to be worse. As for compositional noun phrases, if the translation preserves normal word order, then the PicmreQuest-internal noun phrase recognition will take effect. That is, ifjeune fille translates as young girl, then PictureQuest will understand that young is an adjective modifying girl. In the more difficult case, if the translation preserves the correct order in translating la selva africana, i.e. the African jungle, then noun phrase recognition will work. If, however, it comes out as the jungle African, then retrieval will be worse. In the architecture described here, fixing this problem requires access to the internals of the machine translation program.",
324
- "cite_spans": [],
325
- "ref_spans": [],
326
- "eq_spans": [],
327
- "section": "Translation Methodology",
328
- "sec_num": "3"
329
- },
330
- {
331
- "text": "Evaluating precision and recall on a large corpus is a difficult task. We used the evaluation methods detailed in Flank 1998 . Precision was evaluated using a crossing measure, whereby any image ranked higher than a better match was penalized. Recall per se was measured only with respect to a defined subset of the images. Ranking incorporates some recall measures into the precision score, since images ranked too low are a recall problem, and images marked too high are a precision problem. If there are three good matches, and the third shows up as #4, the bogus #3 is a precision problem, and the too-low #4 is a recall problem.",
332
- "cite_spans": [
333
- {
334
- "start": 114,
335
- "end": 124,
336
- "text": "Flank 1998",
337
- "ref_id": "BIBREF2"
338
- }
339
- ],
340
- "ref_spans": [],
341
- "eq_spans": [],
342
- "section": "Evaluation",
343
- "sec_num": "4"
344
- },
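The crossing idea can be sketched as below (a sketch: the relevance grades and the quadratic pairwise scan are our simplification, not the exact measure of Flank (1998)):

    # Count crossings: every pair where a worse image is ranked above a
    # better one incurs a penalty; grades come from a human judge.
    def crossings(ranked_grades):
        # ranked_grades: relevance grade of each image, in system rank order
        return sum(1
                   for i in range(len(ranked_grades))
                   for j in range(i + 1, len(ranked_grades))
                   if ranked_grades[j] > ranked_grades[i])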
345
- {
346
- "text": "For evaluation of the overall cross-language retrieval performance, we simply measured the ratio between the cross-language and monolingual retrieval accuracy (C/M%). This is standard; see, for example, Jang et al. 1999 . Table 1 illustrates the percentage of monolingual retrieval performance we achieved for the translation tests performed.",
347
- "cite_spans": [
348
- {
349
- "start": 203,
350
- "end": 219,
351
- "text": "Jang et al. 1999",
352
- "ref_id": "BIBREF6"
353
- }
354
- ],
355
- "ref_spans": [
356
- {
357
- "start": 222,
358
- "end": 229,
359
- "text": "Table 1",
360
- "ref_id": null
361
- }
362
- ],
363
- "eq_spans": [],
364
- "section": "Evaluation",
365
- "sec_num": "4"
366
- },
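The normalization can be reproduced directly; the example values below are taken from the Spanish data in the appendix:

    # Percent of monolingual performance: cross-language precision divided
    # by the human-translation (monolingual) baseline, scaled to 100.
    def cm_percent(cross_precision, mono_precision):
        return 100.0 * cross_precision / mono_precision

    print(cm_percent(63, 90))   # -> 70.0, the normalized Langenscheidt figure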
367
- {
368
- "text": "In this instance, we take the precision performance of the human-translated queries and normalize it to 100%, and adjust the other translation modalities relative to the human baseline. Several other factors make the PictureQuest application a particularly good application for machine translation technology. Unlike document translation, there is no need to match every word in the description; useful images may be retrieved even if a word or two is lost. There are no discourse issues at all: searches never use anaphora, and no one cares if the translated query sounds good or not.",
369
- "cite_spans": [],
370
- "ref_spans": [],
371
- "eq_spans": [],
372
- "section": "Evaluation",
373
- "sec_num": "4"
374
- },
375
- {
376
- "text": "In addition, the fact that the objects being retrieved were images greatly simplified the endeavor.",
377
- "cite_spans": [],
378
- "ref_spans": [],
379
- "eq_spans": [],
380
- "section": "Evaluation",
381
- "sec_num": "4"
382
- },
383
- {
384
- "text": "Under normal circumstances, developing a user-friendly interface is a major challenge. Users with only limited (or nonexistent) reading knowledge of the language of the documents need a way to determine, first, which ones are useful, and second, what they say. In the PictureQuest application, however, the retrieved assets are images. Users can instantly assess which images meet their needs.",
385
- "cite_spans": [],
386
- "ref_spans": [],
387
- "eq_spans": [],
388
- "section": "Evaluation",
389
- "sec_num": "4"
390
- },
391
- {
392
- "text": "In conclusion, it appears that simple on-line translation of queries can support effective cross-language information retrieval, for certain applications. We showed how an image retrieval application eliminates some of the problems of cross-language retrieval, and how carefully tuned WordNet expansion simplifies word choice issues. We used a variety of machine translation systems, none of them high-end and all of them free, and nonetheless achieved commercially viable results. ",
393
- "cite_spans": [],
394
- "ref_spans": [],
395
- "eq_spans": [],
396
- "section": "Evaluation",
397
- "sec_num": "4"
398
- },
399
- {
400
- "text": "Human translations, tested on PictureQuest: 90% (normalize to 100%)",
401
- "cite_spans": [],
402
- "ref_spans": [],
403
- "eq_spans": [],
404
- "section": "Spanish",
405
- "sec_num": "5.1"
406
- },
407
- {
408
- "text": "AltaVista: 53% (59% normalized)",
409
- "cite_spans": [],
410
- "ref_spans": [],
411
- "eq_spans": [],
412
- "section": "Spanish",
413
- "sec_num": "5.1"
414
- },
415
- {
416
- "text": "Langenscheidt, word-by-word: 63% (70% normalized)",
417
- "cite_spans": [],
418
- "ref_spans": [],
419
- "eq_spans": [],
420
- "section": "Spanish",
421
- "sec_num": "5.1"
422
- },
423
- {
424
- "text": "For AltaVista, we left out the words that AltaVista didn't translate.",
425
- "cite_spans": [],
426
- "ref_spans": [],
427
- "eq_spans": [],
428
- "section": "AltaVista",
429
- "sec_num": "5.1.1"
430
- },
431
- {
432
- "text": "Langenscheidt, word-by-word: 63% (70% normalized)",
433
- "cite_spans": [],
434
- "ref_spans": [],
435
- "eq_spans": [],
436
- "section": "Langenscheidt",
437
- "sec_num": "5.1.2"
438
- },
439
- {
440
- "text": "For the Langenscheidt word-by-word, we used the bilingual dictionary to translate each word separately as if we knew no English at all, and always took the first translation.",
441
- "cite_spans": [],
442
- "ref_spans": [],
443
- "eq_spans": [],
444
- "section": "Langenscheidt",
445
- "sec_num": "5.1.2"
446
- },
447
- {
448
- "text": "We made the following adjustments:",
449
- "cite_spans": [],
450
- "ref_spans": [],
451
- "eq_spans": [],
452
- "section": "Langenscheidt",
453
- "sec_num": "5.1.2"
454
- },
455
- {
456
- "text": "1. Left out \"una,\" since Langenscheidt mapped it to \"unir\" rather than to either a or one 2. Translated \"e\" as and instead of e",
457
- "cite_spans": [],
458
- "ref_spans": [],
459
- "eq_spans": [],
460
- "section": "Langenscheidt",
461
- "sec_num": "5.1.2"
462
- },
463
- {
464
- "text": "Human translations, tested on PictureQuest: ",
465
- "cite_spans": [],
466
- "ref_spans": [],
467
- "eq_spans": [],
468
- "section": "French",
469
- "sec_num": "5.2"
470
- }
471
- ],
472
- "back_matter": [],
473
- "bib_entries": {
474
- "BIBREF0": {
475
- "ref_id": "b0",
476
- "title": "Phrasal Translation and Query Expansion Techniques for Cross-Language Information Retrieval",
477
- "authors": [
478
- {
479
- "first": "Lisa",
480
- "middle": [],
481
- "last": "Ballesteros",
482
- "suffix": ""
483
- },
484
- {
485
- "first": "W",
486
- "middle": [
487
- "Bruce"
488
- ],
489
- "last": "Croft",
490
- "suffix": ""
491
- }
492
- ],
493
- "year": 1997,
494
- "venue": "AAAI Spring Symposium on Cross-Language Text and Speech Retrieval",
495
- "volume": "",
496
- "issue": "",
497
- "pages": "",
498
- "other_ids": {},
499
- "num": null,
500
- "urls": [],
501
- "raw_text": "Ballesteros, Lisa, and W. Bruce Croft, 1997. \"Phrasal Translation and Query Expansion Techniques for Cross-Language Information Retrieval,\" in AAAI Spring Symposium on Cross-Language Text and Speech Retrieval, Stanford University, Palo Alto, California, March 24-26, 1997.",
502
- "links": null
503
- },
504
- "BIBREF1": {
505
- "ref_id": "b1",
506
- "title": "WordNet: An Electronic Lexical Database",
507
- "authors": [
508
- {
509
- "first": "Christiane",
510
- "middle": [],
511
- "last": "Fellbaum",
512
- "suffix": ""
513
- }
514
- ],
515
- "year": 1998,
516
- "venue": "",
517
- "volume": "",
518
- "issue": "",
519
- "pages": "",
520
- "other_ids": {},
521
- "num": null,
522
- "urls": [],
523
- "raw_text": "Fellbaum, Christiane, ed., 1998. WordNet: An Electronic Lexical Database. Cambridge, MA: MIT Press.",
524
- "links": null
525
- },
526
- "BIBREF2": {
527
- "ref_id": "b2",
528
- "title": "Does WordNet Improve Multimedia Information Retrieval?",
529
- "authors": [
530
- {
531
- "first": "Sharon",
532
- "middle": [],
533
- "last": "Flank",
534
- "suffix": ""
535
- }
536
- ],
537
- "year": 1998,
538
- "venue": "Proceedings of COLING-ACL, 36th Annual Meeting of the Association for Computational Linguistics",
539
- "volume": "",
540
- "issue": "",
541
- "pages": "10--14",
542
- "other_ids": {},
543
- "num": null,
544
- "urls": [],
545
- "raw_text": "Flank, Sharon. 2000. \"Does WordNet Improve Multimedia Information Retrieval?\" Working paper\u2022 Flank, Sharon. 1998\u2022 \"A Layered Approach to NLP- Based Information Retrieval,\" in Proceedings of COLING-ACL, 36th Annual Meeting of the Association for Computational Linguistics, Montreal, Canada, 10-14 August 1998.",
546
- "links": null
547
- },
548
- "BIBREF3": {
549
- "ref_id": "b3",
550
- "title": "An Approach to Conceptual Text Retrieval Using the EuroWordNet Multilingual Semantic Database",
551
- "authors": [
552
- {
553
- "first": "Julio",
554
- "middle": [],
555
- "last": "Gilarranz",
556
- "suffix": ""
557
- },
558
- {
559
- "first": "Julio",
560
- "middle": [],
561
- "last": "Gonzalo",
562
- "suffix": ""
563
- },
564
- {
565
- "first": "Felisa",
566
- "middle": [],
567
- "last": "Verdejo",
568
- "suffix": ""
569
- }
570
- ],
571
- "year": 1997,
572
- "venue": "AAAI Spring Symposium on Cross-Language Text and Speech Retrieval",
573
- "volume": "",
574
- "issue": "",
575
- "pages": "",
576
- "other_ids": {},
577
- "num": null,
578
- "urls": [],
579
- "raw_text": "Gilarranz, Julio, Julio Gonzalo and Felisa Verdejo. 1997. \"An Approach to Conceptual Text Retrieval Using the EuroWordNet Multilingual Semantic Database,\" in AAAI Spring Symposium on Cross- Language Text and Speech Retrieval, Stanford University, , Palo Alto, California, March 24-26, 1997. (http://www.clis.umd.edu/dlrg/filter/sss/papers)",
580
- "links": null
581
- },
582
- "BIBREF4": {
583
- "ref_id": "b4",
584
- "title": "Cross-Language Information Retrieval",
585
- "authors": [
586
- {
587
- "first": "Gregory",
588
- "middle": [],
589
- "last": "Grefenstette",
590
- "suffix": ""
591
- }
592
- ],
593
- "year": 1998,
594
- "venue": "",
595
- "volume": "",
596
- "issue": "",
597
- "pages": "",
598
- "other_ids": {},
599
- "num": null,
600
- "urls": [],
601
- "raw_text": "Grefenstette, Gregory, ed., 1998. Cross-Language Information Retrieval. Norwell, MA: Kluwer.",
602
- "links": null
603
- },
604
- "BIBREF5": {
605
- "ref_id": "b5",
606
- "title": "Experiments in Multilingual Information Retrieval",
607
- "authors": [
608
- {
609
- "first": "David",
610
- "middle": [
611
- "A"
612
- ],
613
- "last": "Hull",
614
- "suffix": ""
615
- },
616
- {
617
- "first": "Gregory",
618
- "middle": [],
619
- "last": "Grefenstette",
620
- "suffix": ""
621
- }
622
- ],
623
- "year": 1996,
624
- "venue": "m Proceedin s o the 19 th L \u2022 \" g f nternational Conference on Research and Development in Information Retrieval (SIGIR96)",
625
- "volume": "",
626
- "issue": "",
627
- "pages": "",
628
- "other_ids": {},
629
- "num": null,
630
- "urls": [],
631
- "raw_text": "Hull, David A. and Gregory Grefenstette, 1996. \"Experiments in Multilingual Information Retrieval,\" m Proceedin s o the 19 th L \u2022 \" g f nternational Conference on Research and Development in Information Retrieval (SIGIR96) Zurich, Switzerland.",
632
- "links": null
633
- },
634
- "BIBREF6": {
635
- "ref_id": "b6",
636
- "title": "Using Mutual Information to Resolve Query Translation Ambiguities and Query Term Weighting",
637
- "authors": [
638
- {
639
- "first": "Myung",
640
- "middle": [
641
- "-"
642
- ],
643
- "last": "Jang",
644
- "suffix": ""
645
- },
646
- {
647
- "first": "Sung",
648
- "middle": [
649
- "Hyon"
650
- ],
651
- "last": "Gil",
652
- "suffix": ""
653
- },
654
- {
655
- "first": "Se Young",
656
- "middle": [],
657
- "last": "Myaeng",
658
- "suffix": ""
659
- },
660
- {
661
- "first": "",
662
- "middle": [],
663
- "last": "Park",
664
- "suffix": ""
665
- }
666
- ],
667
- "year": 1999,
668
- "venue": "Proceedings of 37 th Annual Meeting of the Association for Computational Linguistics",
669
- "volume": "",
670
- "issue": "",
671
- "pages": "",
672
- "other_ids": {},
673
- "num": null,
674
- "urls": [],
675
- "raw_text": "Jang, Myung-Gil, Sung Hyon Myaeng, and Se Young Park, 1999. \"Using Mutual Information to Resolve Query Translation Ambiguities and Query Term Weighting,\" in Proceedings of 37 th Annual Meeting of the Association for Computational Linguistics, College Park, Maryland.",
676
- "links": null
677
- },
678
- "BIBREF7": {
679
- "ref_id": "b7",
680
- "title": "Should We Translate the Documents or the Queries in Cross-Language Information Retrieval",
681
- "authors": [
682
- {
683
- "first": "J",
684
- "middle": [],
685
- "last": "Mccarley",
686
- "suffix": ""
687
- },
688
- {
689
- "first": "",
690
- "middle": [],
691
- "last": "Scott",
692
- "suffix": ""
693
- }
694
- ],
695
- "year": 1999,
696
- "venue": "",
697
- "volume": "",
698
- "issue": "",
699
- "pages": "",
700
- "other_ids": {},
701
- "num": null,
702
- "urls": [],
703
- "raw_text": "McCarley, J. Scott, 1999. \"Should We Translate the Documents or the Queries in Cross-Language Information Retrieval?\"",
704
- "links": null
705
- },
706
- "BIBREF8": {
707
- "ref_id": "b8",
708
- "title": "Distinguishing Systems and Distinguishing Sense: New Evaluation Methods for Word Sense Disambiguation",
709
- "authors": [
710
- {
711
- "first": "Philip",
712
- "middle": [],
713
- "last": "Resnik",
714
- "suffix": ""
715
- },
716
- {
717
- "first": "David",
718
- "middle": [],
719
- "last": "Yarowsky",
720
- "suffix": ""
721
- },
722
- {
723
- "first": "",
724
- "middle": [],
725
- "last": "Press",
726
- "suffix": ""
727
- }
728
- ],
729
- "year": null,
730
- "venue": "Natural Language Engineering",
731
- "volume": "",
732
- "issue": "",
733
- "pages": "",
734
- "other_ids": {},
735
- "num": null,
736
- "urls": [],
737
- "raw_text": "Resnik, Philip and Yarowsky, David, in press. \"Distinguishing Systems and Distinguishing Sense: New Evaluation Methods for Word Sense Disambiguation,\" Natural Language Engineering.",
738
- "links": null
739
- },
740
- "BIBREF9": {
741
- "ref_id": "b9",
742
- "title": "TREC-4 Experiments at Dublin City University: Thresholding Posting Lists, Query Expansion with WordNet and POS Tagging of Spanish",
743
- "authors": [
744
- {
745
- "first": "Alan",
746
- "middle": [
747
- "F"
748
- ],
749
- "last": "Smeaton",
750
- "suffix": ""
751
- },
752
- {
753
- "first": "F",
754
- "middle": [],
755
- "last": "Kelledy",
756
- "suffix": ""
757
- },
758
- {
759
- "first": "R",
760
- "middle": [],
761
- "last": "O'donnell",
762
- "suffix": ""
763
- }
764
- ],
765
- "year": 1995,
766
- "venue": "NIST Special Publication 500-236: The Fourth Text REtrieval Conference (TREC-4)",
767
- "volume": "",
768
- "issue": "",
769
- "pages": "",
770
- "other_ids": {},
771
- "num": null,
772
- "urls": [],
773
- "raw_text": "Smeaton, Alan F., F. Kelledy and R. O'Donnell, 1995. \"TREC-4 Experiments at Dublin City University: Thresholding Posting Lists, Query Expansion with WordNet and POS Tagging of Spanish,\" in Donna K. Harman (ed.) NIST Special Publication 500-236: The Fourth Text REtrieval Conference (TREC-4), Gaithersburg, MD, USA: Department of Commerce, National Institute of Standards and Technology. (http://trec.nist.gov/pubs/trec4/t4_proceedings.html)",
774
- "links": null
775
- },
776
- "BIBREF10": {
777
- "ref_id": "b10",
778
- "title": "Experiments on Using Semantic Distances Between Words in Image Caption Retrieval",
779
- "authors": [
780
- {
781
- "first": "Alan",
782
- "middle": [
783
- "F"
784
- ],
785
- "last": "Smeaton",
786
- "suffix": ""
787
- },
788
- {
789
- "first": "I",
790
- "middle": [],
791
- "last": "Quigley",
792
- "suffix": ""
793
- }
794
- ],
795
- "year": 1996,
796
- "venue": "Proceedings of the 19 th International Conference on Research and Development in Information Retrieval (SIGIR96)",
797
- "volume": "",
798
- "issue": "",
799
- "pages": "",
800
- "other_ids": {},
801
- "num": null,
802
- "urls": [],
803
- "raw_text": "Smeaton, Alan F. and I. Quigley, 1996. \"Experiments on Using Semantic Distances Between Words in Image Caption Retrieval,\" in Proceedings of the 19 th International Conference on Research and Development in Information Retrieval (SIGIR96) Zurich, Switzerland.",
804
- "links": null
805
- },
806
- "BIBREF11": {
807
- "ref_id": "b11",
808
- "title": "Query Expansion Using Lexical-Semantic Relations",
809
- "authors": [
810
- {
811
- "first": "Ellen",
812
- "middle": [
813
- "M"
814
- ],
815
- "last": "Voorhees",
816
- "suffix": ""
817
- }
818
- ],
819
- "year": 1994,
820
- "venue": "Proceedings of the 17 th International ACM SIGIR Conference on Research and Development in Information Retrieval",
821
- "volume": "",
822
- "issue": "",
823
- "pages": "61--70",
824
- "other_ids": {},
825
- "num": null,
826
- "urls": [],
827
- "raw_text": "Voorhees, Ellen M. 1994. \"Query Expansion Using Lexical-Semantic Relations,\" in Proceedings of the 17 th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 61-70.",
828
- "links": null
829
- },
830
- "BIBREF12": {
831
- "ref_id": "b12",
832
- "title": "Using WordNet to Disambiguate Word Senses for Text Retrieval",
833
- "authors": [
834
- {
835
- "first": "Ellen",
836
- "middle": [
837
- "M"
838
- ],
839
- "last": "Voorhees",
840
- "suffix": ""
841
- }
842
- ],
843
- "year": 1993,
844
- "venue": "Proceedings of the 16 th International ACM SIGIR Conference on Research and Development in Information Retrieval",
845
- "volume": "",
846
- "issue": "",
847
- "pages": "171--180",
848
- "other_ids": {},
849
- "num": null,
850
- "urls": [],
851
- "raw_text": "Voorhees, Ellen M. 1993. \"Using WordNet to Disambiguate Word Senses for Text Retrieval,\" in Proceedings of the 16 th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 171-180.",
852
- "links": null
853
- },
854
- "BIBREF13": {
855
- "ref_id": "b13",
856
- "title": "1999\u2022 The 7 th Text Retrieval Conference",
857
- "authors": [
858
- {
859
- "first": "Ellen",
860
- "middle": [
861
- "M"
862
- ],
863
- "last": "Voorhees",
864
- "suffix": ""
865
- },
866
- {
867
- "first": "Donna",
868
- "middle": [
869
- "K"
870
- ],
871
- "last": "Harman",
872
- "suffix": ""
873
- }
874
- ],
875
- "year": null,
876
- "venue": "",
877
- "volume": "",
878
- "issue": "",
879
- "pages": "",
880
- "other_ids": {},
881
- "num": null,
882
- "urls": [],
883
- "raw_text": "Voorhees, Ellen M. and Donna K. Harman, editors, 1999\u2022 The 7 th Text Retrieval Conference (TREC-7).",
884
- "links": null
885
- }
886
- },
887
- "ref_entries": {
888
- "FIGREF1": {
889
- "type_str": "figure",
890
- "uris": null,
891
- "text": "originally drawn from http ://humanities.uchicago.edu/ARTFL/proj ects/academie/1835.searchform.html: French",
892
- "num": null
893
- },
894
- "TABREF0": {
895
- "html": null,
896
- "type_str": "table",
897
- "content": "<table><tr><td colspan=\"2\">Source Example</td><td/><td>Score</td><td/><td/></tr><tr><td colspan=\"4\">Human blonde children playing 90(#3</td><td/><td/></tr><tr><td/><td>with marbles</td><td/><td colspan=\"2\">should be</td><td/></tr><tr><td/><td/><td/><td>#1;</td><td/><td/></tr><tr><td/><td/><td/><td colspan=\"2\">remainder</td><td/></tr><tr><td/><td/><td/><td colspan=\"2\">of top 20</td><td/></tr><tr><td/><td/><td/><td>ok)</td><td/><td/></tr><tr><td>AV</td><td colspan=\"4\">blond children playing 90 (2 of with marbles 20 bad)</td><td colspan=\"2\">5 Appendix: Data</td></tr><tr><td>Lang.</td><td colspan=\"4\">young fair play by means 50 (1 of 2</td><td colspan=\"2\">Source Example</td><td>Score</td></tr><tr><td/><td>of marble</td><td/><td>bad)</td><td/><td/></tr><tr><td colspan=\"2\">Human buying power</td><td/><td/><td/><td colspan=\"2\">Human men repairing road</td><td>100</td></tr><tr><td>AV Lang.</td><td>spending power purchasing power</td><td/><td colspan=\"2\">45 (11 of 20 bad) 100</td><td>AV Lang.</td><td>men repairing wagon man repair road</td><td>0 100</td></tr><tr><td>AV</td><td colspan=\"4\">successful businessman in 60 (8 of</td><td colspan=\"2\">Human woman shopping in store wearing</td><td>red 100</td></tr><tr><td/><td>office</td><td/><td>20 bad)</td><td/><td>AV</td><td>woman dressed red buying 90 (2 of</td></tr><tr><td>Lang.</td><td colspan=\"4\">successful businessman in 6 (8 of 20</td><td/><td>in one tends</td><td>20 bad)</td></tr><tr><td/><td>office</td><td/><td>bad)</td><td/><td colspan=\"2\">Lang. woman clothe red buy in wearing</td></tr><tr><td/><td/><td/><td/><td/><td/><td>shop</td><td>red is lost</td></tr><tr><td/><td/><td/><td/><td/><td/><td>75 (5 of</td></tr><tr><td colspan=\"2\">Human mother and</td><td colspan=\"3\">daughter 100 (but</td><td/><td>20 bad)</td></tr><tr><td/><td colspan=\"3\">baking bread in the kitchen no</td><td>full</td><td/></tr><tr><td/><td/><td/><td colspan=\"2\">matches)</td><td/></tr><tr><td>AV</td><td>mother and</td><td colspan=\"3\">daughter 30 (14 of</td><td colspan=\"2\">Human cars driving on the 100</td></tr><tr><td/><td colspan=\"2\">[horneando-removed]</td><td colspan=\"2\">20 bad)</td><td/><td>highway</td></tr><tr><td>Lang.</td><td colspan=\"4\">bread in the kitchen mother and child bake 100 (but</td><td>AV</td><td>cars handling by the 80' (4 of freeway 20 bad)</td></tr><tr><td/><td colspan=\"2\">bread in the kitchen</td><td colspan=\"2\">no matches) full</td><td>Lang.</td><td>cart handle for the 0 expressway</td></tr><tr><td colspan=\"3\">Human old age and loneliness</td><td>100</td><td/><td/></tr><tr><td>AV</td><td colspan=\"2\">oldness and solitude</td><td>0</td><td/><td colspan=\"2\">Human lions hunting in the 80 (1 of 5</td></tr><tr><td>Lang.</td><td colspan=\"2\">old age and loneliness</td><td>100</td><td/><td/><td>African forest</td><td>bad)</td></tr><tr><td/><td/><td/><td/><td/><td>AV</td><td>lions hunting in the 80 (1 of 5</td></tr><tr><td/><td/><td/><td/><td/><td/><td>African forest</td><td>bad)</td></tr><tr><td/><td/><td/><td/><td/><td>Lang.</td><td>lion hunt in thejungle</td><td>45 (11 of</td></tr><tr><td/><td/><td/><td/><td/><td>gSt ]</td><td>I 20 bad)</td></tr><tr><td/><td/><td/><td/><td/><td/><td>I~:~</td><td>i ~</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">Human juggler using colorful balls 67 (1 of 3</td></tr><tr><td/><td/><td/><td/><td/><td/><td>bad)</td></tr><tr><td/><td/><td/><td/><td/><td>AV</td><td>juggler with using balls of 50 (4 of 
8</td></tr><tr><td/><td/><td/><td/><td/><td/><td>colors</td><td>bad)</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">Lang. juggler by means of use (0;</td><td>1</td></tr><tr><td/><td/><td/><td/><td/><td/><td>ball colour</td><td>should be</td></tr><tr><td/><td/><td/><td/><td/><td/><td>there)</td></tr></table>",
898
- "num": null,
899
- "text": "'~ .........................................................."
900
- }
901
- }
902
- }
903
- }

Full_text_JSON/prefixA/json/A00/A00-1004.json DELETED
@@ -1,1181 +0,0 @@
1
- {
2
- "paper_id": "A00-1004",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:11:53.550436Z"
6
- },
7
- "title": "Automatic construction of parallel English-Chinese corpus for cross-language information retrieval",
8
- "authors": [
9
- {
10
- "first": "Jiang",
11
- "middle": [],
12
- "last": "Chen",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "succursale CENTRE-VILLE Montreal (Quebec)",
17
- "location": {
18
- "postCode": "H3C 3J7",
19
- "country": "Canada"
20
- }
21
- },
22
- "email": "chen@ca"
23
- },
24
- {
25
- "first": "Jian-Yun",
26
- "middle": [],
27
- "last": "Nie",
28
- "suffix": "",
29
- "affiliation": {
30
- "laboratory": "",
31
- "institution": "succursale CENTRE-VILLE Montreal (Quebec)",
32
- "location": {
33
- "postCode": "H3C 3J7",
34
- "country": "Canada"
35
- }
36
- },
37
- "email": "[email protected]@ca"
38
- }
39
- ],
40
- "year": "",
41
- "venue": null,
42
- "identifiers": {},
43
- "abstract": "A major obstacle to the construction of a probabilistic translation model is the lack of large parallel corpora. In this paper we first describe a parallel text mining system that finds parallel texts automatically on the Web. The generated Chinese-English parallel corpus is used to train a probabilistic translation model which translates queries for Chinese-English cross-language information retrieval (CLIR). We will discuss some problems in translation model training and show the preliminary CUR results.",
44
- "pdf_parse": {
45
- "paper_id": "A00-1004",
46
- "_pdf_hash": "",
47
- "abstract": [
48
- {
49
- "text": "A major obstacle to the construction of a probabilistic translation model is the lack of large parallel corpora. In this paper we first describe a parallel text mining system that finds parallel texts automatically on the Web. The generated Chinese-English parallel corpus is used to train a probabilistic translation model which translates queries for Chinese-English cross-language information retrieval (CLIR). We will discuss some problems in translation model training and show the preliminary CUR results.",
50
- "cite_spans": [],
51
- "ref_spans": [],
52
- "eq_spans": [],
53
- "section": "Abstract",
54
- "sec_num": null
55
- }
56
- ],
57
- "body_text": [
58
- {
59
- "text": "Parallel texts have been used in a number of studies in computational linguistics. Brown et al. (1993) defined a series of probabilistic translation models for MT purposes. While people may question the effectiveness of using these models for a full-blown MT system, the models are certainly valuable for developing translation assistance tools. For example, we can use such a translation model to help complete target text being drafted by a human translator (Langlais et al., 2000) .",
60
- "cite_spans": [
61
- {
62
- "start": 83,
63
- "end": 102,
64
- "text": "Brown et al. (1993)",
65
- "ref_id": "BIBREF1"
66
- },
67
- {
68
- "start": 460,
69
- "end": 483,
70
- "text": "(Langlais et al., 2000)",
71
- "ref_id": "BIBREF8"
72
- }
73
- ],
74
- "ref_spans": [],
75
- "eq_spans": [],
76
- "section": "Introduction",
77
- "sec_num": "1"
78
- },
79
- {
80
- "text": "Another utilization is in cross-language information retrieval (CLIR) where queries have to be translated from one language to another language in which the documents are written. In CLIR, the quality requirement for translation is relatively low. For example, the syntactic aspect is irrelevant. Even if the translated word is not a true translation but is strongly related to the original query, it is still helpful. Therefore, CLIR is a suitable application for such a translation model. However, a major obstacle to this approach is the lack of parallel corpora for model training. Only a few such corpora exist, including the Hansard English-French corpus and the HKUST English-Chinese corpus (Wu, 1994) . In this paper, we will describe a method which automatically searches for parallel texts on the Web. We will discuss the text mining algorithm we adopted, some issues in translation model training using the generated parallel corpus, and finally the translation model's performance in CLIR.",
81
- "cite_spans": [
82
- {
83
- "start": 698,
84
- "end": 708,
85
- "text": "(Wu, 1994)",
86
- "ref_id": "BIBREF12"
87
- }
88
- ],
89
- "ref_spans": [],
90
- "eq_spans": [],
91
- "section": "Introduction",
92
- "sec_num": "1"
93
- },
94
- {
95
- "text": "Parallel Text Mining Algorithm",
96
- "cite_spans": [],
97
- "ref_spans": [],
98
- "eq_spans": [],
99
- "section": "2",
100
- "sec_num": null
101
- },
102
- {
103
- "text": "The PTMiner system is an intelligent Web agent that is designed to search for large amounts of parallel text on the Web. The mining algorithm is largely language independent. It can thus be adapted to other language pairs with only minor modifications.",
104
- "cite_spans": [],
105
- "ref_spans": [],
106
- "eq_spans": [],
107
- "section": "2",
108
- "sec_num": null
109
- },
110
- {
111
- "text": "Taking advantage of Web search engines as much as possible, PTMiner implements the following steps (illustrated in Fig. 1 ):",
112
- "cite_spans": [],
113
- "ref_spans": [
114
- {
115
- "start": 115,
116
- "end": 121,
117
- "text": "Fig. 1",
118
- "ref_id": "FIGREF0"
119
- }
120
- ],
121
- "eq_spans": [],
122
- "section": "2",
123
- "sec_num": null
124
- },
125
- {
126
- "text": "1 Search for candidate sites -Using existing Web search engines, search for the candidate sites that may contain parallel pages;",
127
- "cite_spans": [],
128
- "ref_spans": [],
129
- "eq_spans": [],
130
- "section": "2",
131
- "sec_num": null
132
- },
133
- {
134
- "text": "2 File name fetching -For each candidate site, fetch the URLs of Web pages that are indexed by the search engines;",
135
- "cite_spans": [],
136
- "ref_spans": [],
137
- "eq_spans": [],
138
- "section": "2",
139
- "sec_num": null
140
- },
141
- {
142
- "text": "3 Host crawling -Starting from the URLs collected in the previous step, search through each candidate site separately for more URLs;",
143
- "cite_spans": [],
144
- "ref_spans": [],
145
- "eq_spans": [],
146
- "section": "2",
147
- "sec_num": null
148
- },
149
- {
150
- "text": "4 Pair scan -From the obtained URLs of each site, scan for possible parallel pairs;",
151
- "cite_spans": [],
152
- "ref_spans": [],
153
- "eq_spans": [],
154
- "section": "2",
155
- "sec_num": null
156
- },
157
- {
158
- "text": "5 Download and verifying -Download the parallel pages, determine file size, language, and character set of each page, and filter out non-parallel pairs.",
159
- "cite_spans": [],
160
- "ref_spans": [],
161
- "eq_spans": [],
162
- "section": "2",
163
- "sec_num": null
164
- },
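The five steps above map onto a simple pipeline. Below is a minimal Python sketch of that control flow; find_candidate_sites, crawl_host, scan_for_pairs, download_and_verify and the rest are our placeholder names for the corresponding steps, not actual PTMiner functions (sketches of several of them follow the paragraphs that describe each step).

    # Control-flow sketch of the five-step mining pipeline described above.
    # search(query) and fetch_links(url) are assumed callables wrapping a
    # search engine and a page fetcher; the other helpers are placeholders.
    def mine_parallel_corpus(search, fetch_links, lang_pair=("en", "zh")):
        corpus = []
        sites = find_candidate_sites(search)              # step 1
        for site in sites:
            urls = search('host : %s' % site)             # step 2: indexed pages
            urls = crawl_host(site, urls, fetch_links)    # step 3: host crawl
            pairs = scan_for_pairs(urls)                  # step 4: name patterns
            corpus += [p for p in pairs
                       if download_and_verify(p, lang_pair)]   # step 5
        return corpus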
165
- {
166
- "text": "We take advantage of the huge number of Web sites indexed by existing search engines in determining candidate sites. This is done by submitting some particular requests to the search engines. The requests are determined according to the following observations. In the sites where parallel text exists, there are normally some pages in one language containing links to the parallel version in the other language. These are usually indicated by those links' anchor texts 1. For example, on some English page there may be a link to its Chinese version with the anchor text \"Chinese Version\" or \"in Chinese\". The same phenomenon can be observed on Chinese pages. Chances are that a site with parallel texts will contain such links in some of its documents. This fact is used as the criterion in searching for candidate sites. Therefore, to determine possible sites for English-Chinese parallel texts, we can request an English document containing the following anchor:",
167
- "cite_spans": [],
168
- "ref_spans": [],
169
- "eq_spans": [],
170
- "section": "Search for candidate Sites",
171
- "sec_num": "2.1"
172
- },
173
- {
174
- "text": "anchor : \"english version H [\"in english\", ...] .",
175
- "cite_spans": [
176
- {
177
- "start": 28,
178
- "end": 47,
179
- "text": "[\"in english\", ...]",
180
- "ref_id": null
181
- }
182
- ],
183
- "ref_spans": [],
184
- "eq_spans": [],
185
- "section": "Search for candidate Sites",
186
- "sec_num": "2.1"
187
- },
188
- {
189
- "text": "Similar requests are sent for Chinese documents.",
190
- "cite_spans": [],
191
- "ref_spans": [],
192
- "eq_spans": [],
193
- "section": "Search for candidate Sites",
194
- "sec_num": "2.1"
195
- },
196
- {
197
- "text": "From the two sets of pages obtained by the above queries we extract two sets of Web sites. The union of these two sets constitutes then the candidate sites. That is to say, a site is a candidate site when it is found to have either an English page linking to its Chinese version or a Chinese page linking to its English version.",
198
- "cite_spans": [],
199
- "ref_spans": [],
200
- "eq_spans": [],
201
- "section": "Search for candidate Sites",
202
- "sec_num": "2.1"
203
- },
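A sketch of the candidate-site search in this section, assuming a search(query) callable that returns result URLs for an anchor-text query; the anchor lists are abridged illustrations:

    from urllib.parse import urlparse

    # Abridged, illustrative anchor lists; a real run would also include
    # native-script Chinese anchor texts.
    EN_ANCHORS = ["english version", "in english"]
    ZH_ANCHORS = ["chinese version", "in chinese"]

    def find_candidate_sites(search):
        # A site is a candidate if any of its pages links to a version in
        # the other language; we take the union of both directions, i.e.
        # queries of the form  anchor : "english version"  and the like.
        sites = set()
        for anchor in EN_ANCHORS + ZH_ANCHORS:
            for url in search('anchor : "%s"' % anchor):
                sites.add(urlparse(url).netloc)
        return sites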
204
- {
205
- "text": "We now assume that a pair of parallel texts exists on the same site. To search for parallel pairs on a site, PTMiner first has to obtain all (or at least part of) the HTML file names on the site. From these names pairs are scanned. It is possible to use a Web crawler to explore the candidate sites completely. However, we can take advantage of the search engines again to accelerate the process. As the first step, we submit the following query to the search engines:",
206
- "cite_spans": [],
207
- "ref_spans": [],
208
- "eq_spans": [],
209
- "section": "File Name Fetching",
210
- "sec_num": "2.2"
211
- },
212
- {
213
- "text": "host : hostname to fetch the Web pages that they indexed from this site. If we only require a small amount of parallel texts, this result may be sufficient. For our purpose, however, we need to explore the sites more thoroughly using a host crawler. Therefore, we continue our search for files with a host crawler which uses the documents found by the search engines as the starting point.",
214
- "cite_spans": [],
215
- "ref_spans": [],
216
- "eq_spans": [],
217
- "section": "File Name Fetching",
218
- "sec_num": "2.2"
219
- },
220
- {
221
- "text": "A host crawler is slightly different from a Web crawler.",
222
- "cite_spans": [],
223
- "ref_spans": [],
224
- "eq_spans": [],
225
- "section": "Host Crawling",
226
- "sec_num": "2.3"
227
- },
228
- {
229
- "text": "Web crawlers go through innumerable pages and hosts on the Web. A host crawler is a Web crawler that crawls through documents on a given host only. A breadth-first crawling algorithm is applied in PTMiner as host crawler. The principle is that when a link to an unexplored document on the same site is found in a document, it is added to a list that will be explored later. In this way, most file names from the candidate sites are obtained.",
230
- "cite_spans": [],
231
- "ref_spans": [],
232
- "eq_spans": [],
233
- "section": "Host Crawling",
234
- "sec_num": "2.3"
235
- },
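A minimal breadth-first host crawler in the spirit of the description above; fetch_links(url) is an assumed helper that downloads a page and returns the hrefs found on it:

    from collections import deque
    from urllib.parse import urljoin, urlparse

    def crawl_host(host, seed_urls, fetch_links, max_pages=10000):
        queue = deque(seed_urls)
        seen = set(seed_urls)
        while queue and len(seen) < max_pages:
            url = queue.popleft()
            for link in fetch_links(url):
                link = urljoin(url, link).split("#")[0]  # resolve relative links
                if urlparse(link).netloc == host and link not in seen:
                    seen.add(link)          # unexplored same-site document:
                    queue.append(link)      # add it to the list for later (BFS)
        return seen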
236
- {
237
- "text": "After collecting file names for each candidate site, the next task is to determine the parallel pairs. Again, we try to use some heuristic rules to guess which files may be parallel texts before downloading them. The rules are based on external features of the documents. By external feature, we mean those features which may be known without analyzing the contents of the file, such as its URL, size, and date. This is in contrast with the internal features, such as language, character set, and HTML structure, which cannot be known until we have downloaded the page and analyzed its contents.",
238
- "cite_spans": [],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "Pair Scan",
242
- "sec_num": "2.4"
243
- },
244
- {
245
- "text": "The heuristic criterion comes from the following observation: We observe that parallel text pairs usually have similar name patterns. The difference between the names of two parailel pages usually lies in a segment which indicates the language. For example, \"file-ch.html\" (in Chinese) vs. \"file-en.html\" (in English). The difference may also appear in the path, such as \".../chinese/.../file.html\" vs. \".../english/.../file.html'. The name patterns described above are commonly used by webmasters to help organize their sites. Hence, we can suppose that a pair of pages with this kind of pattern are probably parallel texts.",
246
- "cite_spans": [],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "Pair Scan",
250
- "sec_num": "2.4"
251
- },
252
- {
253
- "text": "First, we establish four lists for English prefixes, English suffixes, Chinese prefixes and Chinese suffixes. For example: English Prefix = {e, en, e_, en_, e-, en-, ...}. For each file in one language, if a segment in its name corresponds to one of the language affixes, several new names are generated by changing the segment to the possible corresponding affixes of the other language. If a generated name corresponds to an existing file, then the file is considered as a candidate parallel document of the original file.",
254
- "cite_spans": [],
255
- "ref_spans": [],
256
- "eq_spans": [],
257
- "section": "Pair Scan",
258
- "sec_num": "2.4"
259
- },
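A sketch of the affix-substitution scan: for each URL segment that matches a language affix, generate the names obtained by swapping in the other language's affixes, and keep a pair when a generated name actually exists on the site. The affix lists are abridged and the function names are ours:

    import re

    EN_AFFIXES = {"e", "en", "eng", "english"}
    ZH_AFFIXES = {"c", "ch", "chi", "chinese", "gb", "b5"}

    def candidate_counterparts(url, from_affixes=EN_AFFIXES, to_affixes=ZH_AFFIXES):
        candidates = set()
        # Alphanumeric runs act as segments, so "file-en.html" yields "en"
        # and ".../english/..." yields "english".
        for m in re.finditer(r"[A-Za-z0-9]+", url):
            if m.group(0).lower() in from_affixes:
                for other in to_affixes:
                    candidates.add(url[:m.start()] + other + url[m.end():])
        return candidates

    def scan_for_pairs(urls):
        url_set = set(urls)
        return [(u, v) for u in urls
                for v in candidate_counterparts(u) if v in url_set]

Running scan_for_pairs with the affix lists swapped covers the Chinese-to-English direction as well.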
260
- {
261
- "text": "Next, we further examine the contents of the paired files to determine if they are really parallel according to various external and internal features. This may further improve the pairing precision. The following methods have been implemented in our system.",
262
- "cite_spans": [],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "Filtering",
266
- "sec_num": "2.5"
267
- },
268
- {
269
- "text": "Parallel files often have similar file lengths. One simple way to filter out incorrect pairs is to compare the lengths of the two files. The only problem is to set a reasonable threshold that will not discard too many good pairs, i.e. balance recall and precision. The usual difference ratio depends on the language pairs we are dealing with. For example, Chinese-English parallel texts usually have a larger difference ratio than English-French parallel texts. The filtering threshold had to be determined empirically, from the actual observations. For Chinese-English, a difference up to 50% is tolerated.",
270
- "cite_spans": [],
271
- "ref_spans": [],
272
- "eq_spans": [],
273
- "section": "Text Length",
274
- "sec_num": "2.5.1"
275
- },
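The length criterion reduces to a one-line ratio test; the exact formula below is our assumption, using the 50% tolerance quoted above for Chinese-English:

    def length_ok(size_a, size_b, tolerance=0.5):
        # Reject a pair when the two file sizes differ by more than the
        # tolerated fraction of the larger one.
        if size_a == 0 or size_b == 0:
            return False
        return abs(size_a - size_b) / max(size_a, size_b) <= tolerance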
276
- {
277
- "text": "It is also obvious that the two files of a pair have to be in the two languages of interest. By automatically identifying language and character set, we can filter out the pairs that do not satisfy this basic criterion. Some Web pages explicitly indicate the language and the character set. More often such information is omitted by authors. We need some language identification tool for this task.",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "Language and Character Set",
282
- "sec_num": "2.5.2"
283
- },
284
- {
285
- "text": "SILC is a language and encoding identification system developed by the RALI laboratory at the University of Montreal. It employs a probabilistic model estimated on tri-grams. Using these models, the system is able to determine the most probable language and encoding of a text (Isabelle et al., 1997) .",
286
- "cite_spans": [
287
- {
288
- "start": 277,
289
- "end": 300,
290
- "text": "(Isabelle et al., 1997)",
291
- "ref_id": "BIBREF5"
292
- }
293
- ],
294
- "ref_spans": [],
295
- "eq_spans": [],
296
- "section": "Language and Character Set",
297
- "sec_num": "2.5.2"
298
- },
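A toy character-trigram identifier in the spirit of SILC; this is the generic n-gram recipe, not SILC's actual model:

    import math
    from collections import Counter

    def train_trigram_model(text):
        counts = Counter(text[i:i + 3] for i in range(len(text) - 2))
        total = sum(counts.values()) or 1
        return {g: c / total for g, c in counts.items()}

    def log_likelihood(text, model, floor=1e-7):
        # Unseen trigrams get a small floor probability.
        return sum(math.log(model.get(text[i:i + 3], floor))
                   for i in range(len(text) - 2))

    def identify(text, models):
        # models maps a (language, encoding) label to a trained model;
        # pick the label whose model makes the text most probable.
        return max(models, key=lambda label: log_likelihood(text, models[label]))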
299
- {
300
- "text": "In the STRAND system (Resnik, 1998) , the candidate pairs are evaluated by aligning them according to their HTML structures and computing confidence values. Pairs are assumed to be wrong if they have too many mismatching markups or low confidence values.",
301
- "cite_spans": [
302
- {
303
- "start": 21,
304
- "end": 35,
305
- "text": "(Resnik, 1998)",
306
- "ref_id": "BIBREF10"
307
- }
308
- ],
309
- "ref_spans": [],
310
- "eq_spans": [],
311
- "section": "HTML Structure and Alignment",
312
- "sec_num": "2.5.3"
313
- },
314
- {
315
- "text": "Comparing HTML structures seems to be a sound way to evaluate candidate pairs since parallel pairs usually have similar HTML structures. However, we also noticed that parallel texts may have quite different HTML structures. One of the reasons is that the two files may be created using two HTML editors. For example, one may be used for English and another for Chinese, depending on the language handling capability of the editors. Therefore, caution is required when measuring structure difference numerically.",
316
- "cite_spans": [],
317
- "ref_spans": [],
318
- "eq_spans": [],
319
- "section": "HTML Structure and Alignment",
320
- "sec_num": "2.5.3"
321
- },
322
- {
323
- "text": "Parallel text alignment is still an experimental area. Measuring the confidence values of an alignment is even more complicated. For example, the alignment algorithm we used in the training of the statistical translation model produces acceptable alignment results but it does not provide a confidence value that we can \"confidently\" use as an evaluation criterion. So, for the moment this criterion is not used in candidate pair evaluation.",
324
- "cite_spans": [],
325
- "ref_spans": [],
326
- "eq_spans": [],
327
- "section": "HTML Structure and Alignment",
328
- "sec_num": "2.5.3"
329
- },
330
- {
331
- "text": "3 Generated",
332
- "cite_spans": [],
333
- "ref_spans": [],
334
- "eq_spans": [],
335
- "section": "HTML Structure and Alignment",
336
- "sec_num": "2.5.3"
337
- },
338
- {
339
- "text": "In this section, we describe the results of our parallel text mining and translation model training.",
340
- "cite_spans": [],
341
- "ref_spans": [],
342
- "eq_spans": [],
343
- "section": "Corpus and Translation Model Training",
344
- "sec_num": null
345
- },
346
- {
347
- "text": "Using the above approach for Chinese-English, 185 candidate sites were searched from the domain hk. We limited the mining domain to hk because Hong Kong is a bilingual English-Chinese city where high quality parallel Web sites exist. Because of the small number of candidate sites, the host crawler was used to thoroughly explore each site. The resulting corpus contains 14820 pairs of texts including 117.2Mb Chinese texts and 136.5Mb English texts. The entire mining process lasted about a week. Using length comparison and language identification, we refined the precision of the corpus to about 90%. The precision is estimated by examining 367 randomly picked pairs.",
348
- "cite_spans": [],
349
- "ref_spans": [],
350
- "eq_spans": [],
351
- "section": "The Corpus",
352
- "sec_num": "3.1"
353
- },
354
- {
355
- "text": "Many approaches in computational linguistics try to extract translation knowledge from previous translation examples. Most work of this kind establishes probabilistic models from parallel corpora. Based on one of the statistical models proposed by Brown et al. (1993) , the basic principle of our translation model is the following: given a corpus of aligned sentences, if two words often co-occur in the source and target sentences, there is a good likelihood that they are translations of each other. In the simplest case (model 1), the model learns the probability, p(tls), of having a word t in the translation of a sentence containing a word s. For an input sentence, the model then calculates a sequence of words that are most probable to appear in its translation. Using a similar statistical model, Wu (1995) extracted a largescale English-Chinese lexicon from the HKUST cor-<s id=\"00~\"> <HTML> <HEAD> <META HTrP-EQUIV=\"Content-type\" CONTENT=\"text/html; charset--iso-8859-1\"> <META HTI'P-EQUIV=\"Content-language\" CONTENT=\"Western\"> </s> <s id=\"0001\"> <TITLE>Journal of Primary Education 1996, VoI., No. l&2, pp. 19 -27 </TITLE> </HEAD> </s> </s> <s id=\"0003\"> <HI>Journal of Primary Education </HI> </s> <s id=\"0004\"> <HR> <B>Volume 6, No l&2, pp. 19-27 (May, 1996) </B> <HR> </s> <s id=\"0005\"> <H3>Principles for Redesigning Teacher Education </H3> Alan TOM </CENTER> </s> <s id=\"0006\"> <P> <B> <I> Abstract </I> </B> </s> <s id=\"0000\"> <HTML> <HEAD> <META H'ITP-EQUW=\"Content-type\" CONTENT=\"text/html; charset=bigS\"> <META HTTP-EQUIV=\"Content-language\" CONTENT=\"zh\"> <Is> pus which is built manually. In our case, the probabilistic translation model will be used for CLIR. The requirement on our translation model may be less demanding: it is not absolutely necessary that a word t with high p(tls ) always be a true translation of s. It is still useful if t is strongly related to s. For example, although \"railway\" is not a true translation of \"train\" (in French), it is highly useful to include \"railway\" in the translation of a query on \"train\". This is one of the reasons why we think a less controlled parallel corpus can be used to train a translation model for CLIR.",
356
- "cite_spans": [
357
- {
358
- "start": 248,
359
- "end": 267,
360
- "text": "Brown et al. (1993)",
361
- "ref_id": "BIBREF1"
362
- },
363
- {
364
- "start": 807,
365
- "end": 816,
366
- "text": "Wu (1995)",
367
- "ref_id": "BIBREF14"
368
-                    }
375
- ],
376
- "ref_spans": [],
377
- "eq_spans": [],
378
- "section": "Statistical Translation Model",
379
- "sec_num": "3.2"
380
- },
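The "simplest case (model 1)" referred to above admits a compact EM implementation. A sketch, where pairs is a list of (source_tokens, target_tokens) aligned sentence pairs and the returned table maps (t, s) to an estimate of p(t|s):

    from collections import defaultdict

    def train_model1(pairs, iterations=10):
        t = defaultdict(lambda: 1.0)        # flat initialization
        for _ in range(iterations):
            count = defaultdict(float)      # expected co-occurrence counts
            total = defaultdict(float)
            for src, tgt in pairs:
                src = src + ["NULL"]        # allow alignment to an empty word
                for w_t in tgt:
                    norm = sum(t[(w_t, w_s)] for w_s in src)
                    for w_s in src:
                        frac = t[(w_t, w_s)] / norm
                        count[(w_t, w_s)] += frac   # E-step
                        total[w_s] += frac
            for (w_t, w_s), c in count.items():
                t[(w_t, w_s)] = c / total[w_s]      # M-step: renormalize
        return t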
381
- {
382
- "text": "Before the mined documents can be aligned into parallel sentences, the raw texts have to undergo a series of some preprocessing, which, to some extent, is language dependent. For example, the major operations on the Chinese-English corpus include encoding scheme transformation (for Chinese), sentence level segmentation, parallel text alignment, Chinese word segmentation (Nie et al., 1999) and English expression extraction.",
383
- "cite_spans": [
384
- {
385
- "start": 373,
386
- "end": 391,
387
- "text": "(Nie et al., 1999)",
388
- "ref_id": "BIBREF9"
389
- }
390
- ],
391
- "ref_spans": [],
392
- "eq_spans": [],
393
- "section": "Parallel Text Alignment",
394
- "sec_num": "3.3"
395
- },
396
- {
397
- "text": "The parallel Web pages we collected from various sites are not all of the same quality. Some are highly parallel and easy to align while others can be very noisy. Aligning English-Chinese parallel texts is already very difficult because of the great differences in the syntactic structures and writing systems of the two languages. A number of alignment techniques have been proposed, varying from statistical methods (Brown et al., 1991; Gale and Church, 1991) to lexical methods (Kay and RSscheisen, 1993; Chen, 1993) . The method we adopted is that of Simard et al. (1992) . Because it considers both length similarity and cognateness as alignment criteria, the method is more robust and better able to deal with noise than pure length-based methods. Cognates are identical sequences of characters in corresponding words in two languages. They are commonly found in English and French. In the case of English-Chinese alignment, where there are no cognates shared by the two languages, only the HTML markup in both texts are taken as cognates. Because the HTML structures of parallel pages are normally similar, the markup was found to be helpful for alignment.",
398
- "cite_spans": [
399
- {
400
- "start": 418,
401
- "end": 438,
402
- "text": "(Brown et al., 1991;",
403
- "ref_id": "BIBREF1"
404
- },
405
- {
406
- "start": 439,
407
- "end": 461,
408
- "text": "Gale and Church, 1991)",
409
- "ref_id": "BIBREF4"
410
- },
411
- {
412
- "start": 481,
413
- "end": 507,
414
- "text": "(Kay and RSscheisen, 1993;",
415
- "ref_id": "BIBREF6"
416
- },
417
- {
418
- "start": 508,
419
- "end": 519,
420
- "text": "Chen, 1993)",
421
- "ref_id": "BIBREF2"
422
- },
423
- {
424
- "start": 555,
425
- "end": 575,
426
- "text": "Simard et al. (1992)",
427
- "ref_id": "BIBREF11"
428
- }
429
- ],
430
- "ref_spans": [],
431
- "eq_spans": [],
432
- "section": "Parallel Text Alignment",
433
- "sec_num": "3.3"
434
- },
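A schematic dynamic-programming aligner combining the two criteria named above, length similarity and (here, HTML-markup) cognates; the bead shapes and scoring weights are invented for the sketch, not taken from Simard et al.:

    import re

    def markup(s):
        return set(re.findall(r"<[^>]+>", s))

    def bead_score(a, b):
        la, lb = len(a) or 1, len(b) or 1
        length_term = -abs(la - lb) / max(la, lb)   # penalize length mismatch
        return length_term + 0.5 * len(markup(a) & markup(b))  # reward shared tags

    def align(src, tgt):
        # best[i][j]: best score aligning src[:i] with tgt[:j]
        NEG = float("-inf")
        best = [[NEG] * (len(tgt) + 1) for _ in range(len(src) + 1)]
        back = {}
        best[0][0] = 0.0
        for i in range(len(src) + 1):
            for j in range(len(tgt) + 1):
                if best[i][j] == NEG:
                    continue
                for di, dj in ((1, 1), (1, 2), (2, 1)):   # allowed bead shapes
                    if i + di <= len(src) and j + dj <= len(tgt):
                        s = best[i][j] + bead_score(" ".join(src[i:i + di]),
                                                    " ".join(tgt[j:j + dj]))
                        if s > best[i + di][j + dj]:
                            best[i + di][j + dj] = s
                            back[(i + di, j + dj)] = (i, j)
        beads, ij = [], (len(src), len(tgt))
        while ij in back:                  # trace back into (src_span, tgt_span)
            pi, pj = back[ij]
            beads.append((src[pi:ij[0]], tgt[pj:ij[1]]))
            ij = (pi, pj)
        return list(reversed(beads))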
435
- {
436
- "text": "To illustrate how markup can help with the alignment, we align the same pair with both the pure length-based method of Gale & Church (Fig. 2) , and the method of Simard et al. (Fig. 3) . First of all, we observe from the figures that the two texts are <s id=\"0003\"> <H 1 >Journal of Primary Education </H 1 > <Is> <s id=\"0004\"> <HR> <B>Volume 6, No l&2, pp. 19-27 (May, 1996) divided into sentences. The sentences are marked by <s id=\"xxxx\"> and </s>. Note that we determine sentences not only by periods, but also by means of HTML markup.",
437
- "cite_spans": [
438
- {
439
- "start": 343,
440
- "end": 375,
441
- "text": "6, No l&2, pp. 19-27 (May, 1996)",
442
- "ref_id": null
443
- }
444
- ],
445
- "ref_spans": [
446
- {
447
- "start": 133,
448
- "end": 141,
449
- "text": "(Fig. 2)",
450
- "ref_id": "FIGREF1"
451
- },
452
- {
453
- "start": 176,
454
- "end": 184,
455
- "text": "(Fig. 3)",
456
- "ref_id": "FIGREF2"
457
- }
458
- ],
459
- "eq_spans": [],
460
- "section": "Parallel Text Alignment",
461
- "sec_num": "3.3"
462
- },
471
- {
472
- "text": "We further notice that it is difficult to align sentences 0002. The sentence in the Chinese page is much longer than its counterpart in the English page because some additional information (font) is added. The length-based method thus tends to take sentence 0002, 0003, and 0004 in the English page as the translation of sentence 0002 in the Chinese page (Fig. 2) , which is wrong. This in turn provocated the three following incorrect alignments. As we can see in Fig. 3 , the cognate method did not make the same mistake because of the noise in sentence 0002. Despite their large length difference, the two 0002 sentences are still aligned as a 1-1 pair, because the sentences in the following 4 alignments (0003 -0003; 0004 -0004, 0005; 0005 -0006; 0006 -0007) have rather similar HTML markups and are taken by the program to be the most likely alignments.",
473
- "cite_spans": [],
474
- "ref_spans": [
475
- {
476
- "start": 355,
477
- "end": 363,
478
- "text": "(Fig. 2)",
479
- "ref_id": "FIGREF1"
480
- },
481
- {
482
- "start": 465,
483
- "end": 471,
484
- "text": "Fig. 3",
485
- "ref_id": "FIGREF2"
486
- }
487
- ],
488
- "eq_spans": [],
489
- "section": "Parallel Text Alignment",
490
- "sec_num": "3.3"
491
- },
492
- {
493
- "text": "Beside HTML markups, other criteria may also be incorporated. For example, it would be helpful to consider strong correspondence between certain English and Chinese words, as in (Wu, 1994) . We hope to implement such correspondences in our future research.",
494
- "cite_spans": [
495
- {
496
- "start": 178,
497
- "end": 188,
498
- "text": "(Wu, 1994)",
499
- "ref_id": "BIBREF12"
500
- }
501
- ],
502
- "ref_spans": [],
503
- "eq_spans": [],
504
- "section": "Parallel Text Alignment",
505
- "sec_num": "3.3"
506
- },
507
- {
508
- "text": "To evaluate the precision of the English-Chinese translation model trained on the Web corpus, we examined two sample lexicons of 200 words, one in each direction. The 200 words for each lexicon were randomly selected from the training source. We examined the most probable translation for each word. The Chinese-English lexicon was found to have a precision of 77%. The English-Chinese lexicon has a higher precision of 81.5%. Part of the lexicons are shown in Fig. 4 , where t/f indicates whether a translation is true or false.",
509
- "cite_spans": [],
510
- "ref_spans": [
511
- {
512
- "start": 461,
513
- "end": 467,
514
- "text": "Fig. 4",
515
- "ref_id": null
516
- }
517
- ],
518
- "eq_spans": [],
519
- "section": "Lexicon Evaluation",
520
- "sec_num": "3.4"
521
- },
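The evaluation protocol above (sample 200 source words, judge the single most probable translation of each) is easy to reproduce; a sketch, with model being a (t, s) -> p(t|s) table as in the Model 1 sketch earlier:

    import random

    def sample_lexicon(model, source_vocab, n=200, seed=0):
        rng = random.Random(seed)
        lexicon = {}
        for s in rng.sample(sorted(source_vocab), n):
            candidates = [(p, t) for (t, w), p in model.items() if w == s]
            if candidates:
                lexicon[s] = max(candidates)[1]   # most probable translation
        return lexicon   # precision = (entries judged true) / n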
522
- {
523
- "text": "These precisions seem to be reasonably high. They are quite comparable to that obtained by Wu (1994) using a manual Chinese-English parallel corpus.",
524
- "cite_spans": [
525
- {
526
- "start": 91,
527
- "end": 100,
528
- "text": "Wu (1994)",
529
- "ref_id": "BIBREF12"
530
- }
531
- ],
532
- "ref_spans": [],
533
- "eq_spans": [],
534
- "section": "Lexicon Evaluation",
535
- "sec_num": "3.4"
536
- },
537
- {
538
- "text": "We also found that stop-lists have significant effect on the translation model. Stop-list is a set of the most frequent words that we remove from the train- ing source. Because these words exist in most alignments, the statistical model cannot derive correct translations for them. More importantly, their existence greatly affects the accuracy of other translations. They can be taken as translations for many words. A priori, it would seem that both the English and Chinese stop-lists should be applied to eliminate the noise caused by them. Interestingly, from our observation and analysis we concluded that for better precision, only the stop-list of the target language should be applied in the model training.",
539
- "cite_spans": [],
540
- "ref_spans": [],
541
- "eq_spans": [],
542
- "section": "Effect of Stopwords",
543
- "sec_num": null
544
- },
545
- {
546
- "text": "We first explain why the stop-list of the target language has to be applied. On the left side of Fig. 5 , if the Chinese word C exists in the same alignments with the English word E more than any other Chinese words, C will be the most probable translation for E. Because of their frequent appearance, some Chinese stopwords may have more chances to be in the same alignments with E. The probability of the translation E --+ C is then reduced (maybe even less than those of the incorrect ones). This is the reason why many English words are translated to \"~' (of) by the translation model trained without using the Chinese stop-list.",
547
- "cite_spans": [],
548
- "ref_spans": [
549
- {
550
- "start": 97,
551
- "end": 103,
552
- "text": "Fig. 5",
553
- "ref_id": "FIGREF3"
554
- }
555
- ],
556
- "eq_spans": [],
557
- "section": "Effect of Stopwords",
558
- "sec_num": null
559
- },
560
- {
561
- "text": "We also found that it is not necessary to remove the stopwords of the source language. In fact, as illustrated on the right side of Fig. 5 , the existence of the English stopwords has two effects on the probability of the translation E -~ C:",
562
- "cite_spans": [],
563
- "ref_spans": [
564
- {
565
- "start": 132,
566
- "end": 138,
567
- "text": "Fig. 5",
568
- "ref_id": "FIGREF3"
569
- }
570
- ],
571
- "eq_spans": [],
572
- "section": "Effect of Stopwords",
573
- "sec_num": null
574
- },
575
- {
576
- "text": "1 They may often be found together with the Chinese word C. Owing to the Expectation Maximization algorithm, the probability of E -~ C may therefore be reduced.",
577
- "cite_spans": [],
578
- "ref_spans": [],
579
- "eq_spans": [],
580
- "section": "Effect of Stopwords",
581
- "sec_num": null
582
- },
583
- {
584
- "text": "2 On the other hand, there is a greater likelihood that English stopwords will be found together with the most frequent Chinese words. Here, we use the term \"Chinese frequent words\" instead of \"Chinese stopwords\" because even if a stop-list is applied, there may still remain some common words that have the same effect as the stopwords. The coexistence of English and Chinese frequent words reduces the probability that the Chinese frequent words are the translations of E, and thus raise the probability of E -+ C.",
585
- "cite_spans": [],
586
- "ref_spans": [],
587
- "eq_spans": [],
588
- "section": "Effect of Stopwords",
589
- "sec_num": null
590
- },
591
- {
592
- "text": "The second effect was found to be more significant than the first, since the model trained without the English stopwords has better precision than the model trained with the English stopwords. For the correct translations given by both models, the model English-Chinese CLIR Results",
593
- "cite_spans": [],
594
- "ref_spans": [],
595
- "eq_spans": [],
596
- "section": "Effect of Stopwords",
597
- "sec_num": null
598
- },
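The practical upshot of this analysis is a one-sided filtering step before EM training; a sketch:

    def strip_target_stopwords(pairs, target_stoplist):
        # Remove stopwords from the *target* side only, as concluded above;
        # the source side is left intact.
        cleaned = []
        for src_tokens, tgt_tokens in pairs:
            kept = [w for w in tgt_tokens if w not in target_stoplist]
            if kept:    # drop pairs whose target side becomes empty
                cleaned.append((src_tokens, kept))
        return cleaned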
599
- {
600
- "text": "Our final goal was to test the performance of the translation models trained on the Web parallel corpora in CLIR. We conducted CLIR experiments using the Smart IR system.",
601
- "cite_spans": [],
602
- "ref_spans": [],
603
- "eq_spans": [],
604
- "section": "Effect of Stopwords",
605
- "sec_num": null
606
- },
607
- {
608
- "text": "The English test corpus (for C-E CLIR) was the AP corpus used in TREC6 and TREC7. The short English queries were translated manually into Chinese and then translated back to English by the translation model. The Chinese test corpus was the one used in the TREC5 and TREC6 Chinese track. It contains both Chinese queries and their English translations.",
609
- "cite_spans": [],
610
- "ref_spans": [],
611
- "eq_spans": [],
612
- "section": "Results",
613
- "sec_num": "4.1"
614
- },
615
- {
616
- "text": "Our experiments on these two corpora produced the results shown in Tab. 1. The precision of monolingual IR is given as benchmark. In both E-C and C-E CLIR, the translation model achieved around 40% of monolingual precision. To compare with the dictionary-based approach, we employed a Chinese-English dictionary, CEDICT (Denisowski, 1999) , and an English-Chinese online dictionary (Anonymous, 1999a) to translate queries. For each word of the source query, all the possible translations given by the dictionary are included in the translated query. The Chinese-English dictionary has about the same performace as the translation model, while the English-Chinese dictionary has lower precision than that of the translation model.",
617
- "cite_spans": [
618
- {
619
- "start": 320,
620
- "end": 338,
621
- "text": "(Denisowski, 1999)",
622
- "ref_id": "BIBREF3"
623
- }
624
- ],
625
- "ref_spans": [],
626
- "eq_spans": [],
627
- "section": "Results",
628
- "sec_num": "4.1"
629
- },
630
- {
631
- "text": "We also tried to combine the translations given by the translation model and the dictionary. In both C-E and E-C CLIR, significant improvements were achieved (as shown in Tab. 1). The improvements show that the translations given by the translation model and the dictionary complement each other well for IR purposes. The translation model may give either exact translations or incorrect but related words. Even though these words are not correct in the sense of translation, they are very possibly related to the subject of the query and thus helpful for IR purposes. The dictionary-based approach expands a query along another dimension. It gives all the possible translations for each word including those that are missed by the translation model.",
632
- "cite_spans": [],
633
- "ref_spans": [],
634
- "eq_spans": [],
635
- "section": "Results",
636
- "sec_num": "4.1"
637
- },
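A sketch of the combination used here: each query word contributes its top-k model translations plus every dictionary sense. model is a (t, s) -> p(t|s) table as in the Model 1 sketch earlier; dictionary maps a source word to a list of translations (both names are illustrative):

    def translate_query(query_tokens, model, dictionary, k=3):
        translated = []
        for s in query_tokens:
            scored = sorted(((p, t) for (t, w), p in model.items() if w == s),
                            reverse=True)
            translated.extend(t for _, t in scored[:k])   # model translations
            translated.extend(dictionary.get(s, []))      # all dictionary senses
        return translated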
638
- {
639
- "text": "One advantage of a parallel text-based translation model is that it is easier to build than an MT system. Now that we have examined the CLIR performance of the translation model, we will compare it with two existing MT systems. Both systems were tested in E-C CLIR.",
640
- "cite_spans": [],
641
- "ref_spans": [],
642
- "eq_spans": [],
643
- "section": "Comparison With MT Systems",
644
- "sec_num": "4.2"
645
- },
646
- {
647
- "text": "Using the Sunshine WebTran server (Anonymous, 1999b) , an online Engiish-Chinese MT system, to translate the 54 English queries, we obtained an average precision of 0.2001, which is 50.3% of the mono-lingual precision. The precision is higher than that obtained using the translation model (0.1804) or the dictionary (0.1427) alone, but lower than the precison obtained using them together (0.2232). Kwok (1999) investigated the CLIR performance of an English-Chinese MT software called Transperfect, using the same TREC Chinese collection as we used in this study. Using the MT software alone, Kwok achieved 56% of monolingual precision. The precision is improved to 62% by refining the translation with a dictionary. Kwok also adopted pretranslation query expansion, which further improved the precison to 70% of the monolingual results.",
648
- "cite_spans": [
649
- {
650
- "start": 34,
651
- "end": 52,
652
- "text": "(Anonymous, 1999b)",
653
- "ref_id": null
654
- },
655
- {
656
- "start": 400,
657
- "end": 411,
658
- "text": "Kwok (1999)",
659
- "ref_id": "BIBREF7"
660
- }
661
- ],
662
- "ref_spans": [],
663
- "eq_spans": [],
664
- "section": "Sunshine WebTran Server",
665
- "sec_num": "4.2.1"
666
- },
667
- {
668
- "text": "In our case, the best E-C CLIR precison using the translation model (and dictionary) is 56.1%. It is lower than what Kwok achieved using Transperfect, however, the difference is not large.",
669
- "cite_spans": [],
670
- "ref_spans": [],
671
- "eq_spans": [],
672
- "section": "Transperfect",
673
- "sec_num": "4.2.2"
674
- },
675
- {
676
- "text": "The Chinese-English translation model has a fax lower CLIR performance than that of the English-French model established using the same method (Nie et al., 1999) . The principal reason for this is the fact that English and Chinese are much more different than English and French. This problem surfaced in many phases of this work, from text alignment to query translation. Below, we list some further factors affecting CLIR precision.",
677
- "cite_spans": [
678
- {
679
- "start": 143,
680
- "end": 161,
681
- "text": "(Nie et al., 1999)",
682
- "ref_id": "BIBREF9"
683
- }
684
- ],
685
- "ref_spans": [],
686
- "eq_spans": [],
687
- "section": "Further Problems",
688
- "sec_num": "4.3"
689
- },
690
- {
691
- "text": "\u2022 The Web-collected corpus is noisy and it is difficult to align English-Chinese texts. The alignment method we employed has performed more poorly than on English-French alignment. This in turn leads to poorer performance of the translation model. In general, we observe a higher variability in Chinese-English translations than in English-French translations.",
692
- "cite_spans": [],
693
- "ref_spans": [],
694
- "eq_spans": [],
695
- "section": "Further Problems",
696
- "sec_num": "4.3"
697
- },
698
- {
699
- "text": "\u2022 For E-C CLIR, although queries in both languages were provided, the English queries were not strictly translated from the original Chinese ones. For example, AJg,~ (human right situation) was translated into human right issue. We cannot expect the translation model to translate issue back to ~ (situation). ",
700
- "cite_spans": [],
701
- "ref_spans": [],
702
- "eq_spans": [],
703
- "section": "Further Problems",
704
- "sec_num": "4.3"
705
- },
706
- {
707
- "text": "The goal of this work was to investigate the feasibility of using a statistical translation model trained on a Web-collected corpus to do English-Chinese CLIR.",
708
- "cite_spans": [],
709
- "ref_spans": [],
710
- "eq_spans": [],
711
- "section": "Summary",
712
- "sec_num": "5"
713
- },
714
- {
715
- "text": "In this paper, we have described the algorithm and implementation we used for parallel text mining, translation model training, and some results we obtained in CLIR experiments. Although further work remains to be done, we can conclude that it is possible to automatically construct a Chinese-English parallel corpus from the Web. The current system can be easily adapted to other language pairs. Despite the noisy nature of the corpus and the great difference in the languages, the evaluation lexicons generated by the translation model produced acceptable precision. While the current CLIR results are not as encouraging as those of English-French CLIR, they could be improved in various ways, such as improving the alignment method by adapting cognate definitions to HTML markup, incorporating a lexicon and/or removing some common function words in translated queries. We hope to be able to demonstrate in the near future that a fine-tuned English-Chinese translation model can provide query translations for CLIR with the same quality produced by MT systems.",
716
- "cite_spans": [],
717
- "ref_spans": [],
718
- "eq_spans": [],
719
- "section": "Summary",
720
- "sec_num": "5"
721
- },
722
- {
723
- "text": "1An anchor text is a piece of text on a Web page which, when clicked on, will take you to another linked page. To be helpful, it usually contains the key information about the linked page.",
724
- "cite_spans": [],
725
- "ref_spans": [],
726
- "eq_spans": [],
727
- "section": "",
728
- "sec_num": null
729
- }
730
- ],
731
- "back_matter": [],
732
- "bib_entries": {
733
- "BIBREF0": {
734
- "ref_id": "b0",
735
- "title": "Anonymous. 1999a. Sunrain.net -English-Chinese dictionary",
736
- "authors": [],
737
- "year": null,
738
- "venue": "",
739
- "volume": "",
740
- "issue": "",
741
- "pages": "",
742
- "other_ids": {},
743
- "num": null,
744
- "urls": [],
745
- "raw_text": "Anonymous. 1999a. Sunrain.net -English-Chinese dictionary, http://sunrain.net/r_ecdict _e.htm. Anonymous. 1999b. Sunshine WebTran server. http://www.readworld.com/translate.htm.",
746
- "links": null
747
- },
748
- "BIBREF1": {
749
- "ref_id": "b1",
750
- "title": "The mathematics of machine translation: Parameter estimation",
751
- "authors": [
752
- {
753
- "first": "P",
754
- "middle": [
755
- "F"
756
- ],
757
- "last": "Brown",
758
- "suffix": ""
759
- },
760
- {
761
- "first": "J",
762
- "middle": [
763
- "C"
764
- ],
765
- "last": "Lai",
766
- "suffix": ""
767
- },
768
- {
769
- "first": "R",
770
- "middle": [
771
- "L"
772
- ],
773
- "last": "Mercer",
774
- "suffix": ""
775
- }
776
- ],
777
- "year": 1991,
778
- "venue": "29th Annual Meeting of the Association for Computational Linguistics",
779
- "volume": "19",
780
- "issue": "",
781
- "pages": "263--311",
782
- "other_ids": {},
783
- "num": null,
784
- "urls": [],
785
- "raw_text": "P. F. Brown, J. C. Lai, and R. L. Mercer. 1991. Aligning sentences in parallel corpora. In 29th Annual Meeting of the Association for Computa- tional Linguistics, pages 89-94, Berkeley, Calif. P. F. Brown, S. A. Della Pietra, V. J. Della Pietra, and R. L. Mercer. 1993. The mathematics of ma- chine translation: Parameter estimation. Compu- tational Linguistics, 19:263-311.",
786
- "links": null
787
- },
788
- "BIBREF2": {
789
- "ref_id": "b2",
790
- "title": "Aligning sentences in bilingual corpora using lexical information",
791
- "authors": [
792
- {
793
- "first": "S",
794
- "middle": [
795
- "F"
796
- ],
797
- "last": "Chen",
798
- "suffix": ""
799
- }
800
- ],
801
- "year": 1993,
802
- "venue": "Proceedings of the 31th Annual Meeting of the Association for Computational Linguistics",
803
- "volume": "",
804
- "issue": "",
805
- "pages": "9--16",
806
- "other_ids": {},
807
- "num": null,
808
- "urls": [],
809
- "raw_text": "S. F. Chen. 1993. Aligning sentences in bilingual corpora using lexical information. In Proceedings of the 31th Annual Meeting of the Association for Computational Linguistics, pages 9-16, Colum- bus, Ohio.",
810
- "links": null
811
- },
812
- "BIBREF3": {
813
- "ref_id": "b3",
814
- "title": "Cedict (chinese-english dictionary) project",
815
- "authors": [
816
- {
817
- "first": "Paul",
818
- "middle": [],
819
- "last": "Denisowski",
820
- "suffix": ""
821
- }
822
- ],
823
- "year": 1999,
824
- "venue": "",
825
- "volume": "",
826
- "issue": "",
827
- "pages": "",
828
- "other_ids": {},
829
- "num": null,
830
- "urls": [],
831
- "raw_text": "Paul Denisowski. 1999. Cedict (chinese-english dic- tionary) project, http://www.mindspring.com/ paul_denisowski/cedict.html.",
832
- "links": null
833
- },
834
- "BIBREF4": {
835
- "ref_id": "b4",
836
- "title": "A program for aligning sentences in bilingual corpora",
837
- "authors": [
838
- {
839
- "first": "A",
840
- "middle": [],
841
- "last": "William",
842
- "suffix": ""
843
- },
844
- {
845
- "first": "Kenneth",
846
- "middle": [
847
- "W"
848
- ],
849
- "last": "Gale",
850
- "suffix": ""
851
- },
852
- {
853
- "first": "",
854
- "middle": [],
855
- "last": "Church",
856
- "suffix": ""
857
- }
858
- ],
859
- "year": 1991,
860
- "venue": "Proceedings of the 29th Annual Meeting of the Association for Computational Linguistics",
861
- "volume": "",
862
- "issue": "",
863
- "pages": "177--184",
864
- "other_ids": {},
865
- "num": null,
866
- "urls": [],
867
- "raw_text": "William A. Gale and Kenneth W. Church. 1991. A program for aligning sentences in bilingual cor- pora. In Proceedings of the 29th Annual Meeting of the Association for Computational Linguistics, pages 177-184, Berkeley, Calif.",
868
- "links": null
869
- },
870
- "BIBREF5": {
871
- "ref_id": "b5",
872
- "title": "SILC: un syst~me d'identification de la langue",
873
- "authors": [
874
- {
875
- "first": "P",
876
- "middle": [],
877
- "last": "Isabelle",
878
- "suffix": ""
879
- },
880
- {
881
- "first": "G",
882
- "middle": [],
883
- "last": "Foster",
884
- "suffix": ""
885
- },
886
- {
887
- "first": "P",
888
- "middle": [],
889
- "last": "Plamondon",
890
- "suffix": ""
891
- }
892
- ],
893
- "year": 1997,
894
- "venue": "",
895
- "volume": "",
896
- "issue": "",
897
- "pages": "",
898
- "other_ids": {},
899
- "num": null,
900
- "urls": [],
901
- "raw_text": "P. Isabelle, G. Foster, and P. Plamondon. 1997. SILC: un syst~me d'identification de la langue et du codage, http://www- rali.iro.umontreal.ca/ProjetSILC.en.html.",
902
- "links": null
903
- },
904
- "BIBREF6": {
905
- "ref_id": "b6",
906
- "title": "Text-translation alignment",
907
- "authors": [
908
- {
909
- "first": "M",
910
- "middle": [],
911
- "last": "Kay",
912
- "suffix": ""
913
- },
914
- {
915
- "first": "M",
916
- "middle": [],
917
- "last": "Rsscheisen",
918
- "suffix": ""
919
- }
920
- ],
921
- "year": 1993,
922
- "venue": "Computational Linguistics",
923
- "volume": "19",
924
- "issue": "",
925
- "pages": "121--142",
926
- "other_ids": {},
927
- "num": null,
928
- "urls": [],
929
- "raw_text": "M. Kay and M. RSscheisen. 1993. Text-translation alignment. Computational Linguistics, 19:121- 142.",
930
- "links": null
931
- },
932
- "BIBREF7": {
933
- "ref_id": "b7",
934
- "title": "English-chinese cross-language retrieval based on a translation package",
935
- "authors": [
936
- {
937
- "first": "K",
938
- "middle": [
939
- "L"
940
- ],
941
- "last": "Kwok",
942
- "suffix": ""
943
- }
944
- ],
945
- "year": 1999,
946
- "venue": "Workshop of Machine Translation for Cross Language Information Retrieval, Machine Translation Summit VII",
947
- "volume": "",
948
- "issue": "",
949
- "pages": "",
950
- "other_ids": {},
951
- "num": null,
952
- "urls": [],
953
- "raw_text": "K. L. Kwok. 1999. English-chinese cross-language retrieval based on a translation package. In Work- shop of Machine Translation for Cross Language Information Retrieval, Machine Translation Sum- mit VII, Singapore.",
954
- "links": null
955
- },
956
- "BIBREF8": {
957
- "ref_id": "b8",
958
- "title": "Unit completion for a computer-aided translation typing system",
959
- "authors": [
960
- {
961
- "first": "P",
962
- "middle": [],
963
- "last": "Langlais",
964
- "suffix": ""
965
- },
966
- {
967
- "first": "G",
968
- "middle": [],
969
- "last": "Foster",
970
- "suffix": ""
971
- },
972
- {
973
- "first": "G",
974
- "middle": [],
975
- "last": "Lapalme",
976
- "suffix": ""
977
- }
978
- ],
979
- "year": 2000,
980
- "venue": "Applied Natural Language Processing Conference (ANLP)",
981
- "volume": "",
982
- "issue": "",
983
- "pages": "",
984
- "other_ids": {},
985
- "num": null,
986
- "urls": [],
987
- "raw_text": "P. Langlais, G. Foster, and G. Lapalme. 2000. Unit completion for a computer-aided translation typ- ing system. In Applied Natural Language Pro- cessing Conference (ANLP), Seattle, Washington, May.",
988
- "links": null
989
- },
990
- "BIBREF9": {
991
- "ref_id": "b9",
992
- "title": "Cross-language information retrieval based on parallel texts and automatic mining parallel texts from the Web",
993
- "authors": [
994
- {
995
- "first": "Jianyun",
996
- "middle": [],
997
- "last": "Nie",
998
- "suffix": ""
999
- },
1000
- {
1001
- "first": "Michel",
1002
- "middle": [],
1003
- "last": "Simard",
1004
- "suffix": ""
1005
- },
1006
- {
1007
- "first": "Pierre",
1008
- "middle": [],
1009
- "last": "Isabelle",
1010
- "suffix": ""
1011
- },
1012
- {
1013
- "first": "Richard",
1014
- "middle": [],
1015
- "last": "Durand",
1016
- "suffix": ""
1017
- }
1018
- ],
1019
- "year": 1999,
1020
- "venue": "ACM SIGIR '99",
1021
- "volume": "",
1022
- "issue": "",
1023
- "pages": "74--81",
1024
- "other_ids": {},
1025
- "num": null,
1026
- "urls": [],
1027
- "raw_text": "Jianyun Nie, Michel Simard, Pierre Isabelle, and Richard Durand. 1999. Cross-language informa- tion retrieval based on parallel texts and auto- matic mining parallel texts from the Web. In ACM SIGIR '99, pages 74-81, August.",
1028
- "links": null
1029
- },
1030
- "BIBREF10": {
1031
- "ref_id": "b10",
1032
- "title": "Parallel stands: A preliminary investigation into mining the Web for bilingual text",
1033
- "authors": [
1034
- {
1035
- "first": "Philip",
1036
- "middle": [],
1037
- "last": "Resnik",
1038
- "suffix": ""
1039
- }
1040
- ],
1041
- "year": 1998,
1042
- "venue": "AMTA '98",
1043
- "volume": "",
1044
- "issue": "",
1045
- "pages": "",
1046
- "other_ids": {},
1047
- "num": null,
1048
- "urls": [],
1049
- "raw_text": "Philip Resnik. 1998. Parallel stands: A preliminary investigation into mining the Web for bilingual text. In AMTA '98, October.",
1050
- "links": null
1051
- },
1052
- "BIBREF11": {
1053
- "ref_id": "b11",
1054
- "title": "Using cognates to align sentences in bilingual corpora",
1055
- "authors": [
1056
- {
1057
- "first": "Michel",
1058
- "middle": [],
1059
- "last": "Simard",
1060
- "suffix": ""
1061
- },
1062
- {
1063
- "first": "George",
1064
- "middle": [
1065
- "F"
1066
- ],
1067
- "last": "Foster",
1068
- "suffix": ""
1069
- },
1070
- {
1071
- "first": "Pierre",
1072
- "middle": [],
1073
- "last": "Isabelle",
1074
- "suffix": ""
1075
- }
1076
- ],
1077
- "year": 1992,
1078
- "venue": "Proceedings of TMI-92",
1079
- "volume": "",
1080
- "issue": "",
1081
- "pages": "",
1082
- "other_ids": {},
1083
- "num": null,
1084
- "urls": [],
1085
- "raw_text": "Michel Simard, George F. Foster, and Pierre Is- abelle. 1992. Using cognates to align sentences in bilingual corpora. In Proceedings of TMI-92, Montreal, Quebec.",
1086
- "links": null
1087
- },
1088
- "BIBREF12": {
1089
- "ref_id": "b12",
1090
- "title": "Aligning a parallel English-Chinese corpus statistically with lexical criteria",
1091
- "authors": [
1092
- {
1093
- "first": "Dekai",
1094
- "middle": [],
1095
- "last": "Wu",
1096
- "suffix": ""
1097
- }
1098
- ],
1099
- "year": 1994,
1100
- "venue": "",
1101
- "volume": "",
1102
- "issue": "",
1103
- "pages": "",
1104
- "other_ids": {},
1105
- "num": null,
1106
- "urls": [],
1107
- "raw_text": "Dekai Wu. 1994. Aligning a parallel English- Chinese corpus statistically with lexical criteria.",
1108
- "links": null
1109
- },
1110
- "BIBREF13": {
1111
- "ref_id": "b13",
1112
- "title": "ACL-9$: 32nd Annual Meeting of the Assoc. for Computational Linguistics",
1113
- "authors": [],
1114
- "year": null,
1115
- "venue": "",
1116
- "volume": "",
1117
- "issue": "",
1118
- "pages": "80--87",
1119
- "other_ids": {},
1120
- "num": null,
1121
- "urls": [],
1122
- "raw_text": "In ACL-9$: 32nd Annual Meeting of the Assoc. for Computational Linguistics, pages 80-87, Las Cruces, NM, June.",
1123
- "links": null
1124
- },
1125
- "BIBREF14": {
1126
- "ref_id": "b14",
1127
- "title": "Large-scale automatic extraction of an English-Chinese lexicon",
1128
- "authors": [
1129
- {
1130
- "first": "Dekai",
1131
- "middle": [],
1132
- "last": "Wu",
1133
- "suffix": ""
1134
- }
1135
- ],
1136
- "year": 1995,
1137
- "venue": "Machine Translation",
1138
- "volume": "9",
1139
- "issue": "3-4",
1140
- "pages": "285--313",
1141
- "other_ids": {},
1142
- "num": null,
1143
- "urls": [],
1144
- "raw_text": "Dekai Wu. 1995. Large-scale automatic extraction of an English-Chinese lexicon. Machine Transla- tion, 9(3-4):285-313.",
1145
- "links": null
1146
- }
1147
- },
1148
- "ref_entries": {
1149
- "FIGREF0": {
1150
- "type_str": "figure",
1151
- "text": "The workflow of the mining process.",
1152
- "uris": null,
1153
- "num": null
1154
- },
1155
- "FIGREF1": {
1156
- "type_str": "figure",
1157
- "text": "An alignment example using pure length-based method.",
1158
- "uris": null,
1159
- "num": null
1160
- },
1161
- "FIGREF2": {
1162
- "type_str": "figure",
1163
- "text": "An alignment example considering cognates.",
1164
- "uris": null,
1165
- "num": null
1166
- },
1167
- "FIGREF3": {
1168
- "type_str": "figure",
1169
- "text": "Effect of stop lists in C-E translation.",
1170
- "uris": null,
1171
- "num": null
1172
- },
1173
- "FIGREF4": {
1174
- "type_str": "figure",
1175
- "text": "The training source and the CLIR collections were from different domains. The Web corpus are retrieved from the parallel sites in Hong Kong while the Chinese collection is from People's Daily and Xinhua News Agency, which are published in mainland China. As the result, some important terms such as ~$ $ (mostfavored-nation) and ---I!! ~ ~ (one-nation-twosystems) in the collection are not known by the model.",
1176
- "uris": null,
1177
- "num": null
1178
- }
1179
- }
1180
- }
1181
- }

Full_text_JSON/prefixA/json/A00/A00-1005.json DELETED
@@ -1,993 +0,0 @@
1
- {
2
- "paper_id": "A00-1005",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:44.052213Z"
6
- },
7
- "title": "PartslD: A Dialogue-Based System for Identifying Parts for Medical Systems",
8
- "authors": [
9
- {
10
- "first": "Amit",
11
- "middle": [],
12
- "last": "Bagga",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "[email protected]"
16
- },
17
- {
18
- "first": "Tomek",
19
- "middle": [],
20
- "last": "Strzalkowski",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": "[email protected]"
24
- },
25
- {
26
- "first": "G",
27
- "middle": [],
28
- "last": "Bowden Wise",
29
- "suffix": "",
30
- "affiliation": {},
31
- "email": ""
32
- }
33
- ],
34
- "year": "",
35
- "venue": null,
36
- "identifiers": {},
37
- "abstract": "This paper describes a system that provides customer service by allowing users to retrieve identification numbers of parts for medical systems using spoken natural language dialogue. The paper also presents an evaluation of the system which shows that the system successfully retrieves the identification numbers of approximately 80% of the parts.",
38
- "pdf_parse": {
39
- "paper_id": "A00-1005",
40
- "_pdf_hash": "",
41
- "abstract": [
42
- {
43
- "text": "This paper describes a system that provides customer service by allowing users to retrieve identification numbers of parts for medical systems using spoken natural language dialogue. The paper also presents an evaluation of the system which shows that the system successfully retrieves the identification numbers of approximately 80% of the parts.",
44
- "cite_spans": [],
45
- "ref_spans": [],
46
- "eq_spans": [],
47
- "section": "Abstract",
48
- "sec_num": null
49
- }
50
- ],
51
- "body_text": [
52
- {
53
- "text": "Currently people deal with customer service centers either over the phone or on the world wide web on a regular basis. These service centers support a wide variety of tasks including checking the balance of a bank or a credit card account, transferring money from one account to another, buying airline tickets, and filing one's income tax returns. Most of these customer service centers use interactive voice response (IVR) systems on the front-end for determining the user's need by providing a list of options that the user can choose from, and then routing the call appropriately.",
54
- "cite_spans": [],
55
- "ref_spans": [],
56
- "eq_spans": [],
57
- "section": "Introduction",
58
- "sec_num": null
59
- },
60
- {
61
- "text": "The IVRs also gather essential information like the user's bank account number, social security number, etc. For back-end support, the customer service centers use either specialized computer systems (example: a system that retrieves the account balance from a database), or, as in most cases, human operators.",
62
- "cite_spans": [],
63
- "ref_spans": [],
64
- "eq_spans": [],
65
- "section": "Introduction",
66
- "sec_num": null
67
- },
68
- {
69
- "text": "However, the IVR systems are unwieldy to use. Often a user's needs are not covered by the options provided by the system forcing the user to hit 0 to transfer to a human operator. In addition, frequent users often memorize the sequence of options that will get them the desired information. Therefore, any change in the options greatly inconveniences these users. Moreover, there are users that always hit 0 to speak to a live operator because they prefer to deal with a human instead of a machine. Finally, as customer service providers continue to rapidly add functionality to their IVR systems, the size and complexity of these systems continues to grow proportionally. In some popular systems like the IVR system that provides customer service for the Internal Revenue Service (IRS), the user is initially bombarded with 10 different options with each option leading to sub-menus offering a further 3-5 options, and so on. The total number of nodes in the tree corresponding to the IRS' IVR system is quite large (approximately 100) making it extremely complex to use.",
70
- "cite_spans": [],
71
- "ref_spans": [],
72
- "eq_spans": [],
73
- "section": "Introduction",
74
- "sec_num": null
75
- },
76
- {
77
- "text": "Some customer service providers have started to take advantage of the recent advances in speech recognition technology. Therefore, some of the IVR systems now allow users to say the option number (1, 2, 3 ..... etc.) instead of pressing the corresponding button. In addition, some providers have taken this a step further by allowing users to say a keyword or a phrase from a list of keywords and/or phrases. For example, AT&T, the long distance company, provides their users the following options: \"Please say information for information on placing a call, credit for requesting credit, or operator to speak to an operator.\" However, given the improved speech recognition technology, and the research done in natural language dialogue over the last decade, there exists tremendous potential in enhancing these customer service centers by allowing users to conduct a more natural human-like dialogue with an automated system to provide a customer-friendly system. In this paper we describe a system that uses natural language dialogue to provide customer service for a medical domain.",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "Introduction",
82
- "sec_num": null
83
- },
84
- {
85
- "text": "The system allows field engineers to call and obtain identification numbers of parts for medical systems using natural language dialogue. We first describe some work done previously in using natural language dialogue for customer service applications. Next, we present the architecture of our system along with a description of each of the key components. Finally, we conclude by providing results from an evaluation of the system.",
86
- "cite_spans": [],
87
- "ref_spans": [],
88
- "eq_spans": [],
89
- "section": "Introduction",
90
- "sec_num": null
91
- },
92
- {
93
- "text": "As mentioned earlier, some customer service centers now allow users to say either the option number or a keyword from a list of options/descriptions. However, the only known work which automates part of a customer service center using natural language dialogue is the one by Chu-Carroll and Carpenter (1999). The system described here is used as the front-end of a bank's customer service center. It routes calls by extracting key phrases from a user utterance and then by statistically comparing these phrases to phrases extracted from utterances in a training corpus consisting of pre-recorded calls where the routing was done by a human. The call is routed to the destination of the utterance from the training corpus that is most \"similar\" to the current utterance. On occasion, the system will interact with the user to clarify the user's request by asking a question. For example, if the user wishes to reach the loan department, the system will ask if the loan is for an automobile, or a home. Other related work is (Georgila et al., 1998) .",
94
- "cite_spans": [
95
- {
96
- "start": 1023,
97
- "end": 1046,
98
- "text": "(Georgila et al., 1998)",
99
- "ref_id": "BIBREF7"
100
- }
101
- ],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "Previous Work",
105
- "sec_num": "1."
106
- },
107
- {
108
- "text": "While we are aware of the work being done by speech recognition companies like Nuance (www.nuance.com) and Speechworks (www.speechworks.com) in the area of providing more natural language dialogue-based customer service, we are not aware of any conference or journal publications from them. Some magazine articles which mention their work are (Rosen 1999; Rossheim 1999; Greenemeier 1999 ; Meisel 1999 ). In addition, when we tried out a demo of Nuance's systems, we found that their systems had a very IVRish feel to them. For example, if one wanted to transfer $50 from one account to another, the system would first ask the account that the money was coming from, then the account that the money was going to, and finally, the amount to be transferred. Therefore, a user could not say \"I want to transfer $50 from my savings account to my checking account\" and have the system conduct that transaction.",
109
- "cite_spans": [
110
- {
111
- "start": 343,
112
- "end": 355,
113
- "text": "(Rosen 1999;",
114
- "ref_id": "BIBREF12"
115
- },
116
- {
117
- "start": 356,
118
- "end": 370,
119
- "text": "Rossheim 1999;",
120
- "ref_id": "BIBREF13"
121
- },
122
- {
123
- "start": 371,
124
- "end": 389,
125
- "text": "Greenemeier 1999 ;",
126
- "ref_id": "BIBREF9"
127
- },
128
- {
129
- "start": 390,
130
- "end": 401,
131
- "text": "Meisel 1999",
132
- "ref_id": "BIBREF10"
133
- }
134
- ],
135
- "ref_spans": [],
136
- "eq_spans": [],
137
- "section": "Previous Work",
138
- "sec_num": "1."
139
- },
140
- {
141
- "text": "In addition to the works mentioned above, there have been several classic projects in the area of natural language dialogue like TRAINS/TRIPS project at Rochester (Allen et al., 1989 (Allen et al., , 1995 (Allen et al., , 1996 , Duke's Circuit-Fixit-Shoppe and Pascal Tutoring System (Biermann et al., 1997; 1995) , etc. While the Circuit-Fixit-Shoppe system helps users fix a circuit through a dialogue with the system, the TRIPS and the TRAINS projects allow users to plan their itineraries through dialogue. Duke's Pascal tutoring system helps students in an introductory programming class debug their programs by allowing them to analyze their syntax errors, get additional information on the error, and learn the correct syntax. Although these systems have been quite successful, they use detailed models of the domain and therefore cannot be used for diverse applications such as the ones required for customer service centers. Other related work on dialogue include (Carberry, 1990; Grosz and Sidner, 1986; Reichman, 1981) .",
142
- "cite_spans": [
143
- {
144
- "start": 163,
145
- "end": 182,
146
- "text": "(Allen et al., 1989",
147
- "ref_id": null
148
- },
149
- {
150
- "start": 183,
151
- "end": 204,
152
- "text": "(Allen et al., , 1995",
153
- "ref_id": "BIBREF0"
154
- },
155
- {
156
- "start": 205,
157
- "end": 226,
158
- "text": "(Allen et al., , 1996",
159
- "ref_id": "BIBREF1"
160
- },
161
- {
162
- "start": 284,
163
- "end": 307,
164
- "text": "(Biermann et al., 1997;",
165
- "ref_id": "BIBREF4"
166
- },
167
- {
168
- "start": 308,
169
- "end": 313,
170
- "text": "1995)",
171
- "ref_id": "BIBREF0"
172
- },
173
- {
174
- "start": 973,
175
- "end": 989,
176
- "text": "(Carberry, 1990;",
177
- "ref_id": "BIBREF5"
178
- },
179
- {
180
- "start": 990,
181
- "end": 1013,
182
- "text": "Grosz and Sidner, 1986;",
183
- "ref_id": "BIBREF8"
184
- },
185
- {
186
- "start": 1014,
187
- "end": 1029,
188
- "text": "Reichman, 1981)",
189
- "ref_id": "BIBREF11"
190
- }
191
- ],
192
- "ref_spans": [],
193
- "eq_spans": [],
194
- "section": "Previous Work",
195
- "sec_num": "1."
196
- },
197
- {
198
- "text": "Initially, we were approached by the medical systems business of our company for help in reducing the number of calls handled by human operators at their call center. An analysis of the types of customer service provided by their call center showed that a large volume of calls handled by their operators were placed by field engineers requesting identification numbers of parts for various medical systems. The ID numbers were most often used for ordering the corresponding parts using an automated IVR system. Therefore, the system we have built helps automate some percentage of these calls by allowing the engineer to describe a part using natural language. The rest of this section describes our system in detail.",
199
- "cite_spans": [],
200
- "ref_spans": [],
201
- "eq_spans": [],
202
- "section": "PartslD: A System for Identification of Parts for Medical Systems",
203
- "sec_num": "2."
204
- },
205
- {
206
- "text": "The database we used for our system was the same as the one used by the operators at the call center. This database consists of the most common parts and was built by the operators themselves. However, the data contained in the database is not clean and there are several types of errors including mis-spellings, use of nonstandard abbreviations, use of several different abbreviations for the same word, etc.",
207
- "cite_spans": [],
208
- "ref_spans": [],
209
- "eq_spans": [],
210
- "section": "D a t a",
211
- "sec_num": "2.1"
212
- },
213
- {
214
- "text": "The database consists of approximately 7000 different parts. For each part, the database contains its identification number, a description, and the product (machine type) that it is used in. The descriptions consist of approximately 60,000 unique words of which approximately 3,000 are words which either are non-standard abbreviations or are unique to the medical domain (example: collimator).",
215
- "cite_spans": [],
216
- "ref_spans": [],
217
- "eq_spans": [],
218
- "section": "D a t a",
219
- "sec_num": "2.1"
220
- },
221
- {
222
- "text": "Due to the large size of the database, we did not attempt to clean the data. However, we did build several data structures based on the database which were used by the system. The primary data structures built were two inverted hash tables corresponding to the product, and the part description fields in the database. The inverted hash tables were built as follows: 1)",
223
- "cite_spans": [],
224
- "ref_spans": [],
225
- "eq_spans": [],
226
- "section": "D a t a",
227
- "sec_num": "2.1"
228
- },
229
- {
230
- "text": "Each product and part description field was split into words.",
231
- "cite_spans": [],
232
- "ref_spans": [],
233
- "eq_spans": [],
234
- "section": "D a t a",
235
- "sec_num": "2.1"
236
- },
237
- {
238
- "text": "2) Stop-words (words containing no information like: a, the, an, etc.) were filtered.",
239
- "cite_spans": [],
240
- "ref_spans": [],
241
- "eq_spans": [],
242
- "section": "D a t a",
243
- "sec_num": "2.1"
244
- },
245
- {
246
- "text": "Each remaining word was inserted as the index of the appropriate hash table with the identification number of the part being the value corresponding to the index. Therefore, for each non-stop-word word used in describing a part, the hash table contains a list of all the parts whose descriptions contained that word.",
247
- "cite_spans": [],
248
- "ref_spans": [],
249
- "eq_spans": [],
250
- "section": "3)",
251
- "sec_num": null
252
- },
253
- {
254
- "text": "Similarly, the products hash table contains a list of all parts corresponding to each product word.",
255
- "cite_spans": [],
256
- "ref_spans": [],
257
- "eq_spans": [],
258
- "section": "3)",
259
- "sec_num": null
260
- },
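The construction described in steps 1-3 amounts to building two standard inverted indexes. The sketch below is a minimal Python rendering under assumed inputs: a `parts` iterable of (id, description, product) records and a toy `STOP_WORDS` set; the names are ours, not the system's.

```python
from collections import defaultdict

STOP_WORDS = {"a", "an", "the", "of", "for", "and"}  # illustrative stop list

def build_inverted_tables(parts):
    """parts: iterable of (part_id, description, product) records.
    Returns word -> set-of-part-ids maps for descriptions and products."""
    desc_index = defaultdict(set)
    prod_index = defaultdict(set)
    for part_id, description, product in parts:
        # 1) split each field into words, 2) drop stop-words,
        # 3) index every remaining word under this part's id
        for word in description.lower().split():
            if word not in STOP_WORDS:
                desc_index[word].add(part_id)
        for word in product.lower().split():
            if word not in STOP_WORDS:
                prod_index[word].add(part_id)
    return desc_index, prod_index
```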
261
- {
262
- "text": "The architecture of the system is shown in Figure 1 . The system was designed in a manner such that it could be easily ported from one application to another with minimal effort other than providing the domain-specific knowledge regarding the new application. Therefore, we decided to abstract away the domain-specific information into self-contained modules while keeping the other modules completely independent. The domain-specific modules are shown in the dark shaded boxes in Figure I . The remainder of this section discusses each of the modules shown in the system architecture.",
263
- "cite_spans": [],
264
- "ref_spans": [
265
- {
266
- "start": 43,
267
- "end": 51,
268
- "text": "Figure 1",
269
- "ref_id": null
270
- },
271
- {
272
- "start": 481,
273
- "end": 489,
274
- "text": "Figure I",
275
- "ref_id": null
276
- }
277
- ],
278
- "eq_spans": [],
279
- "section": "S y s t e m Architecture",
280
- "sec_num": "2.2"
281
- },
282
- {
283
- "text": "Since customer service centers are meant to be used by a variety of users, we needed a userindependent speech recognition system. In addition, since the system could not restrict the manner in which a user asked for service, the speech recognition system could not be grammar-based. Therefore, we used a general purpose dictation engine for the system. The dictation system used was Lernout & Hauspie's VoiceXPress system (www.lhs.com). Although the system was general purpose, we did provide to it the set of keywords and phrases that are commonly used in the domain thereby enabling it to better recognize these domain-specific keywords and phrases. The keywords and phrases used were simply the list of descriptions and product names corresponding to each part in the database. It should be noted that the set of domain-specific keywords and phrases was provided to the speech recognition system as a text document. In other words, the training was not done by a human speaking the keywords and phrases into the speech recognition system. In addition, the speech recognition system is far from perfect.",
284
- "cite_spans": [],
285
- "ref_spans": [],
286
- "eq_spans": [],
287
- "section": "The Speech Recognition System (ASR)",
288
- "sec_num": "2.2.1"
289
- },
290
- {
291
- "text": "The recognition rates hover around 50%, and the system has additional difficulty in identifying product names which are most often words not found in a dictionary (examples: 3MlaserCam, 8000BUCKY, etc.).",
292
- "cite_spans": [],
293
- "ref_spans": [],
294
- "eq_spans": [],
295
- "section": "The Speech Recognition System (ASR)",
296
- "sec_num": "2.2.1"
297
- },
298
- {
299
- "text": "The parser is domain-driven in the sense that it uses domain-dependent information produced by the lexicon to look for information, in a user utterance, that is useful in the current domain. However, it does not attempt to understand fully each user utterance. It is robust enough to handle ungrammatical sentences, short phrases, and sentences that contain mis-recognized text.",
300
- "cite_spans": [],
301
- "ref_spans": [],
302
- "eq_spans": [],
303
- "section": "Parser and the Lexicon",
304
- "sec_num": "2.2.2"
305
- },
306
- {
307
- "text": "The lexicon, in addition to providing domain-dependent keywords and phrases to the parser, also provides the semantic knowledge associated with each keyword and phrase. Therefore, for each content word in the inverted hash tables, the lexicon contains entries which help the system determine whether the word was used in a part description, or a product name. In addition, the lexicon also provides the semantic knowledge associated with the pre-specified actions which can be taken by the user like \"operator\" which allows the user to transfer to an operator, and \"stop,\" or \"quit\" which allow the user to quit the system. Some sample entries are:",
308
- "cite_spans": [],
309
- "ref_spans": [],
310
- "eq_spans": [],
311
- "section": "Parser and the Lexicon",
312
- "sec_num": "2.2.2"
313
- },
314
- {
315
- "text": "collimator => (description_word, collimator) camera => (product_word, camera) operator => (user action, operator) etc.",
316
- "cite_spans": [],
317
- "ref_spans": [],
318
- "eq_spans": [],
319
- "section": "Parser and the Lexicon",
320
- "sec_num": "2.2.2"
321
- },
322
- {
323
- "text": "The parser scans a user utterance and returns, as output, a list of semantic tuples associated with each keyword/phrase contained in the utterance. It is mainly interested in \"key words\" (words that are contained in product and part descriptions, user action words, etc.) and it ignores all the other words in the user utterance. The parser also returns a special tuple containing the entire input string which may be used later by the context-based parser for sub-string matching specially in cases when the DM has asked a specific question to the user and is expecting a particular kind of response.",
324
- "cite_spans": [],
325
- "ref_spans": [],
326
- "eq_spans": [],
327
- "section": "Parser and the Lexicon",
328
- "sec_num": "2.2.2"
329
- },
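A minimal sketch of this keyword-spotting behavior, assuming a word-level lexicon shaped like the sample entries above (multi-word phrases and the real lexicon format are omitted); `raw_string` is our label for the special tuple carrying the whole input.

```python
LEXICON = {  # hypothetical entries mirroring the examples above
    "collimator": ("description_word", "collimator"),
    "camera":     ("product_word", "camera"),
    "operator":   ("user_action", "operator"),
}

def parse_utterance(utterance):
    """Return semantic tuples for known keywords, ignoring all other
    words, plus a special tuple holding the raw input for later
    context-based (sub-string) matching."""
    tuples = [LEXICON[w] for w in utterance.lower().split() if w in LEXICON]
    tuples.append(("raw_string", utterance))
    return tuples
```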
330
- {
331
- "text": "The filler takes as input the set of tuples generated by the parser and attempts to check off templates contained in the templates module using these tuples, The set of templates in the templates module contains most of remaining domain-specific knowledge required by the system.",
332
- "cite_spans": [],
333
- "ref_spans": [],
334
- "eq_spans": [],
335
- "section": "The Filler and Template Modules",
336
- "sec_num": "2.2.3"
337
- },
338
- {
339
- "text": "Each template is an internal representation of a part in the database. It contains for each part, its ID, its description, and the product which contains it. In addition, there are several additional templates corresponding to pre-specified user actions like \"operator,\" and \"quit.\" A sample template follows:",
340
- "cite_spans": [],
341
- "ref_spans": [],
342
- "eq_spans": [],
343
- "section": "The Filler and Template Modules",
344
- "sec_num": "2.2.3"
345
- },
346
- {
347
- "text": "For each tuple input from the parser, the filler checks off the fields which correspond to the tuple. For example, if the filler gets as input (description_word, collimator) , it checks off the description fields of those templates containing collimator as a word in the field. A template is checked off iff one or more of its fields is checked off.",
348
- "cite_spans": [
349
- {
350
- "start": 143,
351
- "end": 173,
352
- "text": "(description_word, collimator)",
353
- "ref_id": null
354
- }
355
- ],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "tl__I = ( 'product' = > 'SFD', 'product__ids' = > 2229005\" 'product_descriptions' => 'IR RECEIVER PC BOARD CI104 BISTABLE MEMORY')",
359
- "sec_num": null
360
- },
361
- {
362
- "text": "In addition, the filler also maintains a list of all description and product words passed through the tuples (i.e. these words have been uttered by the user). These two lists are subsequently passed to the dialogue manager.",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "tl__I = ( 'product' = > 'SFD', 'product__ids' = > 2229005\" 'product_descriptions' => 'IR RECEIVER PC BOARD CI104 BISTABLE MEMORY')",
367
- "sec_num": null
368
- },
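The check-off step is compact enough to sketch, reusing the indexes and tuple shapes from the sketches above (our names, not the system's): a template counts as checked off when any of its fields matches, and the uttered description/product words are collected for the dialogue manager.

```python
def fill_templates(tuples, desc_index, prod_index):
    """Return the set of checked-off part ids plus the description and
    product words actually uttered by the user."""
    checked = set()
    desc_words, prod_words = [], []
    for kind, word in tuples:
        if kind == "description_word":
            desc_words.append(word)
            checked |= desc_index.get(word, set())   # templates matching this word
        elif kind == "product_word":
            prod_words.append(word)
            checked |= prod_index.get(word, set())
    return checked, desc_words, prod_words
```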
369
- {
370
- "text": "Although the filler does not appear to be very helpful for the current application domain, it is an important part of the architecture for other application domains. For example, the current PartslD system is a descendant from an earlier system which allowed users to process financial transactions where the filler was instrumental in helping the dialogue manager determine the type of transaction being carried out by the user (Bagga et al., 2000) .",
371
- "cite_spans": [
372
- {
373
- "start": 429,
374
- "end": 449,
375
- "text": "(Bagga et al., 2000)",
376
- "ref_id": "BIBREF2"
377
- }
378
- ],
379
- "ref_spans": [],
380
- "eq_spans": [],
381
- "section": "tl__I = ( 'product' = > 'SFD', 'product__ids' = > 2229005\" 'product_descriptions' => 'IR RECEIVER PC BOARD CI104 BISTABLE MEMORY')",
382
- "sec_num": null
383
- },
384
- {
385
- "text": "The DM receives as input from the filler the set of templates which are checked off. In addition, it also receives two lists containing the list of description words, and product word uttered by the user. The DM proceeds using the following algorithm: 1) It first checks the set of checked off templates input from the filler. If there is exactly one template in this set, the DM asks the user to confirm the part that the template corresponds to. Upon receipt of the confirmation from the user, it returns the identification number of the part to the user. 2) Otherwise, for each description word uttered by the user, the DM looks up the set of parts (or templates) containing the word from the descriptions inverted hash table. It then computes the intersection of these sets. If the intersection is empty, the DM computes the union of these sets and proceeds treating the union as the intersection. 3) If the intersection obtained from (2) above contains exactly one template, the DM asks the user to confirm the part corresponding to the template as in (1) above. 4) Otherwise, the DM looks at the set of product words uttered by the user. If this set is empty, the DM queries the user for the product name. Since the DM is expecting a product name here, the input provided by the user is handled by the context-based parser.",
386
- "cite_spans": [],
387
- "ref_spans": [],
388
- "eq_spans": [],
389
- "section": "The Dialogue Manager (DM)",
390
- "sec_num": "2.2.4"
391
- },
392
- {
393
- "text": "Since most product names consist of nonstandard words consisting of alpha-numeric characters (examples: AMX3, 8000BUCKY, etc.), the recognition quality is quite poor. Therefore, the context-based parser ranks the input received from the user using a sub-string matching algorithm that uses character-based unigram and bigram counts (details are provided in the next section). The sub-string matching algorithm greatly enhances the performance of the system (as shown in the sample dialogue below). 5) If the set of product words is non-empty, or if the DM has successfully queried the user for a product name, it extracts the set of parts (templates) containing each product word from the product words inverted hash table. It then computes an intersection of these sets with the intersection set of description words obtained from (2) above. The resulting intersection is the joint product and description intersection. 6) If the joint intersection has exactly one template, the DM proceeds as in (1) above. Alternatively, if the number of templates in the joint intersection is less than 4, the DM lists the parts corresponding to each of these and asks the user to confirm the correct one. 7) If there are more than 4 templates in the joint intersection, the DM ranks the templates based upon word overlap with the description words uttered by the user. If the number of resulting top-ranked templates is less than 4, the DM proceeds as in the second half of (6) above. 8) If the joint intersection is empty, or in the highly unlikely case of there being more than 4 top-ranked templates in (7), the DM asks the user to enter additional disambiguating information.",
394
- "cite_spans": [],
395
- "ref_spans": [],
396
- "eq_spans": [],
397
- "section": "The Dialogue Manager (DM)",
398
- "sec_num": "2.2.4"
399
- },
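The set arithmetic at the heart of steps 2-5 can be sketched directly; this is a reconstruction using the same assumed indexes as above, not the original code, and it omits the confirmation dialogue, the 4-template threshold, and step 7's word-overlap ranking.

```python
def candidate_parts(desc_words, prod_words, desc_index, prod_index):
    """Narrow the candidate set: intersect per-word part sets (step 2),
    fall back to their union when the intersection is empty, then apply
    the product constraint (step 5)."""
    desc_sets = [desc_index.get(w, set()) for w in desc_words]
    if not desc_sets:
        return set()
    candidates = set.intersection(*desc_sets)
    if not candidates:                 # empty intersection -> use the union
        candidates = set.union(*desc_sets)
    if prod_words:                     # joint product/description intersection
        prod_sets = [prod_index.get(w, set()) for w in prod_words]
        candidates &= set.intersection(*prod_sets)
    return candidates
```

Step 7's ranking can then be a simple `sorted(candidates, ...)` keyed on the number of description words each candidate shares with the user's utterance.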
400
- {
401
- "text": "The goal of the DM is to hone in on the part (template) desired by the user, and it has to determine this from the set of templates input to it by the filler. It has to be robust enough to deal with poor recognition quality, inadequate information input by the user, and ambiguous data. Therefore, the DM is designed to handle these issues. For example, description words that are mis-recognized as other description words usually cause the intersection of the sets of parts corresponding to these words to be empty. The DM, in this case, takes a union of the sets of parts corresponding to the description words thereby ensuring that the template corresponding to the desired part is in the union.",
402
- "cite_spans": [],
403
- "ref_spans": [],
404
- "eq_spans": [],
405
- "section": "The Dialogue Manager (DM)",
406
- "sec_num": "2.2.4"
407
- },
408
- {
409
- "text": "The DM navigates the space of possibilities by first analyzing the intersection of the sets of parts corresponding to the description words uttered by the user. If no unique part emerges, the DM then checks to see if the user has provided any information about the product that the part is going to be used in. If no product was mentioned by the user, the DM queries the user for the product name. Once this is obtained, the DM then checks to see if a unique part corresponds to the product name and the part description provided by the user. If no unique part emerges, then the DM backs off and asks the user to re-enter the part description. Alternatively, if more than one part corresponds to the specified product and part description, then the DM ranks the parts based upon the number of words uttered by the user. Obviously, since the DM in this case uses a heuristic, it asks the user to confirm the part that ranks the highest. If more than one (although less than 4) parts have the same rank, then the DM explicitly lists these parts and asks the user to specify the desired part. It should be noted that the DM has to ensure that the information it receives is actually what the user meant. This is especially true when the DM uses heuristics, and sub-string matches (as in the case of product names). Therefore, the DM occasionally asks the user to confirm input it has received.",
410
- "cite_spans": [],
411
- "ref_spans": [],
412
- "eq_spans": [],
413
- "section": "The Dialogue Manager (DM)",
414
- "sec_num": "2.2.4"
415
- },
416
- {
417
- "text": "When the dialogue manager is expecting a certain type of input (examples : product names, yes/no responses) from the user, the user response is processed by the context-based parser. Since the type of input is known, the context-based parser uses a sub-string matching algorithm that uses character-based unigram and bigram counts to match the user input with the expectation of the dialogue manager. Therefore, the sub-string matching module takes as input a user utterance string along with a list of expected responses, and it ranks the list of expected responses based upon the user response. Listed below are the details of the algorithm : 1) The algorithm first concatenates the words of the user utterance into one long string. This is needed because the speech recognition system often breaks up the utterance into words even though a single word is being said. For example, the product name AMXll0 is often broken up into the string 'Amex 110'.",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "The Sub-String Matching Algorithm",
422
- "sec_num": "2.2.5"
423
- },
424
- {
425
- "text": "2) Next, the algorithm goes through the string formed in (1) and compares this character by character with the list of expected responses. It assigns one point for every common character. Therefore, the expected response 'AMX3' gets three points for the utterance 'Amex110'.",
426
- "cite_spans": [],
427
- "ref_spans": [],
428
- "eq_spans": [],
429
- "section": "The Sub-String Matching Algorithm",
430
- "sec_num": "2.2.5"
431
- },
432
- {
433
- "text": "3) The algorithm then compares the user utterance with the list of expected responses using 2 characters (bigrams) at a time. It assigns 2 points for each bigram match. For the example shown in (2), there are two bigram matches: the first is that the utterance starts with an 'A' (the previous character is this case is the null character), and the second is the bigram 'AM'. 4) The algorithm now compares the length of the user utterance string and the expected response. If the length of the two strings is the same, then it assigns 2 points to the expected response. 5) Finally, the algorithm calculates the number of unique characters in the expected response, and the user utterance string. If these characters are the same, then it assigns 4 points to the expected response. The expected response which has the highest number of points is the most likely one. If two or more expected responses have the same number of points, then the system asks the user to confh'm the correct one.",
434
- "cite_spans": [],
435
- "ref_spans": [],
436
- "eq_spans": [],
437
- "section": "The Sub-String Matching Algorithm",
438
- "sec_num": "2.2.5"
439
- },
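Because the five steps fully specify the scoring, the algorithm can be reconstructed directly; the sketch below is our rendering (function names and the sentinel trick for the leading bigram are ours, not the original code). On the paper's own example it gives 'AMX3' 3 unigram points plus 2 bigram matches (4 points) against 'Amex 110'.

```python
from collections import Counter

def score(expected, utterance):
    u = "".join(utterance.lower().split())            # step 1: concatenate words
    e = expected.lower()
    pts = sum((Counter(e) & Counter(u)).values())     # step 2: 1 pt/common char

    def bigrams(s):
        s = "\0" + s                                  # sentinel marks string start
        return {s[i:i + 2] for i in range(len(s) - 1)}

    pts += 2 * len(bigrams(e) & bigrams(u))           # step 3: 2 pts per bigram
    if len(e) == len(u):                              # step 4: equal lengths
        pts += 2
    if set(e) == set(u):                              # step 5: same unique chars
        pts += 4
    return pts

def best_match(expected_list, utterance):
    # Ties would be resolved by asking the user to confirm, as described above.
    return max(expected_list, key=lambda exp: score(exp, utterance))
```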
440
- {
441
- "text": "While we have not evaluated this substring matching algorithm independently, a brief evaluation in the context of the system resulted in about 90% accuracy.",
442
- "cite_spans": [],
443
- "ref_spans": [],
444
- "eq_spans": [],
445
- "section": "The Sub-String Matching Algorithm",
446
- "sec_num": "2.2.5"
447
- },
448
- {
449
- "text": "The presentation module works in one of two possible modes: over the phone, and over the web.",
450
- "cite_spans": [],
451
- "ref_spans": [],
452
- "eq_spans": [],
453
- "section": "The Presentation Module",
454
- "sec_num": "2.2.6"
455
- },
456
- {
457
- "text": "This module takes as input a string generated by the question-generation module and presents this string to the user in the appropriate mode of communication. If the speech option for the system is turned on, the speech-based output is generated using Lernout and Hauspie's RealSpeak text-to-speech system. Although the system currently cannot use both modes of communication simultaneously, we plan to incorporate this feature sometime in the future.",
458
- "cite_spans": [],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "The Presentation Module",
462
- "sec_num": "2.2.6"
463
- },
464
- {
465
- "text": "As with any dialogue system, it is extremely important for the system to be robust. Our system has the following two features which make it extremely robust: 1)",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "7 Robustness of the System",
470
- "sec_num": "2.2."
471
- },
472
- {
473
- "text": "The user can, at any given moment, say operator to transfer to an operator, quit~stop to exit, and back~restart to start afresh.",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "7 Robustness of the System",
478
- "sec_num": "2.2."
479
- },
480
- {
481
- "text": "When expecting a response from the user, if the system does not receive an expected input, it repeats the question at most twice before transferring control to an operator.",
482
- "cite_spans": [],
483
- "ref_spans": [],
484
- "eq_spans": [],
485
- "section": "2)",
486
- "sec_num": null
487
- },
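A sketch of this fallback policy (names and control flow ours): the global commands from feature 1 are honored on every turn, and an unexpected answer is re-asked at most twice before the call is handed to an operator.

```python
def ask_with_fallback(ask, is_expected, max_repeats=2):
    """ask() prompts the user and returns the recognized reply."""
    for _ in range(1 + max_repeats):
        reply = ask()
        if reply in ("operator", "stop", "quit"):
            return ("command", reply)        # feature 1: global commands
        if is_expected(reply):
            return ("answer", reply)
    return ("command", "operator")           # feature 2: give up, escalate
```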
488
- {
489
- "text": "This section gives examples of two sample dialogues that occur during the testing of the system. The system's response is indicated by \"S>\", the user's response by \"U>\", and the recognition of the user's response by \"V>\". ",
490
- "cite_spans": [],
491
- "ref_spans": [],
492
- "eq_spans": [],
493
- "section": "Sample Dialogues",
494
- "sec_num": "3."
495
- },
496
- {
497
- "text": "The goal of our evaluation was to ensure that the system helped a user successfully identify parts irrespective of the performance of the speech recognition engine for the user. In other words, we wanted to see if the system was robust enough to conduct transactions with a diverse mix of users. We tested the system with 4 different users two of whom had foreign accents. For each user, we randomly selected 20 parts from the database. The results are summarized in Table 1 . These results show that the system was quite successful in handling requests from users with a variety of accents achieving varying recognition rates. Out of the 80 parts tested, only twice did the user feel that he/she had to transfer to an operator. The system successfully retrieved the identification numbers of 79% of the parts while transferring 19% of the cases to a human operator because of extremely bad ",
498
- "cite_spans": [],
499
- "ref_spans": [
500
- {
501
- "start": 467,
502
- "end": 474,
503
- "text": "Table 1",
504
- "ref_id": null
505
- }
506
- ],
507
- "eq_spans": [],
508
- "section": "Evaluation and Results",
509
- "sec_num": "4."
510
- },
511
- {
512
- "text": "In this paper we have described a robust system that provides customer service for a medical parts application. The preliminary results are extremely encouraging with the system being able to successfully process approximately 80% of the requests from users with diverse accents.",
513
- "cite_spans": [],
514
- "ref_spans": [],
515
- "eq_spans": [],
516
- "section": "Conclusions",
517
- "sec_num": null
518
- }
519
- ],
520
- "back_matter": [
521
- {
522
- "text": "We wish to thank the GE Medical Systems team of Todd Reinke, Jim Tierney, and Lisa Naughton for providing support and funding for this project. In addition, we also wish to thank Dong Hsu of Lernout and Hauspie for his help on the ASR and the text-to-speech systems. Finally, we wish to thank the Information Technology Laboratory of GE CRD for providing additional funding for this project.",
523
- "cite_spans": [],
524
- "ref_spans": [],
525
- "eq_spans": [],
526
- "section": "Acknowledgements",
527
- "sec_num": null
528
- }
529
- ],
530
- "bib_entries": {
531
- "BIBREF0": {
532
- "ref_id": "b0",
533
- "title": "The TRAINS Project: A case study in building a conversational planning agent",
534
- "authors": [
535
- {
536
- "first": "J",
537
- "middle": [
538
- "F"
539
- ],
540
- "last": "Allen",
541
- "suffix": ""
542
- }
543
- ],
544
- "year": 1995,
545
- "venue": "Journal of Experimental and Theoretical AI",
546
- "volume": "",
547
- "issue": "7",
548
- "pages": "7--48",
549
- "other_ids": {},
550
- "num": null,
551
- "urls": [],
552
- "raw_text": "Allen, J. F. et al. (1995) The TRAINS Project: A case study in building a conversational planning agent. Journal of Experimental and Theoretical AI, (7) 7-48.",
553
- "links": null
554
- },
555
- "BIBREF1": {
556
- "ref_id": "b1",
557
- "title": "A Robust System for Natural Spoken Dialogue",
558
- "authors": [
559
- {
560
- "first": "J",
561
- "middle": [
562
- "F"
563
- ],
564
- "last": "Allen",
565
- "suffix": ""
566
- },
567
- {
568
- "first": "B",
569
- "middle": [
570
- "W"
571
- ],
572
- "last": "Miller",
573
- "suffix": ""
574
- },
575
- {
576
- "first": "E",
577
- "middle": [
578
- "K"
579
- ],
580
- "last": "Ringer",
581
- "suffix": ""
582
- },
583
- {
584
- "first": "T",
585
- "middle": [],
586
- "last": "Sikorski",
587
- "suffix": ""
588
- }
589
- ],
590
- "year": 1996,
591
- "venue": "34th Annual Meeting of the ACL",
592
- "volume": "",
593
- "issue": "",
594
- "pages": "62--70",
595
- "other_ids": {},
596
- "num": null,
597
- "urls": [],
598
- "raw_text": "Allen, J. F., Miller, B. W.; Ringer, E. K.; and Sikorski, T. (1996) A Robust System for Natural Spoken Dialogue. 34th Annual Meeting of the ACL, Santa Cruz, 62-70.",
599
- "links": null
600
- },
601
- "BIBREF2": {
602
- "ref_id": "b2",
603
- "title": "FidelityXPress: A Multi-Modal System for Financial Transactions",
604
- "authors": [
605
- {
606
- "first": "A",
607
- "middle": [],
608
- "last": "Bagga",
609
- "suffix": ""
610
- },
611
- {
612
- "first": "G",
613
- "middle": [
614
- "C"
615
- ],
616
- "last": "Stein",
617
- "suffix": ""
618
- },
619
- {
620
- "first": "T",
621
- "middle": [],
622
- "last": "Strzalkowski",
623
- "suffix": ""
624
- }
625
- ],
626
- "year": 2000,
627
- "venue": "Proceedings of the 6 a~ Conference on Content-Based Multimedia Information Access (RIAO'00)",
628
- "volume": "",
629
- "issue": "",
630
- "pages": "",
631
- "other_ids": {},
632
- "num": null,
633
- "urls": [],
634
- "raw_text": "Bagga, A., Stein G. C., and Strzalkowski, T. (2000) FidelityXPress: A Multi-Modal System for Financial Transactions. Proceedings of the 6 a~ Conference on Content-Based Multimedia Information Access (RIAO'00).",
635
- "links": null
636
- },
637
- "BIBREF3": {
638
- "ref_id": "b3",
639
- "title": "Natural language with discrete speech as a mode for human to machine communication",
640
- "authors": [
641
- {
642
- "first": "A",
643
- "middle": [
644
- "W"
645
- ],
646
- "last": "Biermann",
647
- "suffix": ""
648
- },
649
- {
650
- "first": "R",
651
- "middle": [],
652
- "last": "Rodman",
653
- "suffix": ""
654
- },
655
- {
656
- "first": "D",
657
- "middle": [],
658
- "last": "Rubin",
659
- "suffix": ""
660
- },
661
- {
662
- "first": "J",
663
- "middle": [
664
- "R"
665
- ],
666
- "last": "Heidlage",
667
- "suffix": ""
668
- }
669
- ],
670
- "year": 1985,
671
- "venue": "Communication of the ACM",
672
- "volume": "18",
673
- "issue": "6",
674
- "pages": "628--636",
675
- "other_ids": {},
676
- "num": null,
677
- "urls": [],
678
- "raw_text": "Biermann, A.W.; Rodman, R.; Rubin, D.; and Heidlage, J.R. (1985) Natural language with discrete speech as a mode for human to machine communication. Communication of the ACM 18(6): 628-636.",
679
- "links": null
680
- },
681
- "BIBREF4": {
682
- "ref_id": "b4",
683
- "title": "Goal-orientedMultimedia Dialogue with Variable Initiative",
684
- "authors": [
685
- {
686
- "first": "Alan",
687
- "middle": [
688
- "W"
689
- ],
690
- "last": "Biermann",
691
- "suffix": ""
692
- },
693
- {
694
- "first": "",
695
- "middle": [],
696
- "last": "Guinn",
697
- "suffix": ""
698
- },
699
- {
700
- "first": "I",
701
- "middle": [],
702
- "last": "Curry",
703
- "suffix": ""
704
- },
705
- {
706
- "first": "M",
707
- "middle": [],
708
- "last": "Fulkerson",
709
- "suffix": ""
710
- },
711
- {
712
- "first": "G",
713
- "middle": [
714
- "A"
715
- ],
716
- "last": "Keim",
717
- "suffix": ""
718
- },
719
- {
720
- "first": "Z",
721
- "middle": [],
722
- "last": "Liang",
723
- "suffix": ""
724
- },
725
- {
726
- "first": "D",
727
- "middle": [
728
- "M"
729
- ],
730
- "last": "Melamed",
731
- "suffix": ""
732
- },
733
- {
734
- "first": "K",
735
- "middle": [],
736
- "last": "Rajagopalan",
737
- "suffix": ""
738
- }
739
- ],
740
- "year": null,
741
- "venue": "Lecture Notes in Artificial Intelligence",
742
- "volume": "",
743
- "issue": "",
744
- "pages": "1--16",
745
- "other_ids": {},
746
- "num": null,
747
- "urls": [],
748
- "raw_text": "Biermann, Alan W.; Guinn, Curry I.; Fulkerson, M.: Keim, G.A.; Liang, Z.; Melamed, D.M.; and Rajagopalan, K. (1997) Goal-orientedMultimedia Dialogue with Variable Initiative. Lecture Notes in Artificial Intelligence 1325; Springer-Verlag, New York; pp. 1-16.",
749
- "links": null
750
- },
751
- "BIBREF5": {
752
- "ref_id": "b5",
753
- "title": "Plan Recognition in Natural Language Dialogue",
754
- "authors": [
755
- {
756
- "first": "S",
757
- "middle": [],
758
- "last": "Carberry",
759
- "suffix": ""
760
- }
761
- ],
762
- "year": 1990,
763
- "venue": "",
764
- "volume": "",
765
- "issue": "",
766
- "pages": "",
767
- "other_ids": {},
768
- "num": null,
769
- "urls": [],
770
- "raw_text": "Carberry, S. (1990) Plan Recognition in Natural Language Dialogue. Cambridge, Mass.: The MIT Press.",
771
- "links": null
772
- },
773
- "BIBREF6": {
774
- "ref_id": "b6",
775
- "title": "Vector-Based Natural Language Call Routing",
776
- "authors": [
777
- {
778
- "first": "J",
779
- "middle": [],
780
- "last": "Chu-Carroll",
781
- "suffix": ""
782
- },
783
- {
784
- "first": "R",
785
- "middle": [],
786
- "last": "Carpenter",
787
- "suffix": ""
788
- }
789
- ],
790
- "year": 1999,
791
- "venue": "Journal of Computational Linguistics",
792
- "volume": "25",
793
- "issue": "30",
794
- "pages": "361--388",
795
- "other_ids": {},
796
- "num": null,
797
- "urls": [],
798
- "raw_text": "Chu-Carroll, J, and R. Carpenter. (1999) Vector- Based Natural Language Call Routing. Journal of Computational Linguistics, 25(30), pp. 361-388.",
799
- "links": null
800
- },
801
- "BIBREF7": {
802
- "ref_id": "b7",
803
- "title": "An Integrated Dialogue System for the Automation of Call Centre Services",
804
- "authors": [
805
- {
806
- "first": "K",
807
- "middle": [],
808
- "last": "Georgila",
809
- "suffix": ""
810
- },
811
- {
812
- "first": "A",
813
- "middle": [],
814
- "last": "Tsopanoglou",
815
- "suffix": ""
816
- },
817
- {
818
- "first": "N",
819
- "middle": [],
820
- "last": "Fakotakis",
821
- "suffix": ""
822
- },
823
- {
824
- "first": "G",
825
- "middle": [],
826
- "last": "Kokkinakis",
827
- "suffix": ""
828
- }
829
- ],
830
- "year": 1998,
831
- "venue": "ICLSP'98, 5th International Conference on Spoken Language Processing",
832
- "volume": "",
833
- "issue": "",
834
- "pages": "",
835
- "other_ids": {},
836
- "num": null,
837
- "urls": [],
838
- "raw_text": "Georgila, K., A.Tsopanoglou, N.Fakotakis and G.Kokkinakis. (1998) An Integrated Dialogue System for the Automation of Call Centre Services. ICLSP'98, 5th International Conference on Spoken Language Processing, Sydney, Australia.",
839
- "links": null
840
- },
841
- "BIBREF8": {
842
- "ref_id": "b8",
843
- "title": "Attentions, intentions, and the structure of discourse",
844
- "authors": [
845
- {
846
- "first": "B",
847
- "middle": [
848
- "J"
849
- ],
850
- "last": "Grosz",
851
- "suffix": ""
852
- },
853
- {
854
- "first": "C",
855
- "middle": [
856
- "L"
857
- ],
858
- "last": "Sidner",
859
- "suffix": ""
860
- }
861
- ],
862
- "year": 1986,
863
- "venue": "Computational Linguistics",
864
- "volume": "12",
865
- "issue": "3",
866
- "pages": "175--204",
867
- "other_ids": {},
868
- "num": null,
869
- "urls": [],
870
- "raw_text": "Grosz, B.J. and Sidner, C.L. (1986) Attentions, intentions, and the structure of discourse. Computational Linguistics 12(3): 175-204.",
871
- "links": null
872
- },
873
- "BIBREF9": {
874
- "ref_id": "b9",
875
- "title": "Voice-Recognition Technology Builds a Following. Information Week",
876
- "authors": [
877
- {
878
- "first": "L",
879
- "middle": [],
880
- "last": "Greenemeier",
881
- "suffix": ""
882
- }
883
- ],
884
- "year": 1999,
885
- "venue": "",
886
- "volume": "",
887
- "issue": "",
888
- "pages": "",
889
- "other_ids": {},
890
- "num": null,
891
- "urls": [],
892
- "raw_text": "Greenemeier, L. (1999) Voice-Recognition Technology Builds a Following. Information Week, December 13.",
893
- "links": null
894
- },
895
- "BIBREF10": {
896
- "ref_id": "b10",
897
- "title": "Can Speech Recognition Give Telephones a New Face?",
898
- "authors": [
899
- {
900
- "first": "W",
901
- "middle": [],
902
- "last": "Meisel",
903
- "suffix": ""
904
- }
905
- ],
906
- "year": 1999,
907
- "venue": "Business Communications Review",
908
- "volume": "",
909
- "issue": "",
910
- "pages": "",
911
- "other_ids": {},
912
- "num": null,
913
- "urls": [],
914
- "raw_text": "Meisel, W. (1999) Can Speech Recognition Give Telephones a New Face? Business Communications Review, November 1.",
915
- "links": null
916
- },
917
- "BIBREF11": {
918
- "ref_id": "b11",
919
- "title": "Plain-speaking: A theory and grammar of spontaneous discourse",
920
- "authors": [
921
- {
922
- "first": "R",
923
- "middle": [],
924
- "last": "Reichman",
925
- "suffix": ""
926
- }
927
- ],
928
- "year": 1981,
929
- "venue": "",
930
- "volume": "",
931
- "issue": "",
932
- "pages": "",
933
- "other_ids": {},
934
- "num": null,
935
- "urls": [],
936
- "raw_text": "Reichman, R.. (1981) Plain-speaking: A theory and grammar of spontaneous discourse. PhD thesis, Department of Computer Science, Harvard University, Cambridge, Massachusetts.",
937
- "links": null
938
- },
939
- "BIBREF12": {
940
- "ref_id": "b12",
941
- "title": "Speech Has Industry Talking. Business Travel News",
942
- "authors": [
943
- {
944
- "first": "C",
945
- "middle": [],
946
- "last": "Rosen",
947
- "suffix": ""
948
- }
949
- ],
950
- "year": 1999,
951
- "venue": "",
952
- "volume": "",
953
- "issue": "",
954
- "pages": "",
955
- "other_ids": {},
956
- "num": null,
957
- "urls": [],
958
- "raw_text": "Rosen, C. (1999) Speech Has Industry Talking. Business Travel News, November.",
959
- "links": null
960
- },
961
- "BIBREF13": {
962
- "ref_id": "b13",
963
- "title": "Giving Voice to Customer Service. Datamation",
964
- "authors": [
965
- {
966
- "first": "J",
967
- "middle": [],
968
- "last": "Rossheim",
969
- "suffix": ""
970
- }
971
- ],
972
- "year": 1999,
973
- "venue": "",
974
- "volume": "",
975
- "issue": "",
976
- "pages": "",
977
- "other_ids": {},
978
- "num": null,
979
- "urls": [],
980
- "raw_text": "Rossheim, J. (1999) Giving Voice to Customer Service. Datamation, November 1.",
981
- "links": null
982
- }
983
- },
984
- "ref_entries": {
985
- "FIGREF0": {
986
- "type_str": "figure",
987
- "text": "Figure 1. PartslD System Architecture",
988
- "num": null,
989
- "uris": null
990
- }
991
- }
992
- }
993
- }
Full_text_JSON/prefixA/json/A00/A00-1006.json DELETED
@@ -1,925 +0,0 @@
1
- {
2
- "paper_id": "A00-1006",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:18.088771Z"
6
- },
7
- "title": "Translation using Information on Dialogue Participants",
8
- "authors": [
9
- {
10
- "first": "Setsuo",
11
- "middle": [],
12
- "last": "Yamada",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "ATR Interpreting Telecommunications Research Laboratories",
16
- "institution": "",
17
- "location": {
18
- "addrLine": "* 2-2, Seika-cho, Soraku-gun",
19
- "postCode": "619-0288",
20
- "settlement": "Hikaridai, Kyoto",
21
- "country": "JAPAN"
22
- }
23
- },
24
- "email": "[email protected]"
25
- },
26
- {
27
- "first": "Eiichiro",
28
- "middle": [],
29
- "last": "Sumita",
30
- "suffix": "",
31
- "affiliation": {
32
- "laboratory": "ATR Interpreting Telecommunications Research Laboratories",
33
- "institution": "",
34
- "location": {
35
- "addrLine": "* 2-2, Seika-cho, Soraku-gun",
36
- "postCode": "619-0288",
37
- "settlement": "Hikaridai, Kyoto",
38
- "country": "JAPAN"
39
- }
40
- },
41
- "email": "[email protected]"
42
- },
43
- {
44
- "first": "Hideki",
45
- "middle": [],
46
- "last": "Kashioka",
47
- "suffix": "",
48
- "affiliation": {
49
- "laboratory": "ATR Interpreting Telecommunications Research Laboratories",
50
- "institution": "",
51
- "location": {
52
- "addrLine": "* 2-2, Seika-cho, Soraku-gun",
53
- "postCode": "619-0288",
54
- "settlement": "Hikaridai, Kyoto",
55
- "country": "JAPAN"
56
- }
57
- },
58
- "email": "[email protected]"
59
- }
60
- ],
61
- "year": "",
62
- "venue": null,
63
- "identifiers": {},
64
- "abstract": "This paper proposes a way to improve the translation quality by using information on dialogue participants that is easily obtained from outside the translation component. We incorporated information on participants' social roles and genders into transfer rules and dictionary entries. An experiment with 23 unseen dialogues demonstrated a recall of 65% and a precision of 86%. These results showed that our simple and easy-to-implement method is effective, and is a key technology enabling smooth conversation with a dialogue translation system. *Current affiliation is ATR Spoken Language Translation Research Laboratories Current mail addresses are { setsuo.yarnada, eiichiro.sumita, hideki.kashioka} @slt. atr. co.jp",
65
- "pdf_parse": {
66
- "paper_id": "A00-1006",
67
- "_pdf_hash": "",
68
- "abstract": [
69
- {
70
- "text": "This paper proposes a way to improve the translation quality by using information on dialogue participants that is easily obtained from outside the translation component. We incorporated information on participants' social roles and genders into transfer rules and dictionary entries. An experiment with 23 unseen dialogues demonstrated a recall of 65% and a precision of 86%. These results showed that our simple and easy-to-implement method is effective, and is a key technology enabling smooth conversation with a dialogue translation system. *Current affiliation is ATR Spoken Language Translation Research Laboratories Current mail addresses are { setsuo.yarnada, eiichiro.sumita, hideki.kashioka} @slt. atr. co.jp",
71
- "cite_spans": [],
72
- "ref_spans": [],
73
- "eq_spans": [],
74
- "section": "Abstract",
75
- "sec_num": null
76
- }
77
- ],
78
- "body_text": [
79
- {
80
- "text": "Recently, various dialogue translation systems have been proposed (Bub and others, 1997; Kurematsu and Morimoto, 1996; Rayner and Carter, 1997; Ros~ and Levin, 1998; Sumita and others, 1999; Yang and Park, 1997; Vidal, 1997 ). If we want to make a conversation proceed smoothly using these translation systems, it is important to use not only linguistic information, which comes from the source language, but also extra-linguistic information, which does not come from the source language, but, is shared between the participants of the conversation.",
81
- "cite_spans": [
82
- {
83
- "start": 66,
84
- "end": 88,
85
- "text": "(Bub and others, 1997;",
86
- "ref_id": null
87
- },
88
- {
89
- "start": 89,
90
- "end": 118,
91
- "text": "Kurematsu and Morimoto, 1996;",
92
- "ref_id": "BIBREF4"
93
- },
94
- {
95
- "start": 119,
96
- "end": 143,
97
- "text": "Rayner and Carter, 1997;",
98
- "ref_id": "BIBREF8"
99
- },
100
- {
101
- "start": 144,
102
- "end": 165,
103
- "text": "Ros~ and Levin, 1998;",
104
- "ref_id": "BIBREF9"
105
- },
106
- {
107
- "start": 166,
108
- "end": 190,
109
- "text": "Sumita and others, 1999;",
110
- "ref_id": null
111
- },
112
- {
113
- "start": 191,
114
- "end": 211,
115
- "text": "Yang and Park, 1997;",
116
- "ref_id": "BIBREF13"
117
- },
118
- {
119
- "start": 212,
120
- "end": 223,
121
- "text": "Vidal, 1997",
122
- "ref_id": "BIBREF12"
123
- }
124
- ],
125
- "ref_spans": [],
126
- "eq_spans": [],
127
- "section": "Introduction",
128
- "sec_num": "1"
129
- },
130
- {
131
- "text": "Several dialogue translation methods that use extra-linguistic information have been proposed. Horiguchi outlined how \"spoken language pragmatic information\" can be translated (Horiguchi, 1997) . However, she did not apply this idea to a dialogue translation system. LuperFoy et al. proposed a software architec-ture that uses '% pragmatic adaptation\" (Lu-perFoy and others, 1998) , and Mima et al. proposed a method that uses \"situational information\" (Mima and others, 1997) . LuperFoy et al. simulated their method on man-machine interfaces and Mima et al. preliminarily evaluated their method. Neither study, however, applied its proposals to an actual dialogue translation system.",
132
- "cite_spans": [
133
- {
134
- "start": 176,
135
- "end": 193,
136
- "text": "(Horiguchi, 1997)",
137
- "ref_id": "BIBREF3"
138
- },
139
- {
140
- "start": 352,
141
- "end": 380,
142
- "text": "(Lu-perFoy and others, 1998)",
143
- "ref_id": null
144
- },
145
- {
146
- "start": 453,
147
- "end": 476,
148
- "text": "(Mima and others, 1997)",
149
- "ref_id": null
150
- }
151
- ],
152
- "ref_spans": [],
153
- "eq_spans": [],
154
- "section": "Introduction",
155
- "sec_num": "1"
156
- },
157
- {
158
- "text": "The above mentioned methods will need time to work in practice, since it is hard to obtain the extra-linguistic information on which they depend.",
159
- "cite_spans": [],
160
- "ref_spans": [],
161
- "eq_spans": [],
162
- "section": "Introduction",
163
- "sec_num": "1"
164
- },
165
- {
166
- "text": "We have been paying special attention to \"politeness,\" because a lack of politeness can interfere with a smooth conversation between two participants, such as a clerk and a customer. It is easy for a dialogue translation system to know which participant is the clerk and which is the customer from the interface (such as the wires to the microphones).",
167
- "cite_spans": [],
168
- "ref_spans": [],
169
- "eq_spans": [],
170
- "section": "Introduction",
171
- "sec_num": "1"
172
- },
173
- {
174
- "text": "This paper describes a method of \"politeness\" selection according to a participant's social role (a clerk or a customer), which is easily obtained from the extra-linguistic environment. We incorporated each participant's social role into transfer rules and transfer dictionary entries. We then conducted an experiment with 23 unseen dialogues (344 utterances). Our method achieved a recall of 65% and a precision of 86%. These rates could be improved to 86% and 96%, respectively (see Section 4). It is therefore possible to use a \"participant's social role\" (a clerk or a customer in this case) to appropriately make the translation results \"polite,\" and to make the conversation proceed smoothly with a dialogue translation system. Section 2 analyzes the relationship between a particular participant's social role (a clerk) and politeness in Japanese. Section 3 describes our proposal in detail using an English-to-Japanese translation system. Section 4 shows an experiment and results, followed by a discussion in Section 5. Finally, Section 6 concludes this paper.",
175
- "cite_spans": [],
176
- "ref_spans": [],
177
- "eq_spans": [],
178
- "section": "Introduction",
179
- "sec_num": "1"
180
- },
181
- {
182
- "text": "This section focuses on one participant's social role. We investigated Japanese outputs of a dialogue translation system to see how many utterances should be polite expressions in a current translation system for travel arrangement. We input 1,409 clerk utterances into a Transfer Driven Machine Translation system (Sumita and others, 1999 ) (TDMT for short). The inputs were closed utterances, meaning the system already knew the utterances, enabling the utterances to be transferred at a good quality. Therefore, we used closed utterances as the inputs to avoid translation errors.",
183
- "cite_spans": [
184
- {
185
- "start": 315,
186
- "end": 339,
187
- "text": "(Sumita and others, 1999",
188
- "ref_id": null
189
- }
190
- ],
191
- "ref_spans": [],
192
- "eq_spans": [],
193
- "section": "A Participant's Social Role and Politeness",
194
- "sec_num": "2"
195
- },
196
- {
197
- "text": "As a result, it was shown that about 70% (952) of all utterances should be improved to use polite expressions. This result shows that a current translation system is not enough to make a conversation smoothly. Not surprisingly, if all expressions were polite, some Japanese speakers would feel insulted. Therefore, Japanese speakers do not have to use polite expression in all utterances.",
198
- "cite_spans": [],
199
- "ref_spans": [],
200
- "eq_spans": [],
201
- "section": "A Participant's Social Role and Politeness",
202
- "sec_num": "2"
203
- },
204
- {
205
- "text": "We classified the investigated data into different types of English expressions for Japanese politeness, i.e., into honorific titles, parts of speech such as verbs, and canned phrases, as shown in Table 1 ; however, not all types appeared in the data.",
206
- "cite_spans": [],
207
- "ref_spans": [
208
- {
209
- "start": 197,
210
- "end": 204,
211
- "text": "Table 1",
212
- "ref_id": "TABREF0"
213
- }
214
- ],
215
- "eq_spans": [],
216
- "section": "A Participant's Social Role and Politeness",
217
- "sec_num": "2"
218
- },
219
- {
220
- "text": "For example, when the clerk said \"How will you be paying, Mr. Suzuki,\" the Japanese translation was made polite as \"donoyouni oshiharaininarimasu-ka suzuki-sama\" in place of the standard expression \"donoyouni shiharaimasu-ka suzuki-san.\" Table 1 shows that there is a difference in how expressions should be made more polite according to the type, and that many polite expressions can be translated by using only local information, i.e., transfer rules and dictionary entries. In the next section, we describe how to incorporate the information on dialogue participants, such as roles and genders, into transfer rules and dictionary entries in a dialogue translation system.",
221
- "cite_spans": [],
222
- "ref_spans": [
223
- {
224
- "start": 238,
225
- "end": 245,
226
- "text": "Table 1",
227
- "ref_id": "TABREF0"
228
- }
229
- ],
230
- "eq_spans": [],
231
- "section": "A Participant's Social Role and Politeness",
232
- "sec_num": "2"
233
- },
234
- {
235
- "text": "This section describes how to use information on dialogue participants, such as participants' social roles and genders. First, we describe TDMT, which we also used in our experiment. Second, we mention how to modify transfer rules and transfer dictionary entries according to information on dialogue participants.",
236
- "cite_spans": [],
237
- "ref_spans": [],
238
- "eq_spans": [],
239
- "section": "A Method of Using Information on Dialogue Participants",
240
- "sec_num": "3"
241
- },
242
- {
243
- "text": "TDMT uses bottom-up left-to-right chart parsing with transfer rules as shown in Figure 1 . The parsing determines the best structure and best transferred result locally by performing structural disambiguation using semantic distance calculations, in parallel with the derivation of possible structures. The semantic distance is defined by a thesaurus.",
244
- "cite_spans": [],
245
- "ref_spans": [
246
- {
247
- "start": 80,
248
- "end": 88,
249
- "text": "Figure 1",
250
- "ref_id": null
251
- }
252
- ],
253
- "eq_spans": [],
254
- "section": "Transfer Driven Machine Translation",
255
- "sec_num": "3.1"
256
- },
257
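The semantic distance used for disambiguation here is defined by a thesaurus but not spelled out in the text. As a hedged illustration (not TDMT's actual code or thesaurus), here is a minimal Python sketch of one plausible distance, counting edges to the lowest common ancestor in a toy parent-pointer tree; THESAURUS, ancestors and semdist are names invented for this sketch:

THESAURUS = {  # child -> parent; a toy fragment, not ATR's thesaurus
    "payment": "money", "fare": "money", "money": "thing",
    "picture": "image", "image": "thing",
}

def ancestors(word):
    # Chain from the word up to the thesaurus root.
    chain = [word]
    while chain[-1] in THESAURUS:
        chain.append(THESAURUS[chain[-1]])
    return chain

def semdist(a, b):
    # Edge count from a to b via their lowest common ancestor.
    ca, cb = ancestors(a), ancestors(b)
    shared = next(n for n in ca if n in cb)
    return ca.index(shared) + cb.index(shared)

print(semdist("payment", "fare"))     # 2: siblings under "money"
print(semdist("payment", "picture"))  # 4: meet only at "thing"

Words that meet lower in the hierarchy count as closer, which is all the example-based selection below needs.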
- {
258
- "text": "(source pattern) ==~ J ((target pattern 1) ((source example 1) (source example 2)",
259
- "cite_spans": [],
260
- "ref_spans": [],
261
- "eq_spans": [],
262
- "section": "Transfer Driven Machine Translation",
263
- "sec_num": "3.1"
264
- },
265
- {
282
- "text": "Figure 1: Transfer rule format A transfer rule consists of a source pattern, a target pattern, and a source example. The source pattern consists of variables and constituent boundaries (Furuse and Iida, 1996) . A constituent boundary is either a functional word or the part-of-speech of a left constituent's last word and the part-of-speech of a right constituent's first word. In Example (1), the constituent boundary IV-CN) is inserted between \"accept\" and \"payment,\" because \"accept\" is a Verb and \"payment\" is a Common Noun. The target pattern consists of variables that correspond to variables in the source pattern and words of the target language. The source example consists of words that come from utterances referred to when a person creates transfer rules (we call such utterances closed utterances). Figure 2 shows a transfer rule whose source pattern is (X (V-CN) Y). Variable X corresponds to x, which is used in the target pattern, and Y corresponds to y, which is also watashidomo-wa kurejitto-kaado-deno o_shiharai-wo oukeshimasu Gloss:",
283
- "cite_spans": [
284
- {
285
- "start": 185,
286
- "end": 208,
287
- "text": "(Furuse and Iida, 1996)",
288
- "ref_id": null
289
- }
290
- ],
291
- "ref_spans": [
292
- {
293
- "start": 812,
294
- "end": 820,
295
- "text": "Figure 2",
296
- "ref_id": null
297
- }
298
- ],
299
- "eq_spans": [],
300
- "section": "Transfer Driven Machine Translation",
301
- "sec_num": "3.1"
302
- },
303
- {
304
- "text": "We-TOP credit-card-by payment-OBJ accept used in the target pattern. The source example ((\"accept\") (\"payment\")) comes from Example (1), and the other source examples come from the other closed utterances. This transfer rule means that if the source pattern is (X (V-CN) Y) then (y \"wo\" x) or (y \"ni\" x) is selected as the target pattern, where an input word pair corresponding to X and Y is semantically the most similar in a thesaurus to, or exactly the same as, the source example. For example, if an input word pair corresponding to X and Y is semantically the most similar in a thesaurus to, or exactly the same as, ((\"accept\") (\"payment\")), then the target pattern (y \"wo\" x) is selected in Figure 2 . As a result, an appropriate target pattern is selected.",
305
- "cite_spans": [],
306
- "ref_spans": [
307
- {
308
- "start": 697,
309
- "end": 705,
310
- "text": "Figure 2",
311
- "ref_id": null
312
- }
313
- ],
314
- "eq_spans": [],
315
- "section": "Transfer Driven Machine Translation",
316
- "sec_num": "3.1"
317
- },
318
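Given such a distance, the selection just described reduces to picking the target pattern whose source examples lie closest to the input word pair. A minimal Python sketch of this idea, using the Figure 2 rule; the RULE layout and the semdist stub are assumptions for illustration, not TDMT's data structures:

RULE = {  # for source pattern (X (V-CN) Y): target pattern -> source examples
    '(y "wo" x)': [("accept", "payment"), ("take", "picture")],
    '(y "ni" x)': [("take", "bus"), ("get", "sunstroke")],
}

def semdist(pair_a, pair_b):
    # Stand-in for the thesaurus-based distance; exact match scores 0.
    return 0.0 if pair_a == pair_b else 1.0

def select_target_pattern(x_word, y_word, rule=RULE):
    # The target pattern with the semantically closest source example wins.
    return min(rule.items(),
               key=lambda kv: min(semdist((x_word, y_word), ex) for ex in kv[1]))[0]

print(select_target_pattern("accept", "payment"))  # -> (y "wo" x)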
- {
319
- "text": "After a target pattern is selected, TDMT creates a target structure according to the pattern (X (V-CN) Y) ((y \"wo\" x) (((\"accept\") (\"payment\")) ((\"take\") (\"picture\"))) (y \"hi\" x) (((\"take\") (\"bus\")) ((\"get\") (\"sunstroke\"))) ) Figure 2 : Transfer rule example by referring to a transfer dictionary, as shown in Figure 3 . If the input is \"accept (V-CN) payment,\" then this part is translated into \"shiharai wo uketsukeru.\" \"wo\" is derived from the target pattern (y \"wo\" x), and \"shiharai\" and \"uketsukeru\" are derived from the transfer dictionary, as shown in Figure 4 . ",
320
- "cite_spans": [],
321
- "ref_spans": [
322
- {
323
- "start": 226,
324
- "end": 234,
325
- "text": "Figure 2",
326
- "ref_id": null
327
- },
328
- {
329
- "start": 310,
330
- "end": 318,
331
- "text": "Figure 3",
332
- "ref_id": null
333
- },
334
- {
335
- "start": 560,
336
- "end": 568,
337
- "text": "Figure 4",
338
- "ref_id": null
339
- }
340
- ],
341
- "eq_spans": [],
342
- "section": "Transfer Driven Machine Translation",
343
- "sec_num": "3.1"
344
- },
345
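The lookup step can be sketched the same way: variables in the selected target pattern are filled from the transfer dictionary, while literal words such as "wo" pass through unchanged. A small illustrative sketch reproducing the "accept (V-CN) payment" walkthrough (TRANSFER_DICT and realize are invented names, not the system's API):

TRANSFER_DICT = {"accept": "uketsukeru", "payment": "shiharai"}

def realize(target_pattern, x_word, y_word, d=TRANSFER_DICT):
    # target_pattern like ("y", "wo", "x"): variables are replaced through
    # the dictionary, anything else is kept as a literal target word.
    slots = {"x": d[x_word], "y": d[y_word]}
    return " ".join(slots.get(tok, tok) for tok in target_pattern)

print(realize(("y", "wo", "x"), "accept", "payment"))  # shiharai wo uketsukeru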
- {
346
- "text": "For this research, we modified the transfer rules and the transfer dictionary entries, as shown in Figures 5 and 6 . In Figure 5 , the target pattern \"target pattern 11\" and the source word \"source example 1\" are used to change the translation according to information on dialogue participants. For example, if \":pattern-cond 11\" is defined as \":h-gender male\" as shown in Figure 7 , then \"target pattern 11\" is selected when the hearer is a male, that is, \"(\"Mr.\" x)\" is selected. Moreover, if \":word-cond 11\" is defined as \":srole clerk\" as shown in Figure 8 , then \"source example 1\" is translated into \"target word 11\" when the speaker is a clerk, that is, \"accept\" is translated into \"oukesuru.\" Translations such as \"target word 11\" are valid only in the source pattern; that is, a source example might not always be translated into one of these target words. If we always want to produce translations according to information on dialogue participants, then we need to modify the entries in the transfer dictionary like Figure 6 shows. Conversely, if we do not want to always change the translation, then we should not modify the entries but modify the transfer rules. Several conditions can also be given to \":word-cond\" and \":pattern-cond.\" For example, \":s-role customer and :s-gender female,\" which means the speaker is a customer and a female, can be given. In Figure 5 , \":default\" means the de-fault target pattern or word if no condition is matched. The condition is checked from up to down in order; that is, first, \":pattern-cond 11,\" second, \":pattern-cond 1~,\" ... and so on.",
347
- "cite_spans": [],
348
- "ref_spans": [
349
- {
350
- "start": 99,
351
- "end": 114,
352
- "text": "Figures 5 and 6",
353
- "ref_id": null
354
- },
355
- {
356
- "start": 120,
357
- "end": 128,
358
- "text": "Figure 5",
359
- "ref_id": null
360
- },
361
- {
362
- "start": 373,
363
- "end": 381,
364
- "text": "Figure 7",
365
- "ref_id": null
366
- },
367
- {
368
- "start": 552,
369
- "end": 560,
370
- "text": "Figure 8",
371
- "ref_id": "FIGREF1"
372
- },
373
- {
374
- "start": 1026,
375
- "end": 1034,
376
- "text": "Figure 6",
377
- "ref_id": null
378
- },
379
- {
380
- "start": 1372,
381
- "end": 1380,
382
- "text": "Figure 5",
383
- "ref_id": null
384
- }
385
- ],
386
- "eq_spans": [],
387
- "section": "Transfer Rules and Entries according to Information on Dialogue Participants",
388
- "sec_num": "3.2"
389
- },
390
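The top-down condition check with a default described above amounts to a first-match scan over (condition, target) pairs. A hedged Python sketch; the context key s_role and the list layout are illustrative assumptions, but the clerk mapping of "accept" to "oukesuru" is the paper's own example:

ENTRY = [  # (condition, target word) pairs, checked in order
    ({"s_role": "clerk"}, "oukesuru"),
    ({}, "uketsukeru"),  # empty condition plays the role of :default
]

def translate(entry, context):
    for cond, target in entry:
        if all(context.get(k) == v for k, v in cond.items()):
            return target  # the first matching condition wins

print(translate(ENTRY, {"s_role": "clerk"}))     # oukesuru
print(translate(ENTRY, {"s_role": "customer"}))  # uketsukeru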
- {
391
- "text": "(X (V-CN) Y) ((y \"wo\" x) (((\"accept\") (\"payment\")) ((\"take\") (\"picture\"))) (((\"accept\") -~ Even though we do not have rules and entries for pattern conditions and word conditions according to another participant's information, such as \":s-role customer'(which means the speaker's role is a customer) and \":s-gender male\" (which means the speaker's gender is male), TDMT can translate expressions corresponding to this information too. For example, \"Very good, please let me confirm them\" will be translated into \"shouchiitashimasita kakunin sasete itadakimasu\" when the speaker is a clerk or \"soredekekkoudesu kakunin sasete kudasai\" when the speaker is a customer, as shown in Example (2).",
392
- "cite_spans": [],
393
- "ref_spans": [],
394
- "eq_spans": [],
395
- "section": "Transfer Rules and Entries according to Information on Dialogue Participants",
396
- "sec_num": "3.2"
397
- },
398
- {
399
- "text": "By making a rule and an entry like the examples shown in Figures 8 and 9 , the utterance of Example (1) will be translated into \"watashidomo wa kurejitto kaado deno oshiharai wo oukeshimasu\" when the speaker is a clerk.",
400
- "cite_spans": [],
401
- "ref_spans": [
402
- {
403
- "start": 57,
404
- "end": 72,
405
- "text": "Figures 8 and 9",
406
- "ref_id": "FIGREF1"
407
- }
408
- ],
409
- "eq_spans": [],
410
- "section": "Transfer Rules and Entries according to Information on Dialogue Participants",
411
- "sec_num": "3.2"
412
- },
413
- {
414
- "text": "The TDMT system for English-to-Japanese at the time Of the experiment had about 1,500 transfer rules and 8,000 transfer dictionary entries. In other words, this TDMT system was capable of translating 8,000 English words into Japanese words. About 300 transfer rules and 40 transfer dictionary entries were modified to improve the level of \"politeness.\"",
415
- "cite_spans": [],
416
- "ref_spans": [],
417
- "eq_spans": [],
418
- "section": "An Experiment",
419
- "sec_num": "4"
420
- },
421
- {
422
- "text": "We conducted an experiment using the transfer rules and transfer dictionary for a clerk with 23 unseen dialogues (344 utterances). Our input was off-line, i.e., a transcription of dialogues, which was encoded with the participant's social role. In the on-line situation, our system can not infer whether the participant's social role is a clerk or a customer, but can instead determine the role without error from the interface (such as a microphone or a button).",
423
- "cite_spans": [],
424
- "ref_spans": [],
425
- "eq_spans": [],
426
- "section": "An Experiment",
427
- "sec_num": "4"
428
- },
429
- {
430
- "text": "In order to evaluate the experiment, we classifted the Japanese translation results obtained for the 23 unseen dialogues (199 utterances from a clerk, and 145 utterances from a customer, making 344 utterances in total) into two types: expressions that had to be changed to more polite expressions, and expressions that did not. Table 2 shows the number of utterances that included an expression which had to be changed into a more polite one (indicated by \"Yes\") and those that did not (indicated by \"No\"). We neglected 74 utterances whose translations were too poor to judge whether to assign a \"Yes\" or \"No.\" The translation results were evaluated to see whether the impressions of the translated results were improved or not with/without modification for the clerk from the viewpoint of \"politeness.\" Table 3 shows the impressions obtained according to the necessity of change shown in Table 2 .",
431
- "cite_spans": [],
432
- "ref_spans": [
433
- {
434
- "start": 328,
435
- "end": 335,
436
- "text": "Table 2",
437
- "ref_id": "TABREF1"
438
- },
439
- {
440
- "start": 804,
441
- "end": 811,
442
- "text": "Table 3",
443
- "ref_id": "TABREF3"
444
- },
445
- {
446
- "start": 889,
447
- "end": 896,
448
- "text": "Table 2",
449
- "ref_id": "TABREF1"
450
- }
451
- ],
452
- "eq_spans": [],
453
- "section": "An Experiment",
454
- "sec_num": "4"
455
- },
456
- {
457
- "text": "The evaluation criteria are recall and precision, which are defined as follows: Recall = number of utterances whose impression is better number of utterances which should be more polite better: Impression of a translation is better. same: Impression of a translation has not changed. worse: Impression of a translation is worse. no-diff: There is no difference between the two translations.",
458
- "cite_spans": [],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "An Experiment",
462
- "sec_num": "4"
463
- },
464
- {
465
- "text": "Precision = number of utterances whose impression is better number of utterances whose expression has been changed by the modified rules and entries",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "An Experiment",
470
- "sec_num": "4"
471
- },
472
- {
473
- "text": "The recall was 65% (= 68 -(68 + 5 + 3 + 28)) and the precision was 86% (= 68 -: (68 + 5 + 3 + 0+3+0)).",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "An Experiment",
478
- "sec_num": "4"
479
- },
480
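The arithmetic can be verified directly from the Table 3 counts; the variable names below are ours, the counts are the paper's:

yes = {"better": 68, "same": 5, "worse": 3, "no_diff": 28}  # change was needed
no = {"better": 0, "same": 3, "worse": 0, "no_diff": 163}   # no change needed

recall = yes["better"] / sum(yes.values())  # 68 / 104
changed = sum(v for d in (yes, no) for k, v in d.items() if k != "no_diff")
precision = yes["better"] / changed         # 68 / 79

print(f"recall={recall:.0%}, precision={precision:.0%}")  # recall=65%, precision=86%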
- {
481
- "text": "There are two main reasons which bring down these rates. One reason is that TDMT does not know who or what the agent of the action in the utterance is; agents are also needed to select polite expressions. The other reason is that there are not enough rules and transfer dictionary entries for the clerk.",
482
- "cite_spans": [],
483
- "ref_spans": [],
484
- "eq_spans": [],
485
- "section": "An Experiment",
486
- "sec_num": "4"
487
- },
488
- {
489
- "text": "It is easier to take care of the latter problem than the former problem. If we resolve the latter problem, that is, if we expand the transfer rules and the transfer dictionary entries according to the \"participant's social role\" (a clerk and a customer), then the recall rate and the precision rate can be improved (to 86% and 96%, respectively, as we have found). As a result, we can say that our method is effective for smooth conversation with a dialogue translation system.",
490
- "cite_spans": [],
491
- "ref_spans": [],
492
- "eq_spans": [],
493
- "section": "An Experiment",
494
- "sec_num": "4"
495
- },
496
- {
497
- "text": "In general, extra-linguistic information is hard to obtain. However, some extra-linguistic information can be easily obtained:",
498
- "cite_spans": [],
499
- "ref_spans": [],
500
- "eq_spans": [],
501
- "section": "Discussion",
502
- "sec_num": "5"
503
- },
504
- {
505
- "text": "(1) One piece of information is the participant's social role, which can be obtained from the interface such as the microphone used. It was proven that a clerk and customer as the social roles of participants are useful for translation into Japanese. However, more research is required on another participant's social role.",
506
- "cite_spans": [],
507
- "ref_spans": [],
508
- "eq_spans": [],
509
- "section": "Discussion",
510
- "sec_num": "5"
511
- },
512
- {
513
- "text": "(2) Another piece of information is the participant's gender, which can be obtained by a speech recognizer with high accuracy (Takezawa and others, 1998; Naito and others, 1998) . We have considered how expressions can be useful by using the hearer's gender for Japanese-to-English translation.",
514
- "cite_spans": [
515
- {
516
- "start": 126,
517
- "end": 153,
518
- "text": "(Takezawa and others, 1998;",
519
- "ref_id": null
520
- },
521
- {
522
- "start": 154,
523
- "end": 177,
524
- "text": "Naito and others, 1998)",
525
- "ref_id": null
526
- }
527
- ],
528
- "ref_spans": [],
529
- "eq_spans": [],
530
- "section": "Discussion",
531
- "sec_num": "5"
532
- },
533
- {
534
- "text": "Let us consider the Japanese honorific title \"sama\" or \"san.\" If the heater's gender is male, then it should be translated \"Mr.\" and if the hearer's gender is female, then it should be translated \"Ms.\" as shown in Figure 7 . Additionally, the participant's gender is useful for translating typical expressions for males or females. For example, Japanese \"wa\" is often attached at the end of the utterance by females.",
535
- "cite_spans": [],
536
- "ref_spans": [
537
- {
538
- "start": 214,
539
- "end": 222,
540
- "text": "Figure 7",
541
- "ref_id": null
542
- }
543
- ],
544
- "eq_spans": [],
545
- "section": "Discussion",
546
- "sec_num": "5"
547
- },
548
- {
549
- "text": "It is also important for a dialogue translation system to use extra-linguistic information which the system can obtain easily, in order to make a conversation proceed smoothly and comfortably for humans using the translation system. We expect that other pieces of usable information can be easily obtained in the future. For example, age might be obtained from a cellular telephone if it were always carried by the same person and provided with personal information. In this case, if the system knew the hearer was a child, it could change complex expressions into easier ones.",
550
- "cite_spans": [],
551
- "ref_spans": [],
552
- "eq_spans": [],
553
- "section": "Discussion",
554
- "sec_num": "5"
555
- },
556
- {
557
- "text": "We have proposed a method of translation using information on dialogue participants, which is easily obtained from outside the translation component, and applied it to a dialogue translation system for travel arrangement. This method can select a polite expression for an utterance according to the \"participant's social role,\" which is easily determined by the interface (such as the wires to the microphones). For example, if the microphone is for the clerk (the speaker is a clerk), then the dialogue translation system can select a more polite expression.",
558
- "cite_spans": [],
559
- "ref_spans": [],
560
- "eq_spans": [],
561
- "section": "Conclusion",
562
- "sec_num": "6"
563
- },
564
- {
565
- "text": "In an English-to-Japanese translation system, we added additional transfer rules and transfer dictionary entries for the clerk to be more polite than the customer. Then, we conducted an experiment with 23 unseen dialogues (344 utterances). We evaluated the translation results to see whether the impressions of the results improved or not. Our method achieved a recall of 65% and a precision of 86%. These rates could easily be improved to 86% and 96%, respectively. Therefore, we can say that our method is effective for smooth conversation with a dialogue translation system.",
566
- "cite_spans": [],
567
- "ref_spans": [],
568
- "eq_spans": [],
569
- "section": "Conclusion",
570
- "sec_num": "6"
571
- },
572
- {
573
- "text": "Our proposal has a limitation in that if the system does not know who or what the agent of an action in an utterance is, it cannot appropriately select a polite expression. We are considering ways to enable identification of the agent of an action in an utterance and to expand the current framework to improve the level of politeness even more. In addition, we intend to apply other extra-linguistic information to a dialogue translation system.",
574
- "cite_spans": [],
575
- "ref_spans": [],
576
- "eq_spans": [],
577
- "section": "Conclusion",
578
- "sec_num": "6"
579
- }
580
- ],
581
- "back_matter": [],
582
- "bib_entries": {
583
- "BIBREF0": {
584
- "ref_id": "b0",
585
- "title": "Verbmobih The combination of deep and shallow processing for spontaneous speech translation",
586
- "authors": [
587
- {
588
- "first": "Thomas",
589
- "middle": [],
590
- "last": "Bub",
591
- "suffix": ""
592
- }
593
- ],
594
- "year": 1997,
595
- "venue": "the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97",
596
- "volume": "",
597
- "issue": "",
598
- "pages": "71--74",
599
- "other_ids": {},
600
- "num": null,
601
- "urls": [],
602
- "raw_text": "Thomas Bub et al. 1997. Verbmobih The combination of deep and shallow processing for spontaneous speech translation. In the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97, pages 71-74, Munich.",
603
- "links": null
604
- },
605
- "BIBREF2": {
606
- "ref_id": "b2",
607
- "title": "Proceedings of COLING-96",
608
- "authors": [],
609
- "year": null,
610
- "venue": "",
611
- "volume": "",
612
- "issue": "",
613
- "pages": "412--417",
614
- "other_ids": {},
615
- "num": null,
616
- "urls": [],
617
- "raw_text": "In Proceedings of COLING-96, pages 412-417, Copenhagen.",
618
- "links": null
619
- },
620
- "BIBREF3": {
621
- "ref_id": "b3",
622
- "title": "Towards translating spoken language pragmatics in an analogical framework",
623
- "authors": [
624
- {
625
- "first": "Keiko",
626
- "middle": [],
627
- "last": "Horiguchi",
628
- "suffix": ""
629
- }
630
- ],
631
- "year": 1997,
632
- "venue": "Proceedings of A CL/EA CL-97 workshop on Spoken Language Translation",
633
- "volume": "",
634
- "issue": "",
635
- "pages": "16--23",
636
- "other_ids": {},
637
- "num": null,
638
- "urls": [],
639
- "raw_text": "Keiko Horiguchi. 1997. Towards translating spoken language pragmatics in an analogical framework. In Proceedings of A CL/EA CL-97 workshop on Spoken Language Translation, pages 16-23, Madrid.",
640
- "links": null
641
- },
642
- "BIBREF4": {
643
- "ref_id": "b4",
644
- "title": "Automatic Speech Translation",
645
- "authors": [
646
- {
647
- "first": "Akira",
648
- "middle": [],
649
- "last": "Kurematsu",
650
- "suffix": ""
651
- },
652
- {
653
- "first": "Tsuyoshi",
654
- "middle": [],
655
- "last": "Morimoto",
656
- "suffix": ""
657
- }
658
- ],
659
- "year": 1996,
660
- "venue": "",
661
- "volume": "",
662
- "issue": "",
663
- "pages": "",
664
- "other_ids": {},
665
- "num": null,
666
- "urls": [],
667
- "raw_text": "Akira Kurematsu and Tsuyoshi Morimoto. 1996. Automatic Speech Translation. Gordon and Breach Publishers.",
668
- "links": null
669
- },
670
- "BIBREF5": {
671
- "ref_id": "b5",
672
- "title": "An architecture for dialogue management, context tracking, and pragmatic adaptation in spoken dialogue system",
673
- "authors": [
674
- {
675
- "first": "Susann",
676
- "middle": [],
677
- "last": "Luperfoy",
678
- "suffix": ""
679
- }
680
- ],
681
- "year": 1998,
682
- "venue": "Proceedings of COLING-A CL'98",
683
- "volume": "",
684
- "issue": "",
685
- "pages": "794--801",
686
- "other_ids": {},
687
- "num": null,
688
- "urls": [],
689
- "raw_text": "Susann LuperFoy et al. 1998. An architecture for dialogue management, context tracking, and pragmatic adaptation in spoken dialogue system. In Proceedings of COLING-A CL'98, pages 794-801, Montreal.",
690
- "links": null
691
- },
692
- "BIBREF6": {
693
- "ref_id": "b6",
694
- "title": "A situation-based approach to spoken dialogue translation between different social roles",
695
- "authors": [
696
- {
697
- "first": "Hideki",
698
- "middle": [],
699
- "last": "Mima",
700
- "suffix": ""
701
- }
702
- ],
703
- "year": 1997,
704
- "venue": "Proceedings of TMI-97",
705
- "volume": "",
706
- "issue": "",
707
- "pages": "176--183",
708
- "other_ids": {},
709
- "num": null,
710
- "urls": [],
711
- "raw_text": "Hideki Mima et al. 1997. A situation-based approach to spoken dialogue translation be- tween different social roles. In Proceedings of TMI-97, pages 176-183, Santa Fe.",
712
- "links": null
713
- },
714
- "BIBREF7": {
715
- "ref_id": "b7",
716
- "title": "Acoustic and language model for speech translation system ATR-MATRIX",
717
- "authors": [
718
- {
719
- "first": "Masaki",
720
- "middle": [],
721
- "last": "Naito",
722
- "suffix": ""
723
- }
724
- ],
725
- "year": 1998,
726
- "venue": "the Proceedings of the 1998 Spring Meeting of the Acoustical Society of Japan",
727
- "volume": "",
728
- "issue": "",
729
- "pages": "159--160",
730
- "other_ids": {},
731
- "num": null,
732
- "urls": [],
733
- "raw_text": "Masaki Naito et al. 1998. Acoustic and lan- guage model for speech translation system ATR-MATRIX. In the Proceedings of the 1998 Spring Meeting of the Acoustical Soci- ety of Japan, pages 159-160 (in Japanese).",
734
- "links": null
735
- },
736
- "BIBREF8": {
737
- "ref_id": "b8",
738
- "title": "Hybrid language processing in the spoken language translator",
739
- "authors": [
740
- {
741
- "first": "Manny",
742
- "middle": [],
743
- "last": "Rayner",
744
- "suffix": ""
745
- },
746
- {
747
- "first": "David",
748
- "middle": [],
749
- "last": "Carter",
750
- "suffix": ""
751
- }
752
- ],
753
- "year": 1997,
754
- "venue": "the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97",
755
- "volume": "",
756
- "issue": "",
757
- "pages": "107--110",
758
- "other_ids": {},
759
- "num": null,
760
- "urls": [],
761
- "raw_text": "Manny Rayner and David Carter. 1997. Hy- brid language processing in the spoken lan- guage translator. In the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97, pages 107-110, Mu- nich.",
762
- "links": null
763
- },
764
- "BIBREF9": {
765
- "ref_id": "b9",
766
- "title": "An interactive domain independent approach to robust dialogue interpretation",
767
- "authors": [
768
- {
769
- "first": "Carolyn",
770
- "middle": [],
771
- "last": "Penstein Ros~",
772
- "suffix": ""
773
- },
774
- {
775
- "first": "Lori",
776
- "middle": [
777
- "S"
778
- ],
779
- "last": "Levin",
780
- "suffix": ""
781
- }
782
- ],
783
- "year": 1998,
784
- "venue": "Proceedings of COLING-ACL'98",
785
- "volume": "",
786
- "issue": "",
787
- "pages": "1129--1135",
788
- "other_ids": {},
789
- "num": null,
790
- "urls": [],
791
- "raw_text": "Carolyn Penstein Ros~ and Lori S. Levin. 1998. An interactive domain independent approach to robust dialogue interpretation. In Proceed- ings of COLING-ACL'98, pages 1129-1135, Montreal.",
792
- "links": null
793
- },
794
- "BIBREF10": {
795
- "ref_id": "b10",
796
- "title": "Solutions to problems inherent in spoken-language translation: The ATR-MATRIX approach",
797
- "authors": [
798
- {
799
- "first": "Eiichiro",
800
- "middle": [],
801
- "last": "Sumita",
802
- "suffix": ""
803
- }
804
- ],
805
- "year": 1999,
806
- "venue": "the Machine Translation Summit VII",
807
- "volume": "",
808
- "issue": "",
809
- "pages": "229--235",
810
- "other_ids": {},
811
- "num": null,
812
- "urls": [],
813
- "raw_text": "Eiichiro Sumita et al. 1999. Solutions to prob- lems inherent in spoken-language translation: The ATR-MATRIX approach. In the Ma- chine Translation Summit VII, pages 229- 235, Singapore.",
814
- "links": null
815
- },
816
- "BIBREF11": {
817
- "ref_id": "b11",
818
- "title": "A Japaneseto-English speech translation system: ATR-MATRIX",
819
- "authors": [
820
- {
821
- "first": "Toshiyuki",
822
- "middle": [],
823
- "last": "Takezawa",
824
- "suffix": ""
825
- }
826
- ],
827
- "year": 1998,
828
- "venue": "the 5th International Conference On Spoken Language Processing: ICSLP-98",
829
- "volume": "",
830
- "issue": "",
831
- "pages": "2779--2782",
832
- "other_ids": {},
833
- "num": null,
834
- "urls": [],
835
- "raw_text": "Toshiyuki Takezawa et al. 1998. A Japanese- to-English speech translation system: ATR- MATRIX. In the 5th International Con- ference On Spoken Language Processing: ICSLP-98, pages 2779-2782, Sydney.",
836
- "links": null
837
- },
838
- "BIBREF12": {
839
- "ref_id": "b12",
840
- "title": "Finite-state speech-tospeech translation",
841
- "authors": [
842
- {
843
- "first": "Enrique",
844
- "middle": [],
845
- "last": "Vidal",
846
- "suffix": ""
847
- }
848
- ],
849
- "year": 1997,
850
- "venue": "the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97",
851
- "volume": "",
852
- "issue": "",
853
- "pages": "111--114",
854
- "other_ids": {},
855
- "num": null,
856
- "urls": [],
857
- "raw_text": "Enrique Vidal. 1997. Finite-state speech-to- speech translation. In the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97, pages 111-114, Mu- nich.",
858
- "links": null
859
- },
860
- "BIBREF13": {
861
- "ref_id": "b13",
862
- "title": "An experiment on Korean-to-English and Korean-to-Japanese spoken language translation",
863
- "authors": [
864
- {
865
- "first": "Jae-Woo",
866
- "middle": [],
867
- "last": "Yang",
868
- "suffix": ""
869
- },
870
- {
871
- "first": "Jun",
872
- "middle": [],
873
- "last": "Park",
874
- "suffix": ""
875
- }
876
- ],
877
- "year": 1997,
878
- "venue": "the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97",
879
- "volume": "",
880
- "issue": "",
881
- "pages": "87--90",
882
- "other_ids": {},
883
- "num": null,
884
- "urls": [],
885
- "raw_text": "Jae-Woo Yang and Jun Park. 1997. An exper- iment on Korean-to-English and Korean-to- Japanese spoken language translation. In the 1997 International Conference on Acoustics, Speech, and Signal Processing: ICASSP 97, pages 87-90, Munich.",
886
- "links": null
887
- }
888
- },
889
- "ref_entries": {
890
- "FIGREF0": {
891
- "uris": null,
892
- "text": "Transfer rule format with information on dialogue participants (((source word 1) --* (target word 11) :cond 11 I (source word 1) -* (target word 12) :cond 12 I I Transfer rule example with the participant's gender",
893
- "type_str": "figure",
894
- "num": null
895
- },
896
- "FIGREF1": {
897
- "uris": null,
898
- "text": "Transfer rule example with a participant's role (((\"payment\") --~ (\"oshiharai\") :s-role clerk ( \"payment\" ) ---* ( \"shiharai\" )) ((\"we\") --* (\"watashidomo\") :s-role clerk (\"we\") --~ (\"watashltachi\")))Figure 9: Transfer dictionary example with a speaker's role",
899
- "type_str": "figure",
900
- "num": null
901
- },
902
- "TABREF0": {
903
- "text": "",
904
- "html": null,
905
- "content": "<table><tr><td/><td/><td colspan=\"4\">: Examples of polite expressions</td></tr><tr><td>Type:</td><td/><td colspan=\"2\">verb, title</td><td/></tr><tr><td>Eng:</td><td/><td colspan=\"3\">How will you be paying, Mr. Suzuki</td></tr><tr><td colspan=\"5\">Standard: donoyouni shiharaimasu-ka</td><td>suzuki-san</td></tr><tr><td colspan=\"2\">Polite:</td><td colspan=\"4\">donoyouni o_shiharaininarimasu-ka suzuki-sama</td></tr><tr><td colspan=\"2\">Gloss:</td><td>How</td><td>pay-QUESTION</td><td/><td>suzuki-Mr.</td></tr><tr><td>Type:</td><td/><td colspan=\"2\">verb, common noun</td><td/></tr><tr><td>Eng:</td><td/><td colspan=\"3\">We have two types of rooms available</td></tr><tr><td colspan=\"3\">Standard: aiteiru</td><td>ni-shurui-no</td><td>heya-ga</td><td>ariraasu</td></tr><tr><td colspan=\"2\">Polite:</td><td>aiteiru</td><td>ni-shurui-no</td><td>oheya-ga</td><td>gozaimasu</td></tr><tr><td colspan=\"2\">Gloss:</td><td colspan=\"4\">available two-types-of room-TOP</td><td>have</td></tr><tr><td>Type:</td><td/><td colspan=\"2\">auxiliary verb</td><td/></tr><tr><td>Eng:</td><td/><td colspan=\"2\">You can shop for hours</td><td/></tr><tr><td colspan=\"3\">Standard: suujikan</td><td colspan=\"3\">kaimono-wo surukotogadekimasu</td></tr><tr><td colspan=\"2\">Polite:</td><td>suujikan</td><td colspan=\"3\">kaimono-wo shiteitadakemasu</td></tr><tr><td colspan=\"2\">Gloss:</td><td colspan=\"2\">for hours make-OBJ</td><td>can</td></tr><tr><td>Type:</td><td/><td>pronoun</td><td/><td/></tr><tr><td>Eng:</td><td/><td colspan=\"2\">Your room number, please</td><td/></tr><tr><td colspan=\"3\">Standard: anatano</td><td colspan=\"2\">heya bangou-wo</td><td>onegaishirnasu</td></tr><tr><td colspan=\"2\">Polite:</td><td colspan=\"3\">okyakusamano heya bangou-wo</td><td>onegaishimasu</td></tr><tr><td colspan=\"2\">Gloss:</td><td>Your</td><td colspan=\"3\">room number-so obj</td><td>please</td></tr><tr><td>Type:</td><td/><td colspan=\"2\">canned phrase</td><td/></tr><tr><td>Eng:</td><td/><td colspan=\"2\">How can I help you</td><td/></tr><tr><td colspan=\"3\">Standard: dou</td><td>shimashitaka</td><td/></tr><tr><td colspan=\"2\">Polite:</td><td colspan=\"2\">douitta goyoukendeshouka</td><td/></tr><tr><td colspan=\"2\">Gloss:</td><td>How</td><td>can I help you</td><td/></tr><tr><td colspan=\"2\">Example (1)</td><td/><td/><td/></tr><tr><td>Eng:</td><td colspan=\"3\">We accept payment by credit card</td><td/></tr><tr><td colspan=\"6\">Standard: watashitachi-wa kurejitlo-kaado-deno shiharai-wo</td><td>ukelsukemasu</td></tr><tr><td>Polite:</td><td/><td/><td/><td/></tr></table>",
906
- "num": null,
907
- "type_str": "table"
908
- },
909
- "TABREF1": {
910
- "text": "",
911
- "html": null,
912
- "content": "<table><tr><td colspan=\"3\">The number of utterances to be</td></tr><tr><td>changed or not</td><td/><td/></tr><tr><td colspan=\"3\">Necessity | The number</td></tr><tr><td colspan=\"3\">of change I of utterances Yes 104</td></tr><tr><td>No</td><td/><td>166</td></tr><tr><td>Out of scope</td><td/><td>74</td></tr><tr><td>Total</td><td>[</td><td>344</td></tr><tr><td colspan=\"3\">* 74 translations were too poor to handle for the</td></tr><tr><td colspan=\"3\">\"politeness\" problem, and so they are ignored in this</td></tr><tr><td>paper.</td><td/><td/></tr></table>",
913
- "num": null,
914
- "type_str": "table"
915
- },
916
- "TABREF3": {
917
- "text": "",
918
- "html": null,
919
- "content": "<table><tr><td colspan=\"3\">: Evaluation on using the speaker's role</td></tr><tr><td>Necessity</td><td>~ Impression</td><td>The number</td></tr><tr><td>of change</td><td/><td>of utterances</td></tr><tr><td>Yes (lo4)</td><td>better same</td><td>68 5</td></tr><tr><td/><td>worse</td><td>3</td></tr><tr><td/><td>no-diff</td><td>28</td></tr><tr><td>No (166)</td><td>better s alTle</td><td>0 3</td></tr><tr><td/><td>worse</td><td>0</td></tr><tr><td/><td>no-diff</td><td>163</td></tr></table>",
920
- "num": null,
921
- "type_str": "table"
922
- }
923
- }
924
- }
925
- }
Full_text_JSON/prefixA/json/A00/A00-1007.json DELETED
@@ -1,1155 +0,0 @@
1
- {
2
- "paper_id": "A00-1007",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:00.159666Z"
6
- },
7
- "title": "Distilling dialogues -A method using natural dialogue dialogue systems development",
8
- "authors": [
9
- {
10
- "first": "Arne",
11
- "middle": [],
12
- "last": "Jsnsson",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "LinkSping University",
17
- "location": {
18
- "addrLine": "S-581 83",
19
- "country": "LINKOPING SWEDEN"
20
- }
21
- },
22
- "email": ""
23
- },
24
- {
25
- "first": "Nils",
26
- "middle": [],
27
- "last": "Dahlb~ick",
28
- "suffix": "",
29
- "affiliation": {
30
- "laboratory": "",
31
- "institution": "LinkSping University",
32
- "location": {
33
- "addrLine": "S-581 83",
34
- "country": "LINKOPING SWEDEN"
35
- }
36
- },
37
- "email": ""
38
- }
39
- ],
40
- "year": "",
41
- "venue": null,
42
- "identifiers": {},
43
- "abstract": "We report on a method for utilising corpora collected in natural settings. It is based on distilling (re-writing) natural dialogues to elicit the type of dialogue that would occur if one the dialogue participants was a computer instead of a human. The method is a complement to other means such as Wizard of Oz-studies and un-distilled natural dialogues. We present the distilling method and guidelines for distillation. We also illustrate how the method affects a corpus of dialogues and discuss the pros and cons of three approaches in different phases of dialogue systems development.",
44
- "pdf_parse": {
45
- "paper_id": "A00-1007",
46
- "_pdf_hash": "",
47
- "abstract": [
48
- {
49
- "text": "We report on a method for utilising corpora collected in natural settings. It is based on distilling (re-writing) natural dialogues to elicit the type of dialogue that would occur if one the dialogue participants was a computer instead of a human. The method is a complement to other means such as Wizard of Oz-studies and un-distilled natural dialogues. We present the distilling method and guidelines for distillation. We also illustrate how the method affects a corpus of dialogues and discuss the pros and cons of three approaches in different phases of dialogue systems development.",
50
- "cite_spans": [],
51
- "ref_spans": [],
52
- "eq_spans": [],
53
- "section": "Abstract",
54
- "sec_num": null
55
- }
56
- ],
57
- "body_text": [
58
- {
59
- "text": "It has been known for quite some time now, that the language used when interacting with a computer is different from the one used in dialogues between people, (c.f. JSnsson and Dahlb~ick (1988) ). Given that we know that the language will be different, but not how it will be different, we need to base our development of natural language dialogue systems on a relevant set of dialogue corpora. It is our belief that we need to clarify a number of different issues regarding the collection and use of corpora in the development of speech-only and multimodal dialogue systems. Exchanging experiences and developing guidelines in this area are as important as, and in some sense a necessary pre-requisite to, the development of computational models of speech, language, and dialogue/discourse. It is interesting to note the difference in the state of art in the field of natural language dialogue systems with that of corpus linguistics, where issues of the usefulness of different samples, the necessary sampling size, representativeness in corpus design and other have been discussed for quite some time (e.g. (Garside et al., 1997; Atkins et al., 1992; Crowdy, 1993; Biber, 1993) ). Also the neighboring area of evaluation of NLP systems (for an overview, see Sparck Jones and Galliers (1996) ) seems to have advanced further. Some work have been done in the area of natural language dialogue systems, e.g. on the design of Wizard of Oz-studies (Dahlb~ck et al., 1998) , on measures for inter-rater reliability (Carletta, 1996) , on frameworks for evaluating spoken dialogue agents (Walker et al., 1998) and on the use of different corpora in the development of a particular system (The Carnegie-Mellon Communicator, Eskenazi et al. (1999) ).",
60
- "cite_spans": [
61
- {
62
- "start": 165,
63
- "end": 193,
64
- "text": "JSnsson and Dahlb~ick (1988)",
65
- "ref_id": null
66
- },
67
- {
68
- "start": 1110,
69
- "end": 1132,
70
- "text": "(Garside et al., 1997;",
71
- "ref_id": "BIBREF12"
72
- },
73
- {
74
- "start": 1133,
75
- "end": 1153,
76
- "text": "Atkins et al., 1992;",
77
- "ref_id": "BIBREF1"
78
- },
79
- {
80
- "start": 1154,
81
- "end": 1167,
82
- "text": "Crowdy, 1993;",
83
- "ref_id": "BIBREF4"
84
- },
85
- {
86
- "start": 1168,
87
- "end": 1180,
88
- "text": "Biber, 1993)",
89
- "ref_id": "BIBREF2"
90
- },
91
- {
92
- "start": 1278,
93
- "end": 1293,
94
- "text": "Galliers (1996)",
95
- "ref_id": "BIBREF17"
96
- },
97
- {
98
- "start": 1446,
99
- "end": 1469,
100
- "text": "(Dahlb~ck et al., 1998)",
101
- "ref_id": null
102
- },
103
- {
104
- "start": 1512,
105
- "end": 1528,
106
- "text": "(Carletta, 1996)",
107
- "ref_id": "BIBREF3"
108
- },
109
- {
110
- "start": 1583,
111
- "end": 1604,
112
- "text": "(Walker et al., 1998)",
113
- "ref_id": "BIBREF18"
114
- },
115
- {
116
- "start": 1718,
117
- "end": 1740,
118
- "text": "Eskenazi et al. (1999)",
119
- "ref_id": "BIBREF9"
120
- }
121
- ],
122
- "ref_spans": [],
123
- "eq_spans": [],
124
- "section": "Introduction",
125
- "sec_num": "1"
126
- },
127
- {
128
- "text": "The question we are addressing in this paper is how to collect and analyse relevant corpora. We begin by describing what we consider to be the main advantages and disadvantages of the two currently used methods; studies of human dialogues and Wizard of Oz-dialogues, especially focusing on the ecological validity of the methods. We then describe a method called 'distilling dialogues', which can serve as a supplement to the other two.",
129
- "cite_spans": [],
130
- "ref_spans": [],
131
- "eq_spans": [],
132
- "section": "Introduction",
133
- "sec_num": "1"
134
- },
135
- {
136
- "text": "The advantage of using real dialogues between people is that they will illustrate which tasks and needs that people actually bring to a particular service provider. Thus, on the level of the users' general goals, such dialogues have a high validity. But there are two drawbacks here. First; it is not self-evident that users will have the same task expectations from a computer system as they have with a person. Second, the language used will differ from the language used when interacting with a computer. These two disadvantages have been the major force behind the development of Wizard of Ozmethods. The advantage here is that the setting will be human-computer interaction. But there are important disadvantages, too. First, on the practical side, the task of setting up a high quality simulation environment and training the operators ('wizards') to use this is a resource consuming task (Dahlb~ck et al., 1998) . Second, and probably even more important, is that we cannot then observe real users using a system for real life tasks, where they bring their own needs, motivations, resources, and constraints to bear. To some extent this problem can be overcome using well-designed so called 'scenarios'. As pointed out in Dahlb~ck (1991), on many levels of analysis the artificiality of the situation will not af-fect the language used. An example of this is the pattern of pronoun-antecedent relations. But since the tasks given to the users are often pre-described by the researchers, this means that this is not a good way of finding out which tasks the users actually want to perform. Nor does it provide a clear enough picture on how the users will act to find something that satisfies their requirements. If e.g. the task is one of finding a charter holiday trip or buying a TVset within a specified set of constraints (economical and other), it is conceivable that people will stay with the first item that matches the specification, whereas in real life they would probably look for alternatives. In our experience, this is primarily a concern if the focus is on the users' goals and plans, but is less a problem when the interest is on lowerlevel aspects, such as, syntax or patterns of pronounantecedent relationship (c.f. Dahlb~ick (1991)).",
137
- "cite_spans": [
138
- {
139
- "start": 895,
140
- "end": 918,
141
- "text": "(Dahlb~ck et al., 1998)",
142
- "ref_id": null
143
- }
144
- ],
145
- "ref_spans": [],
146
- "eq_spans": [],
147
- "section": "Natural and Wizard of Oz-Dialogues",
148
- "sec_num": "2"
149
- },
150
- {
151
- "text": "To summarize; real life dialogues will provide a reasonably correct picture of the way users' approach their tasks, and what tasks they bring to the service provider, but the language used will not give a good approximation of what the system under construction will need to handle. Wizard of Ozdialogues, on the other hand, will give a reasonable approximation of some aspects of the language used, but in an artificial context.",
152
- "cite_spans": [],
153
- "ref_spans": [],
154
- "eq_spans": [],
155
- "section": "Natural and Wizard of Oz-Dialogues",
156
- "sec_num": "2"
157
- },
158
- {
159
- "text": "The usual approach has been to work in three steps. First analyse real human dialogues, and based on these, in the second phase, design one or more Wizard of Oz-studies. The final step is to fine-tune the system's performance on real users. A good example of this method is presented in Eskenazi et al. (1999) . But there are also possible problems with this approach (though we are not claiming that this was the case in their particular project). Eskenazi et al. (1999) asked a human operator to act 'computerlike' in their Wizard of Oz-phase. The advantage is of course that the human operator will be able to perform all the tasks that is usually provided by this service. The disadvantage is that it puts a heavy burden on the human operator to act as a computer. Since we know that lay-persons' ideas of what computers can and cannot do are in many respects far removed from what is actually the case, we risk introducing some systematic distortion here. And since it is difficult to perform consistently in similar situations, we also risk introducing non-systematic distortion here, even in those cases when the 'wizard' is an NLP-professional.",
160
- "cite_spans": [
161
- {
162
- "start": 287,
163
- "end": 309,
164
- "text": "Eskenazi et al. (1999)",
165
- "ref_id": "BIBREF9"
166
- },
167
- {
168
- "start": 449,
169
- "end": 471,
170
- "text": "Eskenazi et al. (1999)",
171
- "ref_id": "BIBREF9"
172
- }
173
- ],
174
- "ref_spans": [],
175
- "eq_spans": [],
176
- "section": "Natural and Wizard of Oz-Dialogues",
177
- "sec_num": "2"
178
- },
179
- {
180
- "text": "Our suggestion is therefore to supplement the above mentioned methods, and bridge the gap between them, by post-processing human dialogues to give them a computer-like quality. The advantage, compared to having people do the simulation on the fly, is both that it can be done with more consistency, and also that it can be done by researchers that actually know what human-computer natural language dialogues can look like. A possible disadvantage with using both Wizard of Oz-and real computer dialogues, is that users will quickly adapt to what the system can provide them with, and will therefore not try to use it for tasks they know it cannot perform. Consequently, we will not get a full picture of the different services they would like the system to provide.",
181
- "cite_spans": [],
182
- "ref_spans": [],
183
- "eq_spans": [],
184
- "section": "Natural and Wizard of Oz-Dialogues",
185
- "sec_num": "2"
186
- },
187
- {
188
- "text": "A disadvantage with this method is, of course, that post-processing takes some time compared to using the natural dialogues as they are. There is also a concern on the ecological validity of the results, as discussed later.",
189
- "cite_spans": [],
190
- "ref_spans": [],
191
- "eq_spans": [],
192
- "section": "Natural and Wizard of Oz-Dialogues",
193
- "sec_num": "2"
194
- },
195
- {
196
- "text": "Distilling dialogues, i.e. re-writing human interactions in order to have them reflect what a humancomputer interaction could look like involves a number of considerations. The main issue is that in corpora of natural dialogues one of the interlocutors is not a dialogue system. The system's task is instead performed by a human and the problem is how to anticipate the behaviour of a system that does not exist based on the performance of an agent with different performance characteristics. One important aspect is how to deal with human features that are not part of what the system is supposed to be able to handle, for instance if the user talks about things outside of the domain, such as discussing an episode of a recent TV show. It also involves issues on how to handle situations where one of the interlocuters discusses with someone else on a different topic, e.g. discussing the up-coming Friday party with a friend in the middle of an information providing dialogue with a customer.",
197
- "cite_spans": [],
198
- "ref_spans": [],
199
- "eq_spans": [],
200
- "section": "Distilling dialogues",
201
- "sec_num": "3"
202
- },
203
- {
204
- "text": "It is important for the distilling process to have at least an outline of the dialogue system that is under development: Will it for instance have the capacity to recognise users' goals, even if not explicitly stated? Will it be able to reason about the discourse domain? What services will it provide, and what will be outside its capacity to handle?",
205
- "cite_spans": [],
206
- "ref_spans": [],
207
- "eq_spans": [],
208
- "section": "Distilling dialogues",
209
- "sec_num": "3"
210
- },
211
- {
212
- "text": "In our case, we assume that the planned dialogue system has the ability to reason on various aspects of dialogue and properties of the application. In our current work, and in the examples used for illustration in this paper, we assume a dialogue model that can handle any relevant dialogue phenomenon and also an interpreter and speech recogniser being able to understand any user input that is relevant to the task. There is is also a powerful domain reasoning module allowing for more or less any knowledge reasoning on issues that can be accomplished within the domain (Flycht-Eriksson, 1999 ). Our current system does, however, not have an explicit user task model, as opposed to a system task model , which is included, and thus, we can not assume that the 'system' remembers utterances where the user explains its task. Furthermore, as our aim is system development we will not consider interaction outside the systems capabilities as relevant to include in the distilled dialogues.",
213
- "cite_spans": [
214
- {
215
- "start": 573,
216
- "end": 595,
217
- "text": "(Flycht-Eriksson, 1999",
218
- "ref_id": "BIBREF11"
219
- }
220
- ],
221
- "ref_spans": [],
222
- "eq_spans": [],
223
- "section": "Distilling dialogues",
224
- "sec_num": "3"
225
- },
226
- {
227
- "text": "The context of our work is the development a multi-modal dialogue system. However, in our current work with distilling dialogues, the abilities of a multi-modal system were not fully accounted for. The reason for this is that the dialogues would be significantly affected, e.g. a telephone conversation where the user always likes to have the next connection, please will result in a table if multi-modal output is possible and hence a fair amount of the dialogne is removed. We have therefore in this paper analysed the corpus assuming a speech-only system, since this is closer to the original telephone conversations, and hence needs fewer assumptions on system performance when distilling the dialogues.",
228
- "cite_spans": [],
229
- "ref_spans": [],
230
- "eq_spans": [],
231
- "section": "Distilling dialogues",
232
- "sec_num": "3"
233
- },
234
- {
235
- "text": "Distilling dialogues requires guidelines for how to handle various types of utterances. In this section we will present our guidelines for distilling a corpus of telephone conversations between a human information provider on local buses 1 to be used for developing a multimodal dialogue system (Qvarfordt and JSnsson, 1998; Flycht-Eriksson and JSnsson, 1998; Qvarfordt, 1998) . Similar guidelines are used within another project on developing Swedish Dialogue Systems where the domain is travel bureau information. We can distinguish three types of contributors: 'System' (i.e. a future systems) utterances, User utterances, and other types, such as moves by other speakers, and noise.",
236
- "cite_spans": [
237
- {
238
- "start": 295,
239
- "end": 324,
240
- "text": "(Qvarfordt and JSnsson, 1998;",
241
- "ref_id": "BIBREF15"
242
- },
243
- {
244
- "start": 325,
245
- "end": 359,
246
- "text": "Flycht-Eriksson and JSnsson, 1998;",
247
- "ref_id": "BIBREF10"
248
- },
249
- {
250
- "start": 360,
251
- "end": 376,
252
- "text": "Qvarfordt, 1998)",
253
- "ref_id": "BIBREF16"
254
- }
255
- ],
256
- "ref_spans": [],
257
- "eq_spans": [],
258
- "section": "Distillation guidelines",
259
- "sec_num": "4"
260
- },
261
- {
262
- "text": "The problem of modifying 'system' utterances can be divided into two parts: how to change and when to change. They are in some respects intertwined, but as the how-part affects the when-part more we will take this as a starting point.",
263
- "cite_spans": [],
264
- "ref_spans": [],
265
- "eq_spans": [],
266
- "section": "Modifying system utterances",
267
- "sec_num": "4.1"
268
- },
269
- {
270
- "text": "\u2022 The 'system' provides as much relevant information as possible at once. This depends on the capabilities of the systems output modalities. If we have a screen or similar output device we present as much as possible which normally is all relevant information. If we, on the other hand, only have spoken output the amount of information that the hearer can interpret in one utterance must be considered when 1The bus time table dialogues are collected at LinkSping University and are available (in Swedish) on http://www.ida.liu.se/~arnjo/kfb/dialoger.html distilling. The system might in such cases provide less information. The principle of providing all relevant information is based on the assumption that a computer system often has access to all relevant information when querying the background system and can also present it more conveniently, especially in a multimodal system (Ahrenberg et al., 1996) . A typical example is the dialogue fragment in figure 1. In this fragment the system provides information on what train to take and how to change to a bus. The result of distilling this fragment provides the revised fragment of figure 2. As seen in the fragment of figure 2 we also remove a number of utterances typical for human interaction, as discussed below.",
271
- "cite_spans": [
272
- {
273
- "start": 886,
274
- "end": 910,
275
- "text": "(Ahrenberg et al., 1996)",
276
- "ref_id": "BIBREF0"
277
- }
278
- ],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "Modifying system utterances",
282
- "sec_num": "4.1"
283
- },
284
- {
285
- "text": "* System utterances are made more computer-like and do not include irrelevant information. The latter is seen in $9 in the dialogue in figure 3 where the provided information is not relevant. It could also be possible to remove $5 and respond with $7 at once. This, however, depends on if the information grounded in $5-U6 is needed for the 'system' in order to know the arrival time or if that could be concluded from U4. This in turn depends on the system's capabilities. If we assume that the dialogue system has a model of user tasks, the information in $5-U6 could have been concluded from that. We will, in this case, retain $5-U6 as we do not assume a user task model and in order to stay as close to the original dialogue as possible.",
286
- "cite_spans": [],
287
- "ref_spans": [
288
- {
289
- "start": 135,
290
- "end": 143,
291
- "text": "figure 3",
292
- "ref_id": null
293
- }
294
- ],
295
- "eq_spans": [],
296
- "section": "Modifying system utterances",
297
- "sec_num": "4.1"
298
- },
299
- {
300
- "text": "The next problem concerns the case when 'system' utterances are changed or removed.",
301
- "cite_spans": [],
302
- "ref_spans": [],
303
- "eq_spans": [],
304
- "section": "Modifying system utterances",
305
- "sec_num": "4.1"
306
- },
307
- {
308
- "text": "\u2022 Dialogue contributions provided by something or someone other than the user or the 'system' are removed. These are regarded as not being part of the interaction. This means that if someone interrupts the current interaction, say that the telephone rings during a face-to-face interaction, the interrupting interaction is normally removed from the corpus. Furthermore, 'system' interruptions are removed. A human can very well interrupt another human interlocuter, but a computer system will not do that. However, this guideline could lead to problems, for instance, when users follow up such interruptions. If no information is provided or the interrupted sequence does not affect the dialogue, we have no problems removing the interruption.",
309
- "cite_spans": [],
310
- "ref_spans": [],
311
- "eq_spans": [],
312
- "section": "Modifying system utterances",
313
- "sec_num": "4.1"
314
- },
315
- {
316
- "text": "The problem is what to do when information from the 'system' is used in the continuing dialogue. For such cases we have no fixed strategy, : no the bus does not run on sundays U6: how can you (.) can you take the train and then change some way (.) because (.) to MjSlby and so $7: you can take the train from LinkSping fourteen and twenty nine and then you will change at MjSlby station to bus six hundred sixty at fifteen and ten the dialogue needs to be rearranged depending on how the information is to be used (c.f. the discussion in the final section of this paper).",
317
- "cite_spans": [
318
- {
319
- "start": 256,
320
- "end": 259,
321
- "text": "(.)",
322
- "ref_id": null
323
- }
324
- ],
325
- "ref_spans": [],
326
- "eq_spans": [],
327
- "section": "Modifying system utterances",
328
- "sec_num": "4.1"
329
- },
330
- {
331
- "text": "\u2022 'System' utterances which are no longer valid are removed. Typical examples of this are the utterances $7, $9, $11 and $13 in the dialogue fragment of figure 1.",
332
- "cite_spans": [],
333
- "ref_spans": [],
334
- "eq_spans": [],
335
- "section": "Modifying system utterances",
336
- "sec_num": "4.1"
337
- },
338
- {
339
- "text": "* Remove sequences of utterances where the 'system' behaves in a way a computer would not do.",
340
- "cite_spans": [],
341
- "ref_spans": [],
342
- "eq_spans": [],
343
- "section": "Modifying system utterances",
344
- "sec_num": "4.1"
345
- },
346
- {
347
- "text": "For instance jokes, irony, humor, commenting on the other dialogue participant, or dropping the telephone (or whatever is going on in $7 in figure 4). A common case of this is when the 'system' is talking while looking for information, $5 in the dialogue fragment of figure 4 is an example of this. Related to this is when the system provides its own comments. If we can assume that it has such capabilities they are included, otherwise we remove them.",
348
- "cite_spans": [],
349
- "ref_spans": [],
350
- "eq_spans": [],
351
- "section": "Modifying system utterances",
352
- "sec_num": "4.1"
353
- },
354
- {
355
- "text": "The system does not repeat information that has already been provided unless explicitly asked to do so. In human interaction it is not uncommon to repeat what has been uttered for purposes other than to provide grounding information or feedback. This is for instance common during U4: 'n' I must be at Resecentrum before fourteen and thirty five (.) 'cause we will going to the interstate buses ja ska va p~ rececentrum innan ]jorton ~ trettifem (.) f5 vi ska till l~ngf~irdsbussarna $5: aha (.) 'n' then you must be there around twenty past two something then jaha (.) \u2022 The system does not ask for information it has already achieved. For instance asking again if it is on Sunday as in $9 in figure 1. This is not uncommon in human interaction and such utterances from the user are not removed. However, we can assume that the dialogue system does not forget what has been talked about before.",
356
- "cite_spans": [
357
- {
358
- "start": 566,
359
- "end": 569,
360
- "text": "(.)",
361
- "ref_id": null
362
- }
363
- ],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Modifying system utterances",
367
- "sec_num": "4.1"
368
- },
369
- {
370
- "text": "The general rule is to change user utterances as little as possible. The reason for this is that we do not want to develop systems where the user needs to restrict his/her behaviour to the capabilities of the dialogue system. However, there are certain changes made to user utterances, in most cases as a consequence of changes of system utterances.",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Modifying user utterances",
375
- "sec_num": "4.2"
376
- },
377
- {
378
- "text": "Utterances that are no longer valid are removed.",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Modifying user utterances",
383
- "sec_num": "4.2"
384
- },
385
- {
386
- "text": "The most common cases are utterances whose request has already been answered, as seen in the distilled dialogue in figure 2 of the dialogue in figure 1.",
387
- "cite_spans": [],
388
- "ref_spans": [],
389
- "eq_spans": [],
390
- "section": "Modifying user utterances",
391
- "sec_num": "4.2"
392
- },
393
- {
394
- "text": "Sl1: sixteen fifty five sexton ]emti/em U12: sixteen fifty five (.) aha sexton femti/em (.) jaha S13: bus line four hundred thirty five linje ]yrahundra tretti/em Figure 5 : Dialogue fragment from a natural bus timetable interaction",
395
- "cite_spans": [],
396
- "ref_spans": [
397
- {
398
- "start": 163,
399
- "end": 171,
400
- "text": "Figure 5",
401
- "ref_id": null
402
- }
403
- ],
404
- "eq_spans": [],
405
- "section": "Modifying user utterances",
406
- "sec_num": "4.2"
407
- },
408
- {
409
- "text": "\u2022 Utterances are removed where the user discusses things that are in the environment. For instance commenting the 'systems' clothes or hair. This also includes other types of communicative signals such as laughter based on things outside the interaction, for instance, in the environment of the interlocuters.",
410
- "cite_spans": [],
411
- "ref_spans": [],
412
- "eq_spans": [],
413
- "section": "Modifying user utterances",
414
- "sec_num": "4.2"
415
- },
416
- {
417
- "text": "\u2022 User utterances can also be added in order to make the dialogue continue. In the dialogue in figure 5 there is nothing in the dialogue explaining why the system utters S13. In such cases we need to add a user utterance, e.g. Which bus is that?. However, it might turn out that there are cues, such as intonation, found when listening to the tapes. If such detailed analyses are carried out, we will, of course, not need to add utterances. Furthermore, it is sometimes the case that the telephone operator deliberately splits the information into chunks that can be comprehended by the user, which then must be considered in the distillation.",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "Modifying user utterances",
422
- "sec_num": "4.2"
423
- },
424
- {
425
- "text": "To illustrate the method we will in this section try to characterise the results from our distillations. The illustration is based on 39 distilled dialogues from the previously mentioned corpus collected with a telephone operator having information on local bus time-tables and persons calling the information service. The distillation took about three hours for all 39 dialogues, i.e. it is reasonably fast. The distilled dialogues are on the average 27% shorter. However, this varies between the dialogues, at most 73% was removed but there were also seven dialogues that were not changed at all.",
426
- "cite_spans": [],
427
- "ref_spans": [],
428
- "eq_spans": [],
429
- "section": "Applying the method",
430
- "sec_num": "5"
431
- },
432
- {
433
- "text": "At the most 34 utterances where removed from one single dialogue and that was from a dialogue with discussions on where to find a parking lot, i.e. discussions outside the capabilities of the application. There was one more dialogue where more than 30 utterances were removed and that dialogue is a typical example of dialogues where distillation actually is very useful and also indicates what is normally removed from the dialogues. This particular dia-logue begins with the user asking for the telephone number to 'the Lost property office' for a specific bus operator. However, the operator starts a discussion on what bus the traveller traveled on before providing the requested telephone number. The reason for this discussion is probably that the operator knows that different bus companies are utilised and would like to make sure that the user really understands his/her request. The interaction that follows can, thus, in that respect be relevant, but for our purpose of developing systems based on an overall goal of providing information, not to understand human interaction, our dialogue system will not able to handle such phenomenon (JSnsson, 1996) .",
434
- "cite_spans": [
435
- {
436
- "start": 1148,
437
- "end": 1163,
438
- "text": "(JSnsson, 1996)",
439
- "ref_id": "BIBREF14"
440
- }
441
- ],
442
- "ref_spans": [],
443
- "eq_spans": [],
444
- "section": "Applying the method",
445
- "sec_num": "5"
446
- },
447
- {
448
- "text": "The dialogues can roughly be divided into five different categories based on the users task. The discussion in twenty five dialogues were on bus times between various places, often one departure and one arrival but five dialogues involved more places. In five dialogues the discussion was one price and various types of discounts. Five users wanted to know the telephone number to 'the Lost property office', two discussed only bus stops and two discussed how they could utilise their season ticket to travel outside the trafficking area of the bus company. It is interesting to note that there is no correspondence between the task being performed during the interaction and the amount of changes made to the dialogue. Thus, if we can assume that the amount of distillation indicates something about a user's interaction style, other factors than the task are important when characterising user behaviour.",
449
- "cite_spans": [],
450
- "ref_spans": [],
451
- "eq_spans": [],
452
- "section": "Applying the method",
453
- "sec_num": "5"
454
- },
455
- {
456
- "text": "Looking at what is altered we find that the most important distilling principle is that the 'system' provides all relevant information at once, c.f. figures 1 and 2. This in turn removes utterances provided by both 'system' and user.",
457
- "cite_spans": [],
458
- "ref_spans": [],
459
- "eq_spans": [],
460
- "section": "Applying the method",
461
- "sec_num": "5"
462
- },
463
- {
464
- "text": "Most added utterances, both from the user and the 'system', provide explicit requests for information that is later provided in the dialogue, e.g. utterance $3 in figure 6. We have added ten utterances in all 39 dialogues, five 'system' utterances and five user utterances. Note, however, that we utilised the transcribed dialogues, without information on intonation. We would probably not have needed to add this many utterances if we had utilised the tapes. Our reason for not using information on intonation is that we do not assume that our system's speech recogniser can recognise intonation.",
465
- "cite_spans": [],
466
- "ref_spans": [],
467
- "eq_spans": [],
468
- "section": "Applying the method",
469
- "sec_num": "5"
470
- },
471
- {
472
- "text": "Finally, as discussed above, we did not utilise the full potential of multi-modality when distilling the dialogues. For instance, some dialogues could be further distilled if we had assumed that the system had presented a time-table. One reason for this is that we wanted to capture as many interesting aspects intact as possible. The advantage is, thus, that we have a better corpus for understanding human- mm N~ir rill du \u00a3ka? U4: 'n' I must be at Resecentrum before fourteen and thirty five (.) 'cause we will going to the interstate buses ja ska va p~ rececentrum innan fjorton d trettifem (.) f5 vi ska till l~ngfiirdsbussarna Figure 6 : Distilled dialogue fragment with added utterance computer interaction and can from that corpus do a second distillation where we focus more on multimodal interaction.",
473
- "cite_spans": [],
474
- "ref_spans": [
475
- {
476
- "start": 633,
477
- "end": 641,
478
- "text": "Figure 6",
479
- "ref_id": null
480
- }
481
- ],
482
- "eq_spans": [],
483
- "section": "Applying the method",
484
- "sec_num": "5"
485
- },
486
- {
487
- "text": "We have been presenting a method for distilling human dialogues to make them resemble human computer interaction, in order to utilise such dialogues as a knowledge source when developing dialogue systems. Our own main purpose has been to use them for developing multimodal systems, however, as discussed above, we have in this paper rather assumed a speech-only system. But we believe that the basic approach can be used also for multi-modal systems and other kinds of natural language dialogue systems.",
488
- "cite_spans": [],
489
- "ref_spans": [],
490
- "eq_spans": [],
491
- "section": "Discussion",
492
- "sec_num": "6"
493
- },
494
- {
495
- "text": "It is important to be aware of the limitations of the method, and how 'realistic' the produced result will be, compared to a dialogue with the final system. Since we are changing the dialogue moves, by for instance providing all required information in one move, or never asking to be reminded of what the user has previously requested, it is obvious that what follows after the changed sequence would probably be affected one way or another. A consequence of this is that the resulting dialogue is less accurate as a model of the entire dialogue. It is therefore not an ideal candidate for trying out the systems over-all performance during system development. But for the smaller sub-segments or sub-dialogues, we believe that it creates a good approximation of what will take place once the system is up and running. Furthermore, we believe distilled dialogues in some respects to be more realistic than Wizard of Ozdialogues collected with a wizard acting as a computer.",
496
- "cite_spans": [],
497
- "ref_spans": [],
498
- "eq_spans": [],
499
- "section": "Discussion",
500
- "sec_num": "6"
501
- },
502
- {
503
- "text": "Another issue, that has been discussed previously in the description of the method, is that the distilling is made based on a particular view of what a dialogue with a computer will look like. While not necessarily being a detailed and specific model, it is at least an instance of a class of computer dialogue models.",
504
- "cite_spans": [],
505
- "ref_spans": [],
506
- "eq_spans": [],
507
- "section": "Discussion",
508
- "sec_num": "6"
509
- },
510
- {
511
- "text": "One example of this is whether the system is meant to acquire information on the user's underlying motivations or goals or not. In the examples presented, we have not assumed such capabilities, but this assumption is not an absolute necessity. We believe, however, that the distilling process should be based on one such model, not the least to ensure a consistent treatment of similar recurring phenomena at different places in the corpora.",
512
- "cite_spans": [],
513
- "ref_spans": [],
514
- "eq_spans": [],
515
- "section": "Discussion",
516
- "sec_num": "6"
517
- },
518
- {
519
- "text": "The validity of the results based on analysing distilled dialogues depends partly on how the distillation has been carried out. Even when using natural dialogues we can have situations where the interaction is somewhat mysterious, for instance, if some of the dialogue participants behaves irrational such as not providing feedback or being too elliptical. However, if careful considerations have been made to stay as close to the original dialogues as possible, we believe that distilled dialogues will reflect what a human would consider to be a natural interaction.",
520
- "cite_spans": [],
521
- "ref_spans": [],
522
- "eq_spans": [],
523
- "section": "Discussion",
524
- "sec_num": "6"
525
- }
526
- ],
527
- "back_matter": [
528
- {
529
- "text": "This work results from a number of projects on development of natural language interfaces supported by The Swedish Transport & Communications Research Board (KFB) and the joint Research Program for Language Technology (HSFR/NUTEK). We are indebted to the participants of the Swedish Dialogue Systems project, especially to Staffan Larsson, Lena Santamarta, and Annika Flycht-Eriksson for interesting discussions on this topic.",
530
- "cite_spans": [],
531
- "ref_spans": [],
532
- "eq_spans": [],
533
- "section": "Acknowledgments",
534
- "sec_num": null
535
- }
536
- ],
537
- "bib_entries": {
538
- "BIBREF0": {
539
- "ref_id": "b0",
540
- "title": "Customizing interaction for natural language interfaces. LinkSpin9 Electronic articles in Computer and Information Science",
541
- "authors": [
542
- {
543
- "first": "Lars",
544
- "middle": [],
545
- "last": "Ahrenberg",
546
- "suffix": ""
547
- },
548
- {
549
- "first": "Nils",
550
- "middle": [],
551
- "last": "Dahlb~ck",
552
- "suffix": ""
553
- },
554
- {
555
- "first": "Arne",
556
- "middle": [],
557
- "last": "Jsnsson",
558
- "suffix": ""
559
- },
560
- {
561
- "first": "",
562
- "middle": [],
563
- "last": "Thur~e",
564
- "suffix": ""
565
- }
566
- ],
567
- "year": 1993,
568
- "venue": "Notes from Workshop on Pragmatics in Dialogue, The XIV:th Scandinavian Conference of Linguistics and the VI-II:th Conference of Nordic and General Linguistics",
569
- "volume": "1",
570
- "issue": "",
571
- "pages": "",
572
- "other_ids": {},
573
- "num": null,
574
- "urls": [],
575
- "raw_text": "Lars Ahrenberg, Nils Dahlb~ck, Arne JSnsson, and /~ke Thur~e. 1996. Customizing interac- tion for natural language interfaces. LinkSpin9 Electronic articles in Computer and Informa- tion Science, also in Notes from Workshop on Pragmatics in Dialogue, The XIV:th Scandi- navian Conference of Linguistics and the VI- II:th Conference of Nordic and General Linguis- tics, GSteborg, Sweden, 1993, 1(1), October, 1. http : / / www.ep.liu.se / ea / cis /1996 / O01/.",
576
- "links": null
577
- },
578
- "BIBREF1": {
579
- "ref_id": "b1",
580
- "title": "Corpus design criteria. Literary and Linguistic Computing",
581
- "authors": [
582
- {
583
- "first": "Sue",
584
- "middle": [],
585
- "last": "Atkins",
586
- "suffix": ""
587
- },
588
- {
589
- "first": "Jeremy",
590
- "middle": [],
591
- "last": "Clear",
592
- "suffix": ""
593
- },
594
- {
595
- "first": "Nicholas",
596
- "middle": [],
597
- "last": "Ostler",
598
- "suffix": ""
599
- }
600
- ],
601
- "year": 1992,
602
- "venue": "",
603
- "volume": "7",
604
- "issue": "",
605
- "pages": "1--16",
606
- "other_ids": {},
607
- "num": null,
608
- "urls": [],
609
- "raw_text": "Sue Atkins, Jeremy Clear, and Nicholas Ostler. 1992. Corpus design criteria. Literary and Lin- guistic Computing, 7(1):1-16.",
610
- "links": null
611
- },
612
- "BIBREF2": {
613
- "ref_id": "b2",
614
- "title": "Representativeness in corpus design",
615
- "authors": [
616
- {
617
- "first": "Douglas",
618
- "middle": [],
619
- "last": "Biber",
620
- "suffix": ""
621
- }
622
- ],
623
- "year": 1993,
624
- "venue": "Literary and Linguistic Computing",
625
- "volume": "8",
626
- "issue": "4",
627
- "pages": "244--257",
628
- "other_ids": {},
629
- "num": null,
630
- "urls": [],
631
- "raw_text": "Douglas Biber. 1993. Representativeness in cor- pus design. Literary and Linguistic Computing, 8(4):244-257.",
632
- "links": null
633
- },
634
- "BIBREF3": {
635
- "ref_id": "b3",
636
- "title": "Assessing agreement on classification tasks: The kappa statistic. Computational Linguistics",
637
- "authors": [
638
- {
639
- "first": "Jean",
640
- "middle": [],
641
- "last": "Carletta",
642
- "suffix": ""
643
- }
644
- ],
645
- "year": 1996,
646
- "venue": "",
647
- "volume": "22",
648
- "issue": "",
649
- "pages": "249--254",
650
- "other_ids": {},
651
- "num": null,
652
- "urls": [],
653
- "raw_text": "Jean Carletta. 1996. Assessing agreement on classi- fication tasks: The kappa statistic. Computation- al Linguistics, 22(2):249-254.",
654
- "links": null
655
- },
656
- "BIBREF4": {
657
- "ref_id": "b4",
658
- "title": "Spoken corpus design. Literary and Linguistic Computing",
659
- "authors": [
660
- {
661
- "first": "Steve",
662
- "middle": [],
663
- "last": "Crowdy",
664
- "suffix": ""
665
- }
666
- ],
667
- "year": 1993,
668
- "venue": "",
669
- "volume": "8",
670
- "issue": "",
671
- "pages": "259--265",
672
- "other_ids": {},
673
- "num": null,
674
- "urls": [],
675
- "raw_text": "Steve Crowdy. 1993. Spoken corpus design. Literary and Linguistic Computing, 8(4):259-265.",
676
- "links": null
677
- },
678
- "BIBREF5": {
679
- "ref_id": "b5",
680
- "title": "Knowledge sources in spoken dialogue systems",
681
- "authors": [
682
- {
683
- "first": "Nils",
684
- "middle": [],
685
- "last": "Dahlb",
686
- "suffix": ""
687
- },
688
- {
689
- "first": "/",
690
- "middle": [],
691
- "last": "Ick",
692
- "suffix": ""
693
- },
694
- {
695
- "first": "Arne",
696
- "middle": [],
697
- "last": "Jsnsson",
698
- "suffix": ""
699
- }
700
- ],
701
- "year": 1999,
702
- "venue": "Proceedings of Eurospeech'99",
703
- "volume": "",
704
- "issue": "",
705
- "pages": "",
706
- "other_ids": {},
707
- "num": null,
708
- "urls": [],
709
- "raw_text": "Nils Dahlb/ick and Arne JSnsson. 1999. Knowledge sources in spoken dialogue systems. In Proceed- ings of Eurospeech'99, Budapest, Hungary.",
710
- "links": null
711
- },
712
- "BIBREF6": {
713
- "ref_id": "b6",
714
- "title": "Wizard of oz studies -why and how",
715
- "authors": [
716
- {
717
- "first": "Nils",
718
- "middle": [],
719
- "last": "Dahlb/Ick",
720
- "suffix": ""
721
- },
722
- {
723
- "first": "Arne",
724
- "middle": [],
725
- "last": "Jsnsson",
726
- "suffix": ""
727
- },
728
- {
729
- "first": "Lars",
730
- "middle": [],
731
- "last": "Ahrenberg",
732
- "suffix": ""
733
- }
734
- ],
735
- "year": 1998,
736
- "venue": "Readings in Intelligent User Interfaces",
737
- "volume": "",
738
- "issue": "",
739
- "pages": "",
740
- "other_ids": {},
741
- "num": null,
742
- "urls": [],
743
- "raw_text": "Nils Dahlb/ick, Arne JSnsson, and Lars Ahrenberg. 1998. Wizard of oz studies -why and how. In Mark Maybury & Wolfgang Wahlster, editor, Readings in Intelligent User Interfaces. Morgan Kaufmann.",
744
- "links": null
745
- },
746
- "BIBREF7": {
747
- "ref_id": "b7",
748
- "title": "An architecture for multi-modal natural dialogue systems",
749
- "authors": [
750
- {
751
- "first": "Ntis",
752
- "middle": [],
753
- "last": "Dahlb",
754
- "suffix": ""
755
- },
756
- {
757
- "first": "/",
758
- "middle": [],
759
- "last": "Ick",
760
- "suffix": ""
761
- },
762
- {
763
- "first": "Annika",
764
- "middle": [],
765
- "last": "Flycht-Eriksson",
766
- "suffix": ""
767
- },
768
- {
769
- "first": "Arne",
770
- "middle": [],
771
- "last": "Jsnsson",
772
- "suffix": ""
773
- },
774
- {
775
- "first": "Pernilla",
776
- "middle": [],
777
- "last": "Qvarfordt",
778
- "suffix": ""
779
- }
780
- ],
781
- "year": 1999,
782
- "venue": "Proceedings of ESCA Tutorial and Research Workshop (ETRW) on Interactive Dialogue in Multi-Modal Systems",
783
- "volume": "",
784
- "issue": "",
785
- "pages": "",
786
- "other_ids": {},
787
- "num": null,
788
- "urls": [],
789
- "raw_text": "Ntis Dahlb/ick, Annika Flycht-Eriksson, Arne JSnsson, and Pernilla Qvarfordt. 1999. An ar- chitecture for multi-modal natural dialogue sys- tems. In Proceedings of ESCA Tutorial and Re- search Workshop (ETRW) on Interactive Dialogue in Multi-Modal Systems, Germany.",
790
- "links": null
791
- },
792
- "BIBREF8": {
793
- "ref_id": "b8",
794
- "title": "Nils Dahlb/ick. 1991. Representations of Discourse, Cognitive and Computational Aspects",
795
- "authors": [],
796
- "year": null,
797
- "venue": "",
798
- "volume": "",
799
- "issue": "",
800
- "pages": "",
801
- "other_ids": {},
802
- "num": null,
803
- "urls": [],
804
- "raw_text": "Nils Dahlb/ick. 1991. Representations of Discourse, Cognitive and Computational Aspects. Ph.D. the- sis, LinkSping University.",
805
- "links": null
806
- },
807
- "BIBREF9": {
808
- "ref_id": "b9",
809
- "title": "Data collection and processing in the carnegie mellon communicator",
810
- "authors": [
811
- {
812
- "first": "Maxine",
813
- "middle": [],
814
- "last": "Eskenazi",
815
- "suffix": ""
816
- },
817
- {
818
- "first": "Alexander",
819
- "middle": [],
820
- "last": "Rudnicki",
821
- "suffix": ""
822
- },
823
- {
824
- "first": "Karin",
825
- "middle": [],
826
- "last": "Gregory",
827
- "suffix": ""
828
- },
829
- {
830
- "first": "Paul",
831
- "middle": [],
832
- "last": "Constantinides",
833
- "suffix": ""
834
- },
835
- {
836
- "first": "Robert",
837
- "middle": [],
838
- "last": "Brennan",
839
- "suffix": ""
840
- },
841
- {
842
- "first": "Christina",
843
- "middle": [],
844
- "last": "Bennett",
845
- "suffix": ""
846
- },
847
- {
848
- "first": "Jwan",
849
- "middle": [],
850
- "last": "Allen",
851
- "suffix": ""
852
- }
853
- ],
854
- "year": 1999,
855
- "venue": "Proceedings of Eurospeech'99",
856
- "volume": "",
857
- "issue": "",
858
- "pages": "",
859
- "other_ids": {},
860
- "num": null,
861
- "urls": [],
862
- "raw_text": "Maxine Eskenazi, Alexander Rudnicki, Karin Grego- ry, Paul Constantinides, Robert Brennan, Christi- na Bennett, and Jwan Allen. 1999. Data collec- tion and processing in the carnegie mellon com- municator. In Proceedings of Eurospeech'99, Bu- dapest, Hungary.",
863
- "links": null
864
- },
865
- "BIBREF10": {
866
- "ref_id": "b10",
867
- "title": "A spoken dialogue system utilizing spatial information",
868
- "authors": [
869
- {
870
- "first": "Annika",
871
- "middle": [],
872
- "last": "Flycht-Eriksson",
873
- "suffix": ""
874
- },
875
- {
876
- "first": "Arne",
877
- "middle": [],
878
- "last": "Jsnsson",
879
- "suffix": ""
880
- }
881
- ],
882
- "year": 1998,
883
- "venue": "Proceedings of ICSLP'98",
884
- "volume": "",
885
- "issue": "",
886
- "pages": "",
887
- "other_ids": {},
888
- "num": null,
889
- "urls": [],
890
- "raw_text": "Annika Flycht-Eriksson and Arne JSnsson. 1998. A spoken dialogue system utilizing spatial informa- tion. In Proceedings of ICSLP'98, Sydney, Aus- tralia.",
891
- "links": null
892
- },
893
- "BIBREF11": {
894
- "ref_id": "b11",
895
- "title": "A survey of knowledge sources in dialogue systems",
896
- "authors": [
897
- {
898
- "first": "Annika",
899
- "middle": [],
900
- "last": "Flycht-Eriksson",
901
- "suffix": ""
902
- }
903
- ],
904
- "year": 1999,
905
- "venue": "Proceedings of lJCAI-99 Workshop on Knowledge and Reasoning in Practical Dialogue Systems",
906
- "volume": "",
907
- "issue": "",
908
- "pages": "",
909
- "other_ids": {},
910
- "num": null,
911
- "urls": [],
912
- "raw_text": "Annika Flycht-Eriksson. 1999. A survey of knowl- edge sources in dialogue systems. In Proceedings of lJCAI-99 Workshop on Knowledge and Reason- ing in Practical Dialogue Systems, August, Stock- holm.",
913
- "links": null
914
- },
915
- "BIBREF12": {
916
- "ref_id": "b12",
917
- "title": "Corpus Annotation. Longman",
918
- "authors": [
919
- {
920
- "first": "Roger",
921
- "middle": [],
922
- "last": "Garside",
923
- "suffix": ""
924
- },
925
- {
926
- "first": "Geoffrey",
927
- "middle": [],
928
- "last": "Leech",
929
- "suffix": ""
930
- },
931
- {
932
- "first": "Anthony",
933
- "middle": [],
934
- "last": "Meenery",
935
- "suffix": ""
936
- }
937
- ],
938
- "year": 1997,
939
- "venue": "",
940
- "volume": "",
941
- "issue": "",
942
- "pages": "",
943
- "other_ids": {},
944
- "num": null,
945
- "urls": [],
946
- "raw_text": "Roger Garside, Geoffrey Leech, and Anthony MeEnery. 1997. Corpus Annotation. Longman.",
947
- "links": null
948
- },
949
- "BIBREF13": {
950
- "ref_id": "b13",
951
- "title": "Arne JSnsson and Nils Dahlb/ick. 1988. Talking to a computer is not like talking to your best friend",
952
- "authors": [],
953
- "year": null,
954
- "venue": "Proceedings of the First Scandinavian Conference on Artificial InterUigence",
955
- "volume": "",
956
- "issue": "",
957
- "pages": "",
958
- "other_ids": {},
959
- "num": null,
960
- "urls": [],
961
- "raw_text": "Arne JSnsson and Nils Dahlb/ick. 1988. Talking to a computer is not like talking to your best friend. In Proceedings of the First Scandinavian Conference on Artificial InterUigence, Tvoms\u00a2.",
962
- "links": null
963
- },
964
- "BIBREF14": {
965
- "ref_id": "b14",
966
- "title": "Natural language generation without intentions",
967
- "authors": [
968
- {
969
- "first": "Arne",
970
- "middle": [],
971
- "last": "Jsnsson",
972
- "suffix": ""
973
- }
974
- ],
975
- "year": 1996,
976
- "venue": "Proceedings of ECAI'96 Workshop Gaps and Bridges: New Directions in Planning and Natural Language Generation",
977
- "volume": "",
978
- "issue": "",
979
- "pages": "102--104",
980
- "other_ids": {},
981
- "num": null,
982
- "urls": [],
983
- "raw_text": "Arne JSnsson. 1996. Natural language generation without intentions. In Proceedings of ECAI'96 Workshop Gaps and Bridges: New Directions in Planning and Natural Language Generation, pages 102-104.",
984
- "links": null
985
- },
986
- "BIBREF15": {
987
- "ref_id": "b15",
988
- "title": "Effects of using speech in timetable information systems for www",
989
- "authors": [
990
- {
991
- "first": "Pernilla",
992
- "middle": [],
993
- "last": "Qvarfordt",
994
- "suffix": ""
995
- },
996
- {
997
- "first": "Arne",
998
- "middle": [],
999
- "last": "Jsnsson",
1000
- "suffix": ""
1001
- }
1002
- ],
1003
- "year": 1998,
1004
- "venue": "Proceedings of ICSLP'98",
1005
- "volume": "",
1006
- "issue": "",
1007
- "pages": "",
1008
- "other_ids": {},
1009
- "num": null,
1010
- "urls": [],
1011
- "raw_text": "Pernilla Qvarfordt and Arne JSnsson. 1998. Effects of using speech in timetable information systems for www. In Proceedings of ICSLP'98, Sydney, Australia.",
1012
- "links": null
1013
- },
1014
- "BIBREF16": {
1015
- "ref_id": "b16",
1016
- "title": "Usability of multimodal timetables: Effects of different levels of domain knowledge on usability",
1017
- "authors": [
1018
- {
1019
- "first": "Pernilla",
1020
- "middle": [],
1021
- "last": "Qvarfordt",
1022
- "suffix": ""
1023
- }
1024
- ],
1025
- "year": 1998,
1026
- "venue": "",
1027
- "volume": "",
1028
- "issue": "",
1029
- "pages": "",
1030
- "other_ids": {},
1031
- "num": null,
1032
- "urls": [],
1033
- "raw_text": "Pernilla Qvarfordt. 1998. Usability of multimodal timetables: Effects of different levels of do- main knowledge on usability. Master's thesis, LinkSping University.",
1034
- "links": null
1035
- },
1036
- "BIBREF17": {
1037
- "ref_id": "b17",
1038
- "title": "Evaluating Natural Language Processing Systems",
1039
- "authors": [
1040
- {
1041
- "first": "Karen Sparck Jones",
1042
- "middle": [],
1043
- "last": "",
1044
- "suffix": ""
1045
- },
1046
- {
1047
- "first": "Julia",
1048
- "middle": [
1049
- "R"
1050
- ],
1051
- "last": "Galliers",
1052
- "suffix": ""
1053
- }
1054
- ],
1055
- "year": 1996,
1056
- "venue": "",
1057
- "volume": "",
1058
- "issue": "",
1059
- "pages": "",
1060
- "other_ids": {},
1061
- "num": null,
1062
- "urls": [],
1063
- "raw_text": "Karen Sparck Jones and Julia R. Galliers. 1996. Evaluating Natural Language Processing Systems. Springer Verlag.",
1064
- "links": null
1065
- },
1066
- "BIBREF18": {
1067
- "ref_id": "b18",
1068
- "title": "Paradise: A framework for evaluating spoken dialogue agents",
1069
- "authors": [
1070
- {
1071
- "first": "Marilyn",
1072
- "middle": [
1073
- "A"
1074
- ],
1075
- "last": "Walker",
1076
- "suffix": ""
1077
- },
1078
- {
1079
- "first": "Diane",
1080
- "middle": [
1081
- "J"
1082
- ],
1083
- "last": "Litman",
1084
- "suffix": ""
1085
- },
1086
- {
1087
- "first": "Candace",
1088
- "middle": [
1089
- "A"
1090
- ],
1091
- "last": "Kamm",
1092
- "suffix": ""
1093
- },
1094
- {
1095
- "first": "Alicia",
1096
- "middle": [],
1097
- "last": "Abella",
1098
- "suffix": ""
1099
- }
1100
- ],
1101
- "year": 1998,
1102
- "venue": "Readings in Intelligent User Interfaces",
1103
- "volume": "",
1104
- "issue": "",
1105
- "pages": "",
1106
- "other_ids": {},
1107
- "num": null,
1108
- "urls": [],
1109
- "raw_text": "Marilyn A. Walker, Diane J. Litman, Candace A. Kamm, and Alicia Abella. 1998. Paradise: A framework for evaluating spoken dialogue agents. In Mark Maybury & Wolfgang Wahlster, editor, Readings in Intelligent User Interfaces. Morgan Kaufmann.",
1110
- "links": null
1111
- }
1112
- },
1113
- "ref_entries": {
1114
- "FIGREF0": {
1115
- "num": null,
1116
- "uris": null,
1117
- "text": "Dialogue fragment from a real interaction on bus time-table information U4: I wonder if you have any buses or (.) like express buses going from LinkSping to Vadstena (.) on sunday S5",
1118
- "type_str": "figure"
1119
- },
1120
- "FIGREF1": {
1121
- "num": null,
1122
- "uris": null,
1123
- "text": "A distilled version of the dialogue infigure 1",
1124
- "type_str": "figure"
1125
- },
1126
- "FIGREF2": {
1127
- "num": null,
1128
- "uris": null,
1129
- "text": "Dialogue fragment from a natural bus timetable interaction search procedures as discussed above.",
1130
- "type_str": "figure"
1131
- },
1132
- "TABREF0": {
1133
- "num": null,
1134
- "html": null,
1135
- "text": "if you have any mm buses or (.) like express buses leaving from LinkSping to Vadstena (.) on sunday ja ville undra om ni hade ndgra 5h bussar eUer (.) typ expressbussar sore dkte frdn LinkSping till Vadstena (.) pd sSnda",
1136
- "content": "<table><tr><td>U4: $5:</td><td>yes I wonder no the bus does not run on sundays</td></tr><tr><td/><td>nej bussen g~r inte pd sSndagar</td></tr><tr><td>U6:</td><td>how can you (.) can you take the train and then change some way (.) because (.)</td></tr><tr><td/><td>to MjSlby 'n' so</td></tr><tr><td/><td>hur kan man (.) kan man ta tdg d sen byta p~ ndtt sStt (.) fSr de (.)</td></tr><tr><td/><td>till mjSlby ~ sd</td></tr><tr><td>$7:</td><td>that you can do too yes</td></tr><tr><td/><td>de kan du gSra ocksd ja</td></tr><tr><td>U8:</td><td>how (.) do you have any such suggestions</td></tr><tr><td/><td>hut (.) har du n~ra n~gra s~na fSrslag</td></tr><tr><td>$9:</td><td>yes let's see (4s) a moment (15s) now let us see here (.) was it on the sunday you should travel</td></tr><tr><td/><td>ja ska se h~ir (4s) eft 5gonblick (15s) nu ska vise hSr (.) va de p~ sSndagen du skulle dka pd</td></tr><tr><td>U10:</td><td>yes right afternoon preferably</td></tr><tr><td/><td>ja just de eftermidda ggirna</td></tr><tr><td>$11:</td><td>afternoon preferable (.) you have train from LinkSping fourteen twenty nine</td></tr><tr><td/><td>eftermidda gSrna (.) du hat t~g frdn LinkSping fjorton d tjugonie</td></tr><tr><td>U12:</td><td>mm</td></tr><tr><td/><td>mm</td></tr><tr><td>S13:</td><td>and then you will change from MjSlby station six hundred sixty</td></tr><tr><td/><td>sd byter du frdn MjSlby station sexhundrasexti</td></tr><tr><td>U14:</td><td>sixhundred sixty</td></tr><tr><td/><td>sexhundrasexti</td></tr><tr><td>$15:</td><td>fifteen and ten</td></tr><tr><td/><td>femton ~ tie</td></tr></table>",
1137
- "type_str": "table"
1138
- },
1139
- "TABREF1": {
1140
- "num": null,
1141
- "html": null,
1142
- "text": "~ dd behhver du va here strax e~ter tjuge 5vet tvd n~nting d~ It is bus two hundred ten which runs on old tannefors road that you have to take and get off at the bus stop at that bus stop named vetegatan",
1143
- "content": "<table><tr><td colspan=\"2\">U6: yes around that</td></tr><tr><td/><td>ja ungefgir</td></tr><tr><td>$7:</td><td>let's see here (lls) two hundred and fourteen Ryd end station leaves forty six (.) thirteen 'n'</td></tr><tr><td/><td>forty six then you will be down fourteen oh seven (.)</td></tr><tr><td/><td>d~ ska vise hSr (11s) tv~hundrafjorton Ryd 5ndh~llplatsen gdr ~5rtisex (.) tretton d</td></tr><tr><td/><td>]Srtisex d~ dr du nere ~jorton noll sju 5)</td></tr><tr><td colspan=\"2\">U8: aha</td></tr><tr><td/><td>jaha</td></tr><tr><td>$9:</td><td>'n' (.) the next one takes you there (.) fourteen thirty seven (.) but that is too late</td></tr><tr><td/><td>(.) ndsta dr du nere 5) ~jorton d trettisju (.) men de 5 ju ~Sr sent</td></tr><tr><td/><td>Figure 3: Dialogue fragment from a real interaction on bus time-table information</td></tr><tr><td colspan=\"2\">U2: Well, hi (.) I am going to Ugglegatan eighth</td></tr><tr><td/><td>ja hej (.) ja ska till Ugglegatan dtta</td></tr><tr><td colspan=\"2\">$3: Yes</td></tr><tr><td/><td>ja</td></tr><tr><td colspan=\"2\">U4: and (.) I wonder (.) it is somewhere in Tannefors</td></tr><tr><td/><td>och (.) jag undrar (.) det ligger ndnstans i Tannefors</td></tr><tr><td colspan=\"2\">U6: Oh Yeah</td></tr><tr><td/><td>jar~</td></tr><tr><td>$7:</td><td>(operator disconnects) (25s) mm (.) okey (hs) what the hell (2s)</td></tr><tr><td/><td>(operator connects again) hello yes</td></tr><tr><td/><td>((Telefonisten kopplar ur sig)) (25s) iihh (.) okey (hs) de va sore ]aan (2s)</td></tr><tr><td/><td>((Telefonisten kopplar in sig igen)) halld ja</td></tr><tr><td colspan=\"2\">U8: Yes hello</td></tr><tr><td/><td>ja hej</td></tr><tr><td colspan=\"2\">$9: det ~i buss tv~hundratio sore g~r gamla tanne~orsvSgen som du ~r ~ka ~ g~ av rid</td></tr><tr><td/><td>den hdllplatsen rid den hdllplatsen sore heter vetegatan.</td></tr></table>",
1144
- "type_str": "table"
1145
- },
1146
- "TABREF2": {
1147
- "num": null,
1148
- "html": null,
1149
- "text": "U2: Yees hi Anna Nilsson is my name and I would like to take the bus from Ryd center to Resecentrum in LinkSping jaa hej Anna Nilsson heter jag och jag rill ~ka buss ~r~n Ryds centrum till resecentrum i LinkSping. $3: mm When do you want to leave?",
1150
- "content": "<table/>",
1151
- "type_str": "table"
1152
- }
1153
- }
1154
- }
1155
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Full_text_JSON/prefixA/json/A00/A00-1008.json DELETED
@@ -1,1403 +0,0 @@
1
- {
2
- "paper_id": "A00-1008",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:54.738990Z"
6
- },
7
- "title": "Plan-Based Dialogue Management in a Physics Tutor",
8
- "authors": [
9
- {
10
- "first": "Reva",
11
- "middle": [],
12
- "last": "Freedman",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Pittsburgh Pittsburgh",
17
- "location": {
18
- "postCode": "15260",
19
- "region": "PA"
20
- }
21
- },
22
- "email": "freedrk@pitt@edu"
23
- }
24
- ],
25
- "year": "",
26
- "venue": null,
27
- "identifiers": {},
28
- "abstract": "This paper describes an application of APE (the Atlas Planning Engine), an integrated planning and execution system at the heart of the Atlas dialogue management system. APE controls a mixedinitiative dialogue between a human user and a host system, where turns in the 'conversation' may include graphical actions and/or written text. APE has full unification and can handle arbitrarily nested discourse constructs, making it more powerful than dialogue managers based on finitestate machines. We illustrate this work by describing Atlas-Andes, an intelligent tutoring system built using APE with the Andes physics tutor as the host.",
29
- "pdf_parse": {
30
- "paper_id": "A00-1008",
31
- "_pdf_hash": "",
32
- "abstract": [
33
- {
34
- "text": "This paper describes an application of APE (the Atlas Planning Engine), an integrated planning and execution system at the heart of the Atlas dialogue management system. APE controls a mixedinitiative dialogue between a human user and a host system, where turns in the 'conversation' may include graphical actions and/or written text. APE has full unification and can handle arbitrarily nested discourse constructs, making it more powerful than dialogue managers based on finitestate machines. We illustrate this work by describing Atlas-Andes, an intelligent tutoring system built using APE with the Andes physics tutor as the host.",
35
- "cite_spans": [],
36
- "ref_spans": [],
37
- "eq_spans": [],
38
- "section": "Abstract",
39
- "sec_num": null
40
- }
41
- ],
42
- "body_text": [
43
- {
44
- "text": "The purpose of the Atlas project is to enlarge the scope of student interaction in an intelligent tutoring system (ITS) to include coherent conversational sequences, including both written text and GUI actions. A key component of Atlas is APE, the Atlas Planning Engine, a \"just-intime\" planner specialized for easy construction and quick generation of hierarchically organized dialogues. APE is a domain-and task-independent system. Although to date we have used APE as a dialogue manager for intelligent tutoring systems, APE could also be used to manage other types of human-computer conversation, such as an advicegiving system or an interactive help system.",
45
- "cite_spans": [],
46
- "ref_spans": [],
47
- "eq_spans": [],
48
- "section": "Introduction",
49
- "sec_num": "1"
50
- },
51
- {
52
- "text": "Planning is an essential component of a dialogue-based ITS. Although there are many reasons for using natural language in an ITS, as soon as the student gives an unexpected response to a tutor question, the tutor needs to be able to plan in order to achieve its goals as well as respond appropriately to the student's statement. Yet classical planning is inappropriate for dialogue generation precisely because it assumes an unchanging world. A more appropriate approach is the \"practical reason\" approach pioneered by Bratman (1987 Bratman ( , 1990 . According to Bratman, human beings maintain plans and prefer to follow them, but they are also capable of changing the plans on the fly when needed. Bratman's approach has been introduced into computer science under the name of reactive planning Ingrand 1989, Wilkins et al. 1995) .",
53
- "cite_spans": [
54
- {
55
- "start": 519,
56
- "end": 532,
57
- "text": "Bratman (1987",
58
- "ref_id": "BIBREF0"
59
- },
60
- {
61
- "start": 533,
62
- "end": 549,
63
- "text": "Bratman ( , 1990",
64
- "ref_id": "BIBREF1"
65
- },
66
- {
67
- "start": 798,
68
- "end": 832,
69
- "text": "Ingrand 1989, Wilkins et al. 1995)",
70
- "ref_id": null
71
- }
72
- ],
73
- "ref_spans": [],
74
- "eq_spans": [],
75
- "section": "Introduction",
76
- "sec_num": "1"
77
- },
78
- {
79
- "text": "In this paper we discuss the rationale for the use of reactive planning as well as the use of the hierarchical task network (HTN) style of plan operators. Then we describe APE (the Atlas Planning Engine), a dialogue planner we have implemented to embody the above concepts. We demonstrate the use of APE by showing how we have used it to add a dialogue capability to an existing ITS, the Andes physics tutor. By showing dialogues that Atlas-Andes can generate, we demonstrate the advantages of this architecture over the finite-state machine approach to dialogue management.",
80
- "cite_spans": [],
81
- "ref_spans": [],
82
- "eq_spans": [],
83
- "section": "Introduction",
84
- "sec_num": "1"
85
- },
86
- {
87
- "text": "For an ITS, planning is required in order to ensure a coherent conversation as well as to accomplish tutorial goals. But it is impossible to plan a whole conversation in advance when the student can respond freely at every turn, just as human beings cannot plan their daily lives in advance because of possible changes in conditions. Classical planning algorithms are inappropriate because the tutor must be able to change plans based on the student's responses. For this reason we have adopted the ideas of the philosopher Michael Bratman (1987 Bratman ( , 1990 ). Bratman uses the term \"practical reason\" to describe his analysis since he is concerned with how to reason about practical matters. For human beings, planning is required in order to accomplish one's goals. Bratman's key insight is that human beings tend to follow a plan once they have one, although they are capable of dropping an intention or changing a partial plan when necessary. In other words, human beings do not decide what to do from scratch at each turn.",
88
- "cite_spans": [
89
- {
90
- "start": 532,
91
- "end": 545,
92
- "text": "Bratman (1987",
93
- "ref_id": "BIBREF0"
94
- },
95
- {
96
- "start": 546,
97
- "end": 562,
98
- "text": "Bratman ( , 1990",
99
- "ref_id": "BIBREF1"
100
- }
101
- ],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "'Practical reason' and the BDI model",
105
- "sec_num": "2.1"
106
- },
107
- {
108
- "text": "Bratman and others who have adopted his approach use a tripartite mental model that includes beliefs, desires and intentions (Bratman, Israel and Pollack 1988 , Pollack 1992 , Georgeff et al. 1998 , hence the name \"BDI model.\" Beliefs, which are uninstantiated plans in the speaker's head, are reified by the plan library. Desires are expressed as the agent's goals. Intentions, or plan steps that the agent has committed to but not yet acted on, are stored in an agenda. Thus the agent's partial plan for achieving a goal is a network of intentions. A plan can be left in a partially expanded state until it is necessary to refine it further.",
109
- "cite_spans": [
110
- {
111
- "start": 125,
112
- "end": 158,
113
- "text": "(Bratman, Israel and Pollack 1988",
114
- "ref_id": "BIBREF3"
115
- },
116
- {
117
- "start": 159,
118
- "end": 173,
119
- "text": ", Pollack 1992",
120
- "ref_id": "BIBREF14"
121
- },
122
- {
123
- "start": 174,
124
- "end": 196,
125
- "text": ", Georgeff et al. 1998",
126
- "ref_id": "BIBREF10"
127
- }
128
- ],
129
- "ref_spans": [],
130
- "eq_spans": [],
131
- "section": "'Practical reason' and the BDI model",
132
- "sec_num": "2.1"
133
- },
134
- {
135
- "text": "Bratman's approach has been elaborated in a computer science context by subsequent researchers (Bratman, Israel and Pollack 1988 , Pollack 1992 , Georgeff et al. 1998 . Reactive planning Ingrand 1989, Wilkins et al. 1995) , originally known as \"integrated planning and execution,\" is one way of implementing Bratman's model. Originally developed for real-time control of the space shuttle, reactive planning has since been used in a variety of other domains. For the Atlas project we have developed a reactive planner called APE (Atlas Planning Engine) which uses these ideas to conduct a conversation. After each student response, the planner can choose to continue with its previous intention or change something in the plan to respond better to the student's utterance.",
136
- "cite_spans": [
137
- {
138
- "start": 95,
139
- "end": 128,
140
- "text": "(Bratman, Israel and Pollack 1988",
141
- "ref_id": "BIBREF3"
142
- },
143
- {
144
- "start": 129,
145
- "end": 143,
146
- "text": ", Pollack 1992",
147
- "ref_id": "BIBREF14"
148
- },
149
- {
150
- "start": 144,
151
- "end": 166,
152
- "text": ", Georgeff et al. 1998",
153
- "ref_id": "BIBREF10"
154
- },
155
- {
156
- "start": 187,
157
- "end": 221,
158
- "text": "Ingrand 1989, Wilkins et al. 1995)",
159
- "ref_id": null
160
- }
161
- ],
162
- "ref_spans": [],
163
- "eq_spans": [],
164
- "section": "Implementation via reactive planning",
165
- "sec_num": "2.2"
166
- },
167
- {
168
- "text": "Like most reactive planners, APE is a hierarchical task network (HTN) style planner (Yang 1990, Erol, Hendler and Nau 1994) . Hierarchical decomposition asserts that each goal can be achieved via a series of subgoals instead of relying on means-end reasoning. Hierarchical decomposition is more appropriate to dialogue generation for a number of reasons. First, decomposition is better suited to the type of largescale dialogue planning required in a real-world tutoring system, as it is easier to establish what a human speaker will say in a given situation than to be able to understand why in sufficient detail and generality to do means-end planning. Second, Hierarchical decomposition minimizes search time. Third, our dialogues are task-oriented and have a hierarchical structure (Grosz and Sidner 1986) . In such a case, matching the structure of the domain simplifies operator development because they can often be derived from transcripts of human tutoring sessions. The hierarchy information is also useful in determining appropriate referring expressions. Fourth, interleaved planning and execution is important for dialogue generation because we cannot predict the human user's future utterances. In an HTN-based system, it is straightforward to implement interleaved planning and execution because one only needs to expand the portion of the plan that is about to be executed. Finally, the conversation is in a certain sense the trace of the plan. In other words, we care much more about the actions generated by the planner than the states involved, whether implicitly or explicitly specified. Hierarchical decomposition provides this trace naturally.",
169
- "cite_spans": [
170
- {
171
- "start": 84,
172
- "end": 113,
173
- "text": "(Yang 1990, Erol, Hendler and",
174
- "ref_id": null
175
- },
176
- {
177
- "start": 114,
178
- "end": 123,
179
- "text": "Nau 1994)",
180
- "ref_id": "BIBREF6"
181
- },
182
- {
183
- "start": 786,
184
- "end": 809,
185
- "text": "(Grosz and Sidner 1986)",
186
- "ref_id": "BIBREF11"
187
- }
188
- ],
189
- "ref_spans": [],
190
- "eq_spans": [],
191
- "section": "Implementation via reactive planning",
192
- "sec_num": "2.2"
193
- },
194
- {
195
- "text": "Andes (Gertner, Conati and VanLehn 1998) is an intelligent tutoring system in the domain of firstyear college physics. Andes teaches via coached problem solving (VanLehn 1996) . In coached problem solving, the tutoring system tracks the student as the latter attempts to solve a problem. If the student gets stuck or deviates too far from a correct solution path, the tutoring system provides hints and other assistance.",
196
- "cite_spans": [
197
- {
198
- "start": 6,
199
- "end": 40,
200
- "text": "(Gertner, Conati and VanLehn 1998)",
201
- "ref_id": "BIBREF8"
202
- },
203
- {
204
- "start": 161,
205
- "end": 175,
206
- "text": "(VanLehn 1996)",
207
- "ref_id": "BIBREF17"
208
- }
209
- ],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "Background: the Andes physics tutor",
213
- "sec_num": null
214
- },
215
- {
216
- "text": "A sample Andes problem is shown in midsolution in Figure 1 . A physics problem is given in the upper-left corner with a picture below it. Next to the picture the student has begun to sketch the vectors involved using the GUI buttons along the left-hand edge of the screen. As the student draws vectors, Andes and the student cooperatively fill in the variable definitions in the upper-right corner. Later the student will use the space below to write equations connecting the variables.",
217
- "cite_spans": [],
218
- "ref_spans": [
219
- {
220
- "start": 50,
221
- "end": 58,
222
- "text": "Figure 1",
223
- "ref_id": null
224
- }
225
- ],
226
- "eq_spans": [],
227
- "section": "Background: the Andes physics tutor",
228
- "sec_num": null
229
- },
230
- {
231
- "text": "In this example, the elevator is decelerating, so the acceleration vector should face the opposite direction from the velocity vector. (If the acceleration vector went the same direction as the velocity vector, the speed of the elevator would increase and it would crash into the ground.) This is an important issue in beginning physics; it occurs in five Andes problems.",
232
- "cite_spans": [],
233
- "ref_spans": [],
234
- "eq_spans": [],
235
- "section": "Background: the Andes physics tutor",
236
- "sec_num": null
237
- },
238
- {
239
- "text": "When such errors occur, Andes turns the incorrect item red and provides hints to students in the lower-left corner of the screen. A sample of these hints, shown in the order a student would encounter them, is shown in Fig. 2 . But hints are an output-only form of natural language; the student can't take the initiative or ask a question. In addition, there is no way for the system to ask the student a question or lead the student through a multi-step directed line of reasoning. Thus there is no way to use some of the effective rhetorical methods used by skilled human tutors, such as analogy and reductio ad absurdum. Current psychological research suggests that active methods, where students have to answer questions, will improve the performance of tutoring systems.",
240
- "cite_spans": [],
241
- "ref_spans": [
242
- {
243
- "start": 218,
244
- "end": 224,
245
- "text": "Fig. 2",
246
- "ref_id": null
247
- }
248
- ],
249
- "eq_spans": [],
250
- "section": "Background: the Andes physics tutor",
251
- "sec_num": null
252
- },
253
- {
254
- "text": "Figure3 shows a sample plan operator. For legibility, the key elements have been rendered in English instead of in Lisp. The hiercx slot provides a way for the planner to be aware of the context in which a decomposition is proposed. Items in the hiercx slot are instantiated and added to the transient database only so long as the operator which spawned them is in the agenda.",
255
- "cite_spans": [],
256
- "ref_spans": [],
257
- "eq_spans": [],
258
- "section": "Structure of the Atlas Planning Engine",
259
- "sec_num": null
260
- },
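The scoping behavior of the hiercx slot can be sketched as facts asserted into a transient database when an operator is expanded and retracted when it leaves the agenda. The class and fact names below are hypothetical; APE's transient database is not reproduced here.

    # Sketch of hiercx-style scoping over a transient fact database.
    class TransientDB:
        def __init__(self):
            self.facts = set()
        def assert_scoped(self, facts):    # operator enters the agenda
            self.facts |= set(facts)
        def retract_scoped(self, facts):   # operator leaves the agenda
            self.facts -= set(facts)

    db = TransientDB()
    hiercx = {("topic", "acceleration")}
    db.assert_scoped(hiercx)
    assert ("topic", "acceleration") in db.facts
    db.retract_scoped(hiercx)              # context disappears with the operator
    assert ("topic", "acceleration") not in db.facts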
261
- {
262
- "text": "To initiate a planning session, the user invokes the planner with an initial goal. The system searches the operator library to find all operators whose goal field matches the next goal on the agenda and whose filter conditions and precon- Since APE is intended especially for generation of hierarchically organized taskoriented discourse, each operator has a multi-step recipe in the style of Wilkins (1988) . When a match is found, the matching goal is removed from the agenda and is replaced by the steps in the recipe. APE has two kinds of primitive actions; one ends a turn and the other doesn't.",
263
- "cite_spans": [
264
- {
265
- "start": 393,
266
- "end": 407,
267
- "text": "Wilkins (1988)",
268
- "ref_id": "BIBREF21"
269
- }
270
- ],
271
- "ref_spans": [],
272
- "eq_spans": [],
273
- "section": "Structure of the Atlas Planning Engine",
274
- "sec_num": null
275
- },
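The goal-matching step described above can be illustrated with a toy unifier over flat goal terms. Real APE goals are first-order terms without quantifiers, and the filter and precondition checks are omitted from this sketch; the function and variable conventions below are assumptions of the example.

    # Toy unification of a goal pattern against an agenda goal; variables
    # are strings starting with "?". Flat terms only, for illustration.
    def unify(pattern, goal, env=None):
        env = dict(env or {})
        if len(pattern) != len(goal):
            return None
        for p, g in zip(pattern, goal):
            if isinstance(p, str) and p.startswith("?"):
                if env.setdefault(p, g) != g:   # clash with earlier binding
                    return None
            elif p != g:
                return None
        return env

    print(unify(("explain", "?concept"), ("explain", "acceleration")))
    # -> {'?concept': 'acceleration'}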
276
- {
277
- "text": "From the point of view of discourse generation, the most important APE recipe items are those allowing the planner to change the agenda when necessary. These three types of recipe items make APE more powerful than a classical planner.",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "Structure of the Atlas Planning Engine",
282
- "sec_num": null
283
- },
284
- {
285
- "text": "\u2022 Fact: Evaluate a condition. If false, skip the rest of the recipe. Fact is used to allow run-time decision making by bypassing the rest of an operator when circumstances change during its execution. Fact can be used with retry-at to implement a loop just as in Prolog.",
286
- "cite_spans": [],
287
- "ref_spans": [],
288
- "eq_spans": [],
289
- "section": "Structure of the Atlas Planning Engine",
290
- "sec_num": null
291
- },
292
- {
293
- "text": "\u2022 Retry-at. The purpose of retry-at is to allow the planner to back up to a choice point and make a new decision. It removes goals sequentially from the top of the agenda, a full operator at a time, until the supplied argument is false. Then it restores the parent goal of the last operator removed, so that further planning can choose a new way to achieve it. Retry-at implements a Prolog-like choice of alternatives, but it differs from backtracking in that the new operator is chosen based on conditions that apply when the retry operation is executed, rather than on a list of possible operators formed when the original operator was chosen. For retry-at to be useful, the author must provide multiple operators for the same goal. Each operator must have a set of preconditions enabling it to be chosen at the appropriate time.",
294
- "cite_spans": [],
295
- "ref_spans": [],
296
- "eq_spans": [],
297
- "section": "Structure of the Atlas Planning Engine",
298
- "sec_num": null
299
- },
300
- {
301
- "text": "\u2022 Prune-replace: The intent of prune-replace is (def-operator handle-same-direction :goal (...) :filter () :precond (...) ; We have asked a question about acceleration ; ... and the student has given an answer ; ... from which we can deduce that s/he thinks accel, and velocity go in ; the same direction ; and we have not given the explanation below yet :recipe (...)",
302
- "cite_spans": [],
303
- "ref_spans": [],
304
- "eq_spans": [],
305
- "section": "Structure of the Atlas Planning Engine",
306
- "sec_num": null
307
- },
308
- {
309
- "text": "; Tell the student: \"But if the acceleration went the same direction as the velocity, then the elevator would be speeding up.\" ; Mark that we are giving this explanation ; Tell the student that tutor is requesting another answer (\"Try again.\") ; Edit the agenda (using prune-replace) so that responding to another answer is at the top of the agenda :hiercx ()) APE is domain-independent and communicates with a host system via an API. As a partner in a dialogue, it needs to obtain information from the world as well as produce output turns. Preconditions on plan operators can be used to access information from external knowledge sources. APE contains a recipe item type that can be used to execute an external program such as a call to a GUI interface. APE also has recipe items allowing the user to assert and retract facts in a knowledge base. Further details about the APE planner can be found in (Freedman, 2000) .",
310
- "cite_spans": [
311
- {
312
- "start": 903,
313
- "end": 919,
314
- "text": "(Freedman, 2000)",
315
- "ref_id": "BIBREF7"
316
- }
317
- ],
318
- "ref_spans": [],
319
- "eq_spans": [],
320
- "section": "Structure of the Atlas Planning Engine",
321
- "sec_num": null
322
- },
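As a rough illustration of the agenda editing performed by prune-replace (retry-at behaves similarly, except that it restores the parent goal of the last operator removed so that a new operator can be chosen for it), here is a sketch over a list-valued agenda. The goal and operator names are invented.

    # Sketch of prune-replace over an agenda of (goal, operator_id) pairs,
    # most urgent first. Illustrative only; not APE's implementation.
    def prune_replace(agenda, while_cond, replacement):
        while agenda and while_cond(agenda[0]):   # pop while condition holds
            agenda.pop(0)
        return list(replacement) + agenda         # push the new goals

    agenda = [("await-answer", "op7"), ("summarize", "op2")]
    agenda = prune_replace(agenda,
                           while_cond=lambda g: g[0] == "await-answer",
                           replacement=[("respond-to-answer", "op8")])
    print(agenda)  # [('respond-to-answer', 'op8'), ('summarize', 'op2')]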
323
- {
324
- "text": "Implementation of Atlas-Andes",
325
- "cite_spans": [],
326
- "ref_spans": [],
327
- "eq_spans": [],
328
- "section": "5",
329
- "sec_num": null
330
- },
331
- {
332
- "text": "The first system we have implemented with APE is a prototype Atlas-Andes system that replaces the hints usually given for an incorrect acceleration vector by a choice of generated subdialogues. Figure 4 shows the architecture of Atlas-Andes; any other system built with APE would look similar. Robust natural language understanding in Atlas-Andes is provided by Ros6's CARMEL system (Ros6 2000) ; it uses the spelling correction algorithm devised by Elmi and Evens (1998).",
333
- "cite_spans": [
334
- {
335
- "start": 362,
336
- "end": 394,
337
- "text": "Ros6's CARMEL system (Ros6 2000)",
338
- "ref_id": null
339
- }
340
- ],
341
- "ref_spans": [
342
- {
343
- "start": 194,
344
- "end": 202,
345
- "text": "Figure 4",
346
- "ref_id": null
347
- }
348
- ],
349
- "eq_spans": [],
350
- "section": "Architecture of Atlas-Andes",
351
- "sec_num": "5.1"
352
- },
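The architecture in Figure 4 implies a simple control loop between the dialogue manager and its host system. The sketch below uses stub classes with hypothetical method names; the actual Atlas API is not reproduced here.

    # Sketch of the dialogue-manager / host loop: the planner produces a turn
    # (text and/or GUI actions), the host performs it and returns the
    # student's next event. All names are hypothetical.
    class StubPlanner:
        def __init__(self, turns): self.turns = list(turns)
        def done(self): return not self.turns
        def next_turn(self, event): return self.turns.pop(0)

    class StubHost:
        def initial_event(self): return "student-drew-wrong-vector"
        def perform(self, turn): print("TUTOR:", turn)
        def read_student_event(self): return "student-answered"

    def dialogue_loop(planner, host):
        event = host.initial_event()
        while not planner.done():
            turn = planner.next_turn(event)    # plan up to a turn-ending action
            host.perform(turn)
            event = host.read_student_event()  # utterance or GUI action

    dialogue_loop(StubPlanner(["What is the definition of acceleration?",
                               "Try to draw the acceleration vector again now."]),
                  StubHost())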
353
- {
354
- "text": "In an earlier analysis (Kim, Freedman and Evens 1998) we showed that a significant portion of human-human tutorial dialogues can be modeled with the hierarchical structure of task-oriented dialogues (Grosz and Sidner 1986) . Furthermore, a main building block of the discourse hierarchy, corresponding to the transaction level in Conversation Analysis (Sinclair and Coulthard 1975) , matches the tutoring episode defined by VanLehn et al. (1998) . A tutoring episode consists of the turns necessary to help the student make one correct entry on the interface. ",
355
- "cite_spans": [
356
- {
357
- "start": 23,
358
- "end": 53,
359
- "text": "(Kim, Freedman and Evens 1998)",
360
- "ref_id": "BIBREF12"
361
- },
362
- {
363
- "start": 199,
364
- "end": 222,
365
- "text": "(Grosz and Sidner 1986)",
366
- "ref_id": "BIBREF11"
367
- },
368
- {
369
- "start": 352,
370
- "end": 381,
371
- "text": "(Sinclair and Coulthard 1975)",
372
- "ref_id": "BIBREF16"
373
- },
374
- {
375
- "start": 424,
376
- "end": 445,
377
- "text": "VanLehn et al. (1998)",
378
- "ref_id": "BIBREF18"
379
- }
380
- ],
381
- "ref_spans": [],
382
- "eq_spans": [],
383
- "section": "Structure of human tutorial dialogues",
384
- "sec_num": "5.2"
385
- },
386
394
- {
395
- "text": "To obtain empirical data for the Atlas-Andes plan operators, we analyzed portions of a corpus of human tutors helping students solve similar physics problems. Two experienced tutors were used. Tutor A was a graduate student in computer science who had majored in physics; tutor B was a professional physics tutor.",
396
- "cite_spans": [],
397
- "ref_spans": [],
398
- "eq_spans": [],
399
- "section": "Figure 4: Interface between Atlas and host system",
400
- "sec_num": null
401
- },
402
- {
403
- "text": "The complete corpus contained solutions to five physics problems by 41 students each. We analyzed every tutoring episode dealing with the acceleration vector during deceleration, totaling 29 examples divided among 20 students and both tutors. The tutors had very different styles. Tutor A tended to provide encouragement rather than content, making those transcripts less useful for deriving an information-based approach. Tutor B used an information-based approach, but after one wrong answer tended to complete the solution as a monologue. Largely following tutor B's approach to sequence and content, we isolated six ways of teaching the student about direction of acceleration. Figure 5 shows an example of text that can be generated by the Atlas-Andes system, showing an analogy-based approach to teaching this content. The operator library used to generate this text could generate a combinatorially large number of versions of this dialogue as well as selected examples of other ways of teaching about direction of acceleration.",
404
- "cite_spans": [],
405
- "ref_spans": [
406
- {
407
- "start": 682,
408
- "end": 690,
409
- "text": "Figure 5",
410
- "ref_id": null
411
- }
412
- ],
413
- "eq_spans": [],
414
- "section": "Figure 4: Interface between Atlas and host system",
415
- "sec_num": null
416
- },
417
- {
418
- "text": "This operator library used to generate this text contained 1 l 1 plan operators, divided as follows: ",
419
- "cite_spans": [],
420
- "ref_spans": [],
421
- "eq_spans": [],
422
- "section": "Sample output and evaluation",
423
- "sec_num": "5.3"
424
- },
425
- {
426
- "text": "We are currently working on components that will allow us to increase the number of physics concepts covered without a corresponding increase in the number of operators. The schema switching operators prevent the tutor from repeating itself during a physics problem. They could be reduced or eliminated by a general discourse history component that tutoring schema operators could refer to. Domain-dependent lexical insertion refers to the choice of lexical items such as car and east in the sample dialogue, while domain-independent iexical insertion refers to items such as OK and exactly. Both categories could be eliminated, or at least severely reduced, through the use of a text realization package.",
427
- "cite_spans": [],
428
- "ref_spans": [],
429
- "eq_spans": [],
430
- "section": "100%",
431
- "sec_num": "111"
432
- },
433
- {
434
- "text": "Together that would provide a one-third reduction in the number of operators needed. As the set of API and GUI handling operators is fixed, that would reduce by half the number of application operators needed. The largest remaining category of operators is the answer handlers. These operators handle a variety of answers for each of the five questions that the system can ask. The answers we recognize include categories such as \"don't know\" as well as specific answers (e.g. a direction perpendicular to the correct answer) which we recognize because the tutor has specific replies for them. In order to reduce the number of S: (draws acceleration vector in same direction as velocity) T: What is the definition of acceleration? S: Don't know. T: OK, let's try this. If a car was driving along east, which way would you have to push on it to make it stop? S: West. T: Exactly. The opposite direction. So the net force goes the opposite direction, and so does the acceleration. Try to draw the acceleration vector again now. S: (draws acceleration vector correctly) Figure 5 : Example of generated dialogue operators further, we must investigate more general methods of handling student errors. In particular, we plan to investigate error-classifying predicates that apply to more than one question as well as the use of intention-based predicates.",
435
- "cite_spans": [],
436
- "ref_spans": [
437
- {
438
- "start": 1067,
439
- "end": 1075,
440
- "text": "Figure 5",
441
- "ref_id": null
442
- }
443
- ],
444
- "eq_spans": [],
445
- "section": "100%",
446
- "sec_num": "111"
447
- },
448
- {
449
- "text": "Since the system only covers one rule of physics, albeit in a variety of ways, we plan to make some of these efficiency improvements before adding new rules of physics and testing it with users.",
450
- "cite_spans": [],
451
- "ref_spans": [],
452
- "eq_spans": [],
453
- "section": "100%",
454
- "sec_num": "111"
455
- },
456
- {
457
- "text": "Preconditions for the operators in the plan library utilize discourse or interaction history, the current goal hierarchy, recent information such as the tutor's current goal and the student's latest response, shared information such as a model of objects on the screen, and domain knowledge. As an example of the latter, if the student draws an acceleration vector which is incorrect but not opposite to the velocity vector, a different response will be generated.",
458
- "cite_spans": [],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "100%",
462
- "sec_num": "111"
463
- },
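A precondition of the kind mentioned in this paragraph (the student's vector is wrong but not opposite to the velocity) could be written as a predicate over the planner's state. The state keys and angle arithmetic below are invented for illustration; they are not the predicates used in Atlas-Andes.

    # Sketch of precondition checking over discourse and domain state.
    state = {
        "history": {"asked-definition"},
        "latest-response": {"kind": "vector", "direction": 0},
        "correct-direction": 270,
    }

    def wrong_but_not_opposite(s):
        r = s["latest-response"]
        return (r["kind"] == "vector"
                and r["direction"] != s["correct-direction"]
                and abs(r["direction"] - s["correct-direction"]) % 360 != 180)

    print(wrong_but_not_opposite(state))  # True: wrong (perpendicular), not opposite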
464
- {
465
- "text": "Many previous dialogue-based ITSs have been implemented with finite-state machines, either simple or augmented. In the most common finite state mode[, each time the human user issues an utterance, the processor reduces it to one of a small number of categories. These categories represent the possible transitions between states. Thus history can be stored, and context considered, only by expanding the number of states. This approach puts an arbitrary restriction on the amount of context or depth of conversational nesting that can be considered. More importantly, it misses the significant generalization that these types of dialogues are hierarchical: larger units contain repeated instances of the same smaller units in different sequences and instantiated with different values. Furthermore, the finite-state machine approach does not allow the author to drop one line of attack and replace it by another without hardcoding every possible transition.",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Discussion",
470
- "sec_num": "5.4"
471
- },
472
- {
473
- "text": "It is also clear that the dialogue-based approach has many benefits over the hint-sequence approach. In addition to providing a multi-step teaching methods with new content, it can respond flexibly to a variety of student answers at each step and take context into account when generating a reply.",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "Discussion",
478
- "sec_num": "5.4"
479
- },
480
- {
481
- "text": "Related work Wenger (1987) , still the chief textbook on ITSs, states that using a global planner to control an ITS is too inefficient to try. This is no longer true, if indeed it ever was. Vassileva (1995) proposes a system based on AND-OR graphs with a separate set of rules for reacting to unexpected events. Lehuen, Nicolle and Luzzati (1996) present a method of dialogue analysis that produces schemata very similar to ours. Earlier dialoguebased ITSs that use augmented finite-state machines or equivalent include CIRCSIM-Tutor (Woo et al. 1991 , Zhouet al. 1999 and the system described by Woolf (1984) . Cook (1998) uses levels of finite-state machines. None of these systems provides for predicates with variables or unification.",
482
- "cite_spans": [
483
- {
484
- "start": 13,
485
- "end": 26,
486
- "text": "Wenger (1987)",
487
- "ref_id": "BIBREF20"
488
- },
489
- {
490
- "start": 190,
491
- "end": 206,
492
- "text": "Vassileva (1995)",
493
- "ref_id": "BIBREF19"
494
- },
495
- {
496
- "start": 312,
497
- "end": 346,
498
- "text": "Lehuen, Nicolle and Luzzati (1996)",
499
- "ref_id": "BIBREF13"
500
- },
501
- {
502
- "start": 534,
503
- "end": 550,
504
- "text": "(Woo et al. 1991",
505
- "ref_id": "BIBREF23"
506
- },
507
- {
508
- "start": 551,
509
- "end": 568,
510
- "text": ", Zhouet al. 1999",
511
- "ref_id": null
512
- },
513
- {
514
- "start": 597,
515
- "end": 609,
516
- "text": "Woolf (1984)",
517
- "ref_id": null
518
- },
519
- {
520
- "start": 612,
521
- "end": 623,
522
- "text": "Cook (1998)",
523
- "ref_id": "BIBREF4"
524
- }
525
- ],
526
- "ref_spans": [],
527
- "eq_spans": [],
528
- "section": "6",
529
- "sec_num": null
530
- },
531
- {
532
- "text": "In this paper we described APE, an integrated planner and execution system that we have implemented as part of the Atlas dialogue manager. APE uses HTN-style operators and is based on reactive planning concepts. Although APE is intended largely for use in domains with hierarchical, multi-turn plans, it can be used to implement any conversation-based system, where turns in the 'conversation' may include graphical actions and/or text. We illustrated the use of APE with an example from the Atlas-Andes physics tutor. We showed that previous models based on finite-state machines are insufficient to handle the nested subdialogues and abandoned partial subdialogues that occur in practical applications. We showed how APE generated a sample dialogue that earlier systems could not handle.",
533
- "cite_spans": [],
534
- "ref_spans": [],
535
- "eq_spans": [],
536
- "section": "Conclusions",
537
- "sec_num": "7"
538
- }
539
- ],
540
- "back_matter": [
541
- {
542
- "text": "We thank Abigail Gertner for her generous assistance with the Andes system, and Michael Ringenberg for indispensible programming support.Carolyn Ros6 built the CARMEL natural language understanding component.",
543
- "cite_spans": [],
544
- "ref_spans": [],
545
- "eq_spans": [],
546
- "section": "Acknowledgments",
547
- "sec_num": null
548
- },
549
- {
550
- "text": "Institute of Technology provided the spelling correction code. We thank Pamela Jordan and the referees for their comments.",
551
- "cite_spans": [],
552
- "ref_spans": [],
553
- "eq_spans": [],
554
- "section": "Mohammed EImi and Michael Glass of Illinois",
555
- "sec_num": null
556
- }
557
- ],
558
- "bib_entries": {
559
- "BIBREF0": {
560
- "ref_id": "b0",
561
- "title": "Intentions, Plans, and Practical Reason",
562
- "authors": [
563
- {
564
- "first": "M",
565
- "middle": [
566
- "E"
567
- ],
568
- "last": "Bratman",
569
- "suffix": ""
570
- }
571
- ],
572
- "year": 1987,
573
- "venue": "",
574
- "volume": "",
575
- "issue": "",
576
- "pages": "",
577
- "other_ids": {},
578
- "num": null,
579
- "urls": [],
580
- "raw_text": "Bratman, M. E. 1987. Intentions, Plans, and Practical Reason. Cambridge, MA: Harvard.",
581
- "links": null
582
- },
583
- "BIBREF1": {
584
- "ref_id": "b1",
585
- "title": "What is Intention?",
586
- "authors": [
587
- {
588
- "first": "M",
589
- "middle": [
590
- "E"
591
- ],
592
- "last": "Bratman",
593
- "suffix": ""
594
- }
595
- ],
596
- "year": 1990,
597
- "venue": "",
598
- "volume": "",
599
- "issue": "",
600
- "pages": "",
601
- "other_ids": {},
602
- "num": null,
603
- "urls": [],
604
- "raw_text": "Bratman, M. E. 1990. What is Intention? In P.R.",
605
- "links": null
606
- },
607
- "BIBREF2": {
608
- "ref_id": "b2",
609
- "title": "Intentions in Communication",
610
- "authors": [
611
- {
612
- "first": "J",
613
- "middle": [],
614
- "last": "Cohen",
615
- "suffix": ""
616
- },
617
- {
618
- "first": "M",
619
- "middle": [
620
- "E"
621
- ],
622
- "last": "Morgan",
623
- "suffix": ""
624
- },
625
- {
626
- "first": "",
627
- "middle": [],
628
- "last": "Pollack",
629
- "suffix": ""
630
- }
631
- ],
632
- "year": null,
633
- "venue": "",
634
- "volume": "",
635
- "issue": "",
636
- "pages": "",
637
- "other_ids": {},
638
- "num": null,
639
- "urls": [],
640
- "raw_text": "Cohen, J. Morgan and M. E. Pollack, Intentions in Communication. Cambridge, MA: MIT Press.",
641
- "links": null
642
- },
643
- "BIBREF3": {
644
- "ref_id": "b3",
645
- "title": "Plans and Resource-Bounded Practical Reasoning",
646
- "authors": [
647
- {
648
- "first": "M",
649
- "middle": [
650
- "E"
651
- ],
652
- "last": "Bratman",
653
- "suffix": ""
654
- },
655
- {
656
- "first": "D",
657
- "middle": [
658
- "J"
659
- ],
660
- "last": "Israel",
661
- "suffix": ""
662
- },
663
- {
664
- "first": "M",
665
- "middle": [
666
- "E"
667
- ],
668
- "last": "Pollack",
669
- "suffix": ""
670
- }
671
- ],
672
- "year": 1988,
673
- "venue": "Computational Intelligence",
674
- "volume": "4",
675
- "issue": "4",
676
- "pages": "349--355",
677
- "other_ids": {},
678
- "num": null,
679
- "urls": [],
680
- "raw_text": "Bratman, M. E., Israel, D. J. and Pollack, M.E. 1988. Plans and Resource-Bounded Practical Reasoning. Computational Intelligence 4(4): 349-355.",
681
- "links": null
682
- },
683
- "BIBREF4": {
684
- "ref_id": "b4",
685
- "title": "Knowledge Mentoring as a Framework for Designing Computer-Based Agents for Supporting Musical Composition Learning",
686
- "authors": [
687
- {
688
- "first": "J",
689
- "middle": [],
690
- "last": "Cook",
691
- "suffix": ""
692
- }
693
- ],
694
- "year": 1998,
695
- "venue": "",
696
- "volume": "",
697
- "issue": "",
698
- "pages": "",
699
- "other_ids": {},
700
- "num": null,
701
- "urls": [],
702
- "raw_text": "Cook, J. 1998. Knowledge Mentoring as a Framework for Designing Computer-Based Agents for Sup- porting Musical Composition Learning. PhD. diss., Computing Department, The Open University.",
703
- "links": null
704
- },
705
- "BIBREF5": {
706
- "ref_id": "b5",
707
- "title": "Spelling Correction using Context",
708
- "authors": [
709
- {
710
- "first": "M",
711
- "middle": [
712
- "A"
713
- ],
714
- "last": "Eimi",
715
- "suffix": ""
716
- },
717
- {
718
- "first": "M",
719
- "middle": [
720
- "W"
721
- ],
722
- "last": "Evens",
723
- "suffix": ""
724
- }
725
- ],
726
- "year": 1998,
727
- "venue": "Proceedings of the 17th COLING/36th ACL (COLING-ACL '98)",
728
- "volume": "",
729
- "issue": "",
730
- "pages": "",
731
- "other_ids": {},
732
- "num": null,
733
- "urls": [],
734
- "raw_text": "EImi, M.A. and Evens, M.W. 1998. Spelling Correction using Context. In Proceedings of the 17th COLING/36th ACL (COLING-ACL '98), Montreal.",
735
- "links": null
736
- },
737
- "BIBREF6": {
738
- "ref_id": "b6",
739
- "title": "HTN Planning: Complexity and Expressivity",
740
- "authors": [
741
- {
742
- "first": "K",
743
- "middle": [],
744
- "last": "Erol",
745
- "suffix": ""
746
- },
747
- {
748
- "first": "J",
749
- "middle": [],
750
- "last": "Hendler",
751
- "suffix": ""
752
- },
753
- {
754
- "first": "D",
755
- "middle": [
756
- "S"
757
- ],
758
- "last": "Nau",
759
- "suffix": ""
760
- }
761
- ],
762
- "year": 1994,
763
- "venue": "Proceedings of the Twelfth National Conference on Artificial Intelligence (AAAI '94)",
764
- "volume": "",
765
- "issue": "",
766
- "pages": "",
767
- "other_ids": {},
768
- "num": null,
769
- "urls": [],
770
- "raw_text": "Erol, K., Hendler, J. and Nau, D.S. 1994. HTN Planning: Complexity and Expressivity. In Proceedings of the Twelfth National Conference on Artificial Intelligence (AAAI '94), Seattle.",
771
- "links": null
772
- },
773
- "BIBREF7": {
774
- "ref_id": "b7",
775
- "title": "Using a Reactive Planner as the Basis for a Dialogue Agent",
776
- "authors": [
777
- {
778
- "first": "R",
779
- "middle": [],
780
- "last": "Freedman",
781
- "suffix": ""
782
- }
783
- ],
784
- "year": 2000,
785
- "venue": "Proceedings of the Thirteenth Florida Artificial Intelligence Research Symposium (FLAIRS'00)",
786
- "volume": "",
787
- "issue": "",
788
- "pages": "",
789
- "other_ids": {},
790
- "num": null,
791
- "urls": [],
792
- "raw_text": "Freedman, R. 2000 (to appear). Using a Reactive Planner as the Basis for a Dialogue Agent. In Proceedings of the Thirteenth Florida Artificial Intelligence Research Symposium (FLAIRS'00), Orlando.",
793
- "links": null
794
- },
795
- "BIBREF8": {
796
- "ref_id": "b8",
797
- "title": "Procedural Help in Andes: Generating Hints Using a Bayesian Network Student Model",
798
- "authors": [
799
- {
800
- "first": "A",
801
- "middle": [
802
- "S"
803
- ],
804
- "last": "Gertner",
805
- "suffix": ""
806
- },
807
- {
808
- "first": "C",
809
- "middle": [],
810
- "last": "Conati",
811
- "suffix": ""
812
- },
813
- {
814
- "first": "K",
815
- "middle": [],
816
- "last": "Vanlehn",
817
- "suffix": ""
818
- }
819
- ],
820
- "year": 1998,
821
- "venue": "Proceedings of the Fifteenth National Conference on Artificial Intelligence (AAAI '98)",
822
- "volume": "",
823
- "issue": "",
824
- "pages": "",
825
- "other_ids": {},
826
- "num": null,
827
- "urls": [],
828
- "raw_text": "Gertner, A.S., Conati, C. and VanLehn, K. 1998. Procedural Help in Andes: Generating Hints Using a Bayesian Network Student Model. In Proceedings of the Fifteenth National Conference on Artificial Intelligence (AAAI '98), Madison.",
829
- "links": null
830
- },
831
- "BIBREF9": {
832
- "ref_id": "b9",
833
- "title": "Decision-Making in an Embedded Reasoning System",
834
- "authors": [
835
- {
836
- "first": "M",
837
- "middle": [
838
- "P"
839
- ],
840
- "last": "Georgeff",
841
- "suffix": ""
842
- },
843
- {
844
- "first": "F",
845
- "middle": [
846
- "F"
847
- ],
848
- "last": "Ingrand",
849
- "suffix": ""
850
- }
851
- ],
852
- "year": 1989,
853
- "venue": "Proceedings of the Eleventh International Joint Conference on Artificial Intelligence (IJCAI '89)",
854
- "volume": "",
855
- "issue": "",
856
- "pages": "",
857
- "other_ids": {},
858
- "num": null,
859
- "urls": [],
860
- "raw_text": "Georgeff, M. P. and Ingrand, F. F. 1989. Decision- Making in an Embedded Reasoning System. In Proceedings of the Eleventh International Joint Conference on Artificial Intelligence (IJCAI '89), Detroit.",
861
- "links": null
862
- },
863
- "BIBREF10": {
864
- "ref_id": "b10",
865
- "title": "The Belief-Desire-Intention Model of Agency",
866
- "authors": [
867
- {
868
- "first": "M",
869
- "middle": [
870
- "P"
871
- ],
872
- "last": "Georgeff",
873
- "suffix": ""
874
- },
875
- {
876
- "first": "B",
877
- "middle": [],
878
- "last": "Pell",
879
- "suffix": ""
880
- },
881
- {
882
- "first": "M",
883
- "middle": [
884
- "E"
885
- ],
886
- "last": "Pollack",
887
- "suffix": ""
888
- },
889
- {
890
- "first": "M",
891
- "middle": [],
892
- "last": "Tambe",
893
- "suffix": ""
894
- },
895
- {
896
- "first": "M",
897
- "middle": [],
898
- "last": "Wooldridge",
899
- "suffix": ""
900
- }
901
- ],
902
- "year": 1998,
903
- "venue": "Intelligent Agents V",
904
- "volume": "",
905
- "issue": "",
906
- "pages": "",
907
- "other_ids": {},
908
- "num": null,
909
- "urls": [],
910
- "raw_text": "Georgeff, M.P., Pell, B., Pollack, M. E., Tambe, M. and Wooldridge, M. 1998. The Belief-Desire- Intention Model of Agency. In N. Jenning, J. Muller, and M. Wooldridge (Eds.), Intelligent Agents V. Springer.",
911
- "links": null
912
- },
913
- "BIBREF11": {
914
- "ref_id": "b11",
915
- "title": "Attention, Intentions, and the Structure of Discourse",
916
- "authors": [
917
- {
918
- "first": "B",
919
- "middle": [
920
- "J"
921
- ],
922
- "last": "Grosz",
923
- "suffix": ""
924
- },
925
- {
926
- "first": "C",
927
- "middle": [
928
- "L"
929
- ],
930
- "last": "Sidner",
931
- "suffix": ""
932
- }
933
- ],
934
- "year": 1986,
935
- "venue": "Computational Linguistics",
936
- "volume": "12",
937
- "issue": "3",
938
- "pages": "175--204",
939
- "other_ids": {},
940
- "num": null,
941
- "urls": [],
942
- "raw_text": "Grosz, B.J. and Sidner, C.L. 1986. Attention, Intentions, and the Structure of Discourse. Computational Linguistics 12(3): 175-204.",
943
- "links": null
944
- },
945
- "BIBREF12": {
946
- "ref_id": "b12",
947
- "title": "Responding to Unexpected Student Utterances in CIRCSIM-Tutor v. 3: Analysis of Transcripts",
948
- "authors": [
949
- {
950
- "first": "J",
951
- "middle": [],
952
- "last": "Kim",
953
- "suffix": ""
954
- },
955
- {
956
- "first": "R",
957
- "middle": [],
958
- "last": "Freedman",
959
- "suffix": ""
960
- },
961
- {
962
- "first": "M",
963
- "middle": [],
964
- "last": "Evens",
965
- "suffix": ""
966
- }
967
- ],
968
- "year": 1998,
969
- "venue": "Proceedings of the Eleventh Florida Artificial Intelligence Research Symposium (FLAIRS '98)",
970
- "volume": "",
971
- "issue": "",
972
- "pages": "",
973
- "other_ids": {},
974
- "num": null,
975
- "urls": [],
976
- "raw_text": "Kim, J., Freedman, R. and Evens, M. 1998. Responding to Unexpected Student Utterances in CIRCSIM-Tutor v. 3: Analysis of Transcripts. In Proceedings of the Eleventh Florida Artificial Intelligence Research Symposium (FLAIRS '98), Sanibel Island.",
977
- "links": null
978
- },
979
- "BIBREF13": {
980
- "ref_id": "b13",
981
- "title": "Un mod61e hypoth6tico-exp6rimental dynamique pour la gestion des dialogues homme-machine",
982
- "authors": [
983
- {
984
- "first": "J",
985
- "middle": [],
986
- "last": "Lehuen",
987
- "suffix": ""
988
- },
989
- {
990
- "first": "A",
991
- "middle": [],
992
- "last": "Nicolle",
993
- "suffix": ""
994
- },
995
- {
996
- "first": "D",
997
- "middle": [],
998
- "last": "Luzzati",
999
- "suffix": ""
1000
- }
1001
- ],
1002
- "year": 1996,
1003
- "venue": "Actes du dixi6me congr6s de reconnaissance des formes et intelligence artificielle (RFIA '96)",
1004
- "volume": "",
1005
- "issue": "",
1006
- "pages": "",
1007
- "other_ids": {},
1008
- "num": null,
1009
- "urls": [],
1010
- "raw_text": "Lehuen, J., Nicolle, A. and Luzzati, D. 1996. Un mod61e hypoth6tico-exp6rimental dynamique pour la gestion des dialogues homme-machine. In Actes du dixi6me congr6s de reconnaissance des formes et intelligence artificielle (RFIA '96), Rennes.",
1011
- "links": null
1012
- },
1013
- "BIBREF14": {
1014
- "ref_id": "b14",
1015
- "title": "The Uses of Plans",
1016
- "authors": [
1017
- {
1018
- "first": "M",
1019
- "middle": [
1020
- "E"
1021
- ],
1022
- "last": "Pollack",
1023
- "suffix": ""
1024
- }
1025
- ],
1026
- "year": 1992,
1027
- "venue": "Artificial Intelligence",
1028
- "volume": "57",
1029
- "issue": "1",
1030
- "pages": "43--69",
1031
- "other_ids": {},
1032
- "num": null,
1033
- "urls": [],
1034
- "raw_text": "Pollack, M.E. 1992. The Uses of Plans. Artificial Intelligence 57(1): 43-69.",
1035
- "links": null
1036
- },
1037
- "BIBREF15": {
1038
- "ref_id": "b15",
1039
- "title": "A Framework for Robust Semantic Interpretation",
1040
- "authors": [
1041
- {
1042
- "first": "C",
1043
- "middle": [
1044
- "P"
1045
- ],
1046
- "last": "Ros6",
1047
- "suffix": ""
1048
- }
1049
- ],
1050
- "year": 2000,
1051
- "venue": "Proceedings of the First Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL '00)",
1052
- "volume": "",
1053
- "issue": "",
1054
- "pages": "",
1055
- "other_ids": {},
1056
- "num": null,
1057
- "urls": [],
1058
- "raw_text": "Ros6, C. P. 2000. A Framework for Robust Semantic Interpretation. In Proceedings of the First Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL '00).",
1059
- "links": null
1060
- },
1061
- "BIBREF16": {
1062
- "ref_id": "b16",
1063
- "title": "Towards an Analysis of Discourse: The English Used by Teachers and Pupils",
1064
- "authors": [
1065
- {
1066
- "first": "J",
1067
- "middle": [
1068
- "M"
1069
- ],
1070
- "last": "Sinclair",
1071
- "suffix": ""
1072
- },
1073
- {
1074
- "first": "R",
1075
- "middle": [
1076
- "M"
1077
- ],
1078
- "last": "Coulthard",
1079
- "suffix": ""
1080
- }
1081
- ],
1082
- "year": 1975,
1083
- "venue": "",
1084
- "volume": "",
1085
- "issue": "",
1086
- "pages": "",
1087
- "other_ids": {},
1088
- "num": null,
1089
- "urls": [],
1090
- "raw_text": "Sinclair, J. M. and Coulthard, R. M. 1975. Towards an Analysis of Discourse: The English Used by Teachers and Pupils. London: Oxford University Press.",
1091
- "links": null
1092
- },
1093
- "BIBREF17": {
1094
- "ref_id": "b17",
1095
- "title": "Conceptual and Meta Learning during Coached Problem Solving. In Intelligent Tutoring Systems",
1096
- "authors": [
1097
- {
1098
- "first": "K",
1099
- "middle": [],
1100
- "last": "Vanlehn",
1101
- "suffix": ""
1102
- }
1103
- ],
1104
- "year": 1996,
1105
- "venue": "Third International Conference (ITS '96)",
1106
- "volume": "1086",
1107
- "issue": "",
1108
- "pages": "",
1109
- "other_ids": {},
1110
- "num": null,
1111
- "urls": [],
1112
- "raw_text": "VanLehn, K. 1996. Conceptual and Meta Learning during Coached Problem Solving. In Intelligent Tutoring Systems.\" Third International Conference (ITS '96), Montreal. Berlin: Springer. LNCS 1086.",
1113
- "links": null
1114
- },
1115
- "BIBREF18": {
1116
- "ref_id": "b18",
1117
- "title": "What Makes a Tutorial Event Effective?",
1118
- "authors": [
1119
- {
1120
- "first": "K",
1121
- "middle": [],
1122
- "last": "Vanlehn",
1123
- "suffix": ""
1124
- },
1125
- {
1126
- "first": "S",
1127
- "middle": [],
1128
- "last": "Siler",
1129
- "suffix": ""
1130
- },
1131
- {
1132
- "first": "C",
1133
- "middle": [],
1134
- "last": "Murray",
1135
- "suffix": ""
1136
- },
1137
- {
1138
- "first": "W",
1139
- "middle": [],
1140
- "last": "Baggett",
1141
- "suffix": ""
1142
- }
1143
- ],
1144
- "year": 1998,
1145
- "venue": "Proceedings of the Twenty-first Annual Conference of the Cognitive Science Society",
1146
- "volume": "",
1147
- "issue": "",
1148
- "pages": "",
1149
- "other_ids": {},
1150
- "num": null,
1151
- "urls": [],
1152
- "raw_text": "VanLehn, K., Siler, S., Murray, C. and Baggett, W. 1998. What Makes a Tutorial Event Effective? In Proceedings of the Twenty-first Annual Conference of the Cognitive Science Society, Madison. Hillsdale, N J: Erlbaum.",
1153
- "links": null
1154
- },
1155
- "BIBREF19": {
1156
- "ref_id": "b19",
1157
- "title": "Reactive Instructional Planning to Support Interacting Teaching Strategies",
1158
- "authors": [
1159
- {
1160
- "first": "J",
1161
- "middle": [],
1162
- "last": "Vassileva",
1163
- "suffix": ""
1164
- }
1165
- ],
1166
- "year": 1995,
1167
- "venue": "Proceedings of the Seventh World Conference on AI and Education (AI-ED '95)",
1168
- "volume": "",
1169
- "issue": "",
1170
- "pages": "",
1171
- "other_ids": {},
1172
- "num": null,
1173
- "urls": [],
1174
- "raw_text": "Vassileva, J. 1995. Reactive Instructional Planning to Support Interacting Teaching Strategies. In Proceedings of the Seventh World Conference on AI and Education (AI-ED '95), Washington, D.C. Charlottesville, VA: AACE.",
1175
- "links": null
1176
- },
1177
- "BIBREF20": {
1178
- "ref_id": "b20",
1179
- "title": "Computational and Cognitive Approaches to the Communication of Knowledge",
1180
- "authors": [
1181
- {
1182
- "first": "E",
1183
- "middle": [],
1184
- "last": "Wenger",
1185
- "suffix": ""
1186
- }
1187
- ],
1188
- "year": 1987,
1189
- "venue": "",
1190
- "volume": "",
1191
- "issue": "",
1192
- "pages": "",
1193
- "other_ids": {},
1194
- "num": null,
1195
- "urls": [],
1196
- "raw_text": "Wenger, E. 1987. Artificial Intelligence and Tutoring Systems.\" Computational and Cognitive Approaches to the Communication of Knowledge. San Mateo, CA: Morgan Kaufmann.",
1197
- "links": null
1198
- },
1199
- "BIBREF21": {
1200
- "ref_id": "b21",
1201
- "title": "Practical Planning: Extending the Classical AI Planning Paradigm",
1202
- "authors": [
1203
- {
1204
- "first": "D",
1205
- "middle": [],
1206
- "last": "Wilkins",
1207
- "suffix": ""
1208
- }
1209
- ],
1210
- "year": 1988,
1211
- "venue": "",
1212
- "volume": "",
1213
- "issue": "",
1214
- "pages": "",
1215
- "other_ids": {},
1216
- "num": null,
1217
- "urls": [],
1218
- "raw_text": "Wilkins, D. 1988. Practical Planning: Extending the Classical AI Planning Paradigm. San Mateo, CA: Morgan Kaufmann.",
1219
- "links": null
1220
- },
1221
- "BIBREF22": {
1222
- "ref_id": "b22",
1223
- "title": "Planning and Reacting in Uncertain and Dynamic Environments",
1224
- "authors": [
1225
- {
1226
- "first": "D",
1227
- "middle": [],
1228
- "last": "Wilkins",
1229
- "suffix": ""
1230
- },
1231
- {
1232
- "first": "K",
1233
- "middle": [],
1234
- "last": "Myers",
1235
- "suffix": ""
1236
- },
1237
- {
1238
- "first": "J",
1239
- "middle": [],
1240
- "last": "Lowrance",
1241
- "suffix": ""
1242
- },
1243
- {
1244
- "first": "L",
1245
- "middle": [],
1246
- "last": "Wesley",
1247
- "suffix": ""
1248
- }
1249
- ],
1250
- "year": 1995,
1251
- "venue": "Journal of Experimental and Theoretical Artificial Intelligence",
1252
- "volume": "7",
1253
- "issue": "",
1254
- "pages": "121--152",
1255
- "other_ids": {},
1256
- "num": null,
1257
- "urls": [],
1258
- "raw_text": "Wilkins, D., Myers, K., Lowrance, J. and Wesley, L. 1995. Planning and Reacting in Uncertain and Dynamic Environments. Journal of Experimental and Theoretical Artificial Intelligence 7:121-152.",
1259
- "links": null
1260
- },
1261
- "BIBREF23": {
1262
- "ref_id": "b23",
1263
- "title": "Dynamic Instructional Planning for an Intelligent Physiology Tutoring System",
1264
- "authors": [
1265
- {
1266
- "first": "C",
1267
- "middle": [],
1268
- "last": "Woo",
1269
- "suffix": ""
1270
- },
1271
- {
1272
- "first": "M",
1273
- "middle": [
1274
- "W"
1275
- ],
1276
- "last": "Evens",
1277
- "suffix": ""
1278
- },
1279
- {
1280
- "first": "J",
1281
- "middle": [
1282
- "A"
1283
- ],
1284
- "last": "Michael",
1285
- "suffix": ""
1286
- },
1287
- {
1288
- "first": "A",
1289
- "middle": [
1290
- "A"
1291
- ],
1292
- "last": "Rovick",
1293
- "suffix": ""
1294
- }
1295
- ],
1296
- "year": 1984,
1297
- "venue": "Proceedings of the Fourth Annual 1EEE Computer-Based Medical Systems Symposium",
1298
- "volume": "",
1299
- "issue": "",
1300
- "pages": "",
1301
- "other_ids": {},
1302
- "num": null,
1303
- "urls": [],
1304
- "raw_text": "Woo, C., Evens, M.W., Michael, J.A. and Rovick, A.A. 1991. Dynamic Instructional Planning for an Intelligent Physiology Tutoring System. In Proceedings of the Fourth Annual 1EEE Computer- Based Medical Systems Symposium, Baltimore. Woolf, B. 1984. Context-Dependent Planning in a Machine Tutor. Ph.D. diss., Dept. of Computer and Information Science, University of Massachusetts at Amherst. COINS Technical Report 84-21.",
1305
- "links": null
1306
- },
1307
- "BIBREF24": {
1308
- "ref_id": "b24",
1309
- "title": "Formalizing planning knowledge for hierarchical planning",
1310
- "authors": [
1311
- {
1312
- "first": "Q",
1313
- "middle": [],
1314
- "last": "Yang",
1315
- "suffix": ""
1316
- }
1317
- ],
1318
- "year": 1990,
1319
- "venue": "Computational Intelligence",
1320
- "volume": "6",
1321
- "issue": "I",
1322
- "pages": "12--24",
1323
- "other_ids": {},
1324
- "num": null,
1325
- "urls": [],
1326
- "raw_text": "Yang, Q. 1990. Formalizing planning knowledge for hierarchical planning. Computational Intelligence 6(I): 12-24.",
1327
- "links": null
1328
- },
1329
- "BIBREF25": {
1330
- "ref_id": "b25",
1331
- "title": "Delivering Hints in a Dialogue-Based Intelligent Tutoring System",
1332
- "authors": [
1333
- {
1334
- "first": "Y",
1335
- "middle": [],
1336
- "last": "Zhou",
1337
- "suffix": ""
1338
- },
1339
- {
1340
- "first": "R",
1341
- "middle": [],
1342
- "last": "Freedman",
1343
- "suffix": ""
1344
- },
1345
- {
1346
- "first": "M",
1347
- "middle": [],
1348
- "last": "Glass",
1349
- "suffix": ""
1350
- },
1351
- {
1352
- "first": "J",
1353
- "middle": [
1354
- "A"
1355
- ],
1356
- "last": "Michael",
1357
- "suffix": ""
1358
- },
1359
- {
1360
- "first": "A",
1361
- "middle": [
1362
- "A"
1363
- ],
1364
- "last": "Rovick",
1365
- "suffix": ""
1366
- },
1367
- {
1368
- "first": "M",
1369
- "middle": [
1370
- "W"
1371
- ],
1372
- "last": "Evens",
1373
- "suffix": ""
1374
- }
1375
- ],
1376
- "year": 1999,
1377
- "venue": "Proceedings of the Sixteenth National Conference on Artificial Intelligence (AAAI '99)",
1378
- "volume": "",
1379
- "issue": "",
1380
- "pages": "",
1381
- "other_ids": {},
1382
- "num": null,
1383
- "urls": [],
1384
- "raw_text": "Zhou, Y., Freedman, R., Glass, M., Michael, J.A., Rovick, A.A. and Evens, M.W. 1999. Delivering Hints in a Dialogue-Based Intelligent Tutoring System. In Proceedings of the Sixteenth National Conference on Artificial Intelligence (AAAI '99), Orlando, FL.",
1385
- "links": null
1386
- }
1387
- },
1388
- "ref_entries": {
1389
- "FIGREF0": {
1390
- "uris": null,
1391
- "type_str": "figure",
1392
- "num": null,
1393
- "text": "elevator slows to a stop from an initial downward velocity of 10.0 m]s in 2.00 seconds. A passenger in the elevator is holding a 3.00 kilogram package by a vertical string. What is the tension in the string during the process? i ........ ii ....... iii . I \u00f7Y ~TO e',ev~o, at 10 m/s elev~or at a stop mass of p~:w'.,I,,~ magnitude of the inst~\u00a2~taneous Velocity of pack, age ~ {rkne TO v._w magnitude of the avelage Acceleratiorl of package ,dudng TO... aScreen shot of the Andes physics tutor S: (draws acceleration vector in same direction as velocity) T: Wrong. S: What's wrongwith that? T: Think about the direction of the acceleration vector. S: Please explain further. T: Remember that the direction of acceleration is the direction of the change in velocity. S: Please explain further. T: The'direction of the acceleration vector is straight up. S: (draws acceleration vector correctly) Andes hint sequence formatted as dialogue ditions are satisfied. Goals are represented in first-order logic without quantifiers and matched via unification."
1394
- },
1395
- "FIGREF1": {
1396
- "uris": null,
1397
- "type_str": "figure",
1398
- "num": null,
1399
- "text": "Sample plan operatorto allow the planner to remove goals from the agenda based on a change in circumstances. It removes goals sequentially from the top of the agenda, one at a time, until the supplied argument becomes false. Then it replaces the removed goals with an optional list of new goals. Prune-replace allows a type of decision-making frequently used in dialogue generation. When a conversation partner does not give the expected response, one would often like to remove the next goal from the agenda and replace it with one or more replacement goals. Prune-replace implements a generalized version of this concept."
1400
- }
1401
- }
1402
- }
1403
- }
Full_text_JSON/prefixA/json/A00/A00-1009.json DELETED
@@ -1,1257 +0,0 @@
1
- {
2
- "paper_id": "A00-1009",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:09.535519Z"
6
- },
7
- "title": "A Framework for MT and Multilingual NLG Systems Based on Uniform Lexico-Structural Processing",
8
- "authors": [
9
- {
10
- "first": "Benoit",
11
- "middle": [],
12
- "last": "Lavoie",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "[email protected]"
16
- },
17
- {
18
- "first": "Richard",
19
- "middle": [],
20
- "last": "Kittredge",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": "[email protected]"
24
- },
25
- {
26
- "first": "Tanya",
27
- "middle": [],
28
- "last": "Korelsky",
29
- "suffix": "",
30
- "affiliation": {},
31
- "email": ""
32
- },
33
- {
34
- "first": "Owen",
35
- "middle": [],
36
- "last": "Rambow",
37
- "suffix": "",
38
- "affiliation": {},
39
- "email": "[email protected]"
40
- }
41
- ],
42
- "year": "",
43
- "venue": null,
44
- "identifiers": {},
45
- "abstract": "In this paper we describe an implemented framework for developing monolingual or multilingual natural language generation (NLG) applications and machine translation (MT) applications. The framework demonstrates a uniform approach to generation and transfer based on declarative lexico-structural transformations of dependency structures of syntactic or conceptual levels (\"uniform lexico-structural processing\"). We describe how this framework has been used in practical NLG and MT applications, and report the lessons learned.",
46
- "pdf_parse": {
47
- "paper_id": "A00-1009",
48
- "_pdf_hash": "",
49
- "abstract": [
50
- {
51
- "text": "In this paper we describe an implemented framework for developing monolingual or multilingual natural language generation (NLG) applications and machine translation (MT) applications. The framework demonstrates a uniform approach to generation and transfer based on declarative lexico-structural transformations of dependency structures of syntactic or conceptual levels (\"uniform lexico-structural processing\"). We describe how this framework has been used in practical NLG and MT applications, and report the lessons learned.",
52
- "cite_spans": [],
53
- "ref_spans": [],
54
- "eq_spans": [],
55
- "section": "Abstract",
56
- "sec_num": null
57
- }
58
- ],
59
- "body_text": [
60
- {
61
- "text": "In this paper we present a linguistically motivated framework for uniform lexicostructural processing. It has been used for transformations of conceptual and syntactic structures during generation in monolingual and multilingual natural language generation (NLG) and for transfer in machine translation (MT). Our work extends directions taken in systems such as Ariane (Vauquois and Boitet, 1985) , FoG (Kittredge and Polgu6re, 1991) , JOYCE (Rainbow and Korelsky, 1992) , and LFS (Iordanskaja et al., 1992) . Although it adopts the general principles found in the abovementioned systems, the approach presented in this paper is more practical, and we believe, would eventually integrate better with emerging statistics-based approaches to MT. * The work performed on the framework by this coauthor was done while at CoGenTex, Inc.",
62
- "cite_spans": [
63
- {
64
- "start": 257,
65
- "end": 262,
66
- "text": "(NLG)",
67
- "ref_id": null
68
- },
69
- {
70
- "start": 369,
71
- "end": 396,
72
- "text": "(Vauquois and Boitet, 1985)",
73
- "ref_id": "BIBREF14"
74
- },
75
- {
76
- "start": 403,
77
- "end": 433,
78
- "text": "(Kittredge and Polgu6re, 1991)",
79
- "ref_id": null
80
- },
81
- {
82
- "start": 442,
83
- "end": 470,
84
- "text": "(Rainbow and Korelsky, 1992)",
85
- "ref_id": null
86
- },
87
- {
88
- "start": 481,
89
- "end": 507,
90
- "text": "(Iordanskaja et al., 1992)",
91
- "ref_id": "BIBREF3"
92
- }
93
- ],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "Introduction",
97
- "sec_num": "1"
98
- },
99
- {
100
- "text": "The framework consists of a portable Java environment for building NLG or MT applications by defining modules using a core tree transduction engine and single declarative ASCII specification language for conceptual or syntactic dependency tree structures 1 and their transformations. Developers can define new modules, add or remove modules, or modify their connections. Because the processing of the transformation engine is restricted to transduction of trees, it is computationally efficient.",
101
- "cite_spans": [],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "Introduction",
105
- "sec_num": "1"
106
- },
107
- {
108
- "text": "Having declarative rules facilitates their reuse when migrating from one programming environment to another; if the rules are based on functions specific to a programming language, the implementation of these functions might no longer be available in a different environment. In addition, having all lexical information and all rules represented declaratively makes it relatively easy to integrate into the framework techniques for generating some of the rules automatically, for example using corpus-based methods.",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "Introduction",
113
- "sec_num": "1"
114
- },
115
- {
116
- "text": "The declarative form of transformations makes it easier to process them, compare them, and cluster them to achieve proper classification and ordering.",
117
- "cite_spans": [],
118
- "ref_spans": [],
119
- "eq_spans": [],
120
- "section": "Introduction",
121
- "sec_num": "1"
122
- },
123
- {
124
- "text": "1 In this paper, we use the term syntactic dependency (tree) structure as defined in the Meaning-Text Theory (MTT; Mel'cuk, 1988) . However, we extrapolate from this theory when we use the term conceptual dependency (tree) structure, which has no equivalent in MTT (and is unrelated to Shank's CD structures proposed in the 1970s).",
125
- "cite_spans": [
126
- {
127
- "start": 109,
128
- "end": 114,
129
- "text": "(MTT;",
130
- "ref_id": null
131
- },
132
- {
133
- "start": 115,
134
- "end": 129,
135
- "text": "Mel'cuk, 1988)",
136
- "ref_id": "BIBREF9"
137
- }
138
- ],
139
- "ref_spans": [],
140
- "eq_spans": [],
141
- "section": "Introduction",
142
- "sec_num": "1"
143
- },
144
- {
145
- "text": "Thus, the framework represents a generalized processing environment that can be reused in different types of natural language processing (NLP) applications. So far the framework has been used successfully to build a wide variety of NLG and MT applications in several limited domains (meteorology, battlefield messages, object modeling) and for different languages (English, French, Arabic, and Korean).",
146
- "cite_spans": [],
147
- "ref_spans": [],
148
- "eq_spans": [],
149
- "section": "Introduction",
150
- "sec_num": "1"
151
- },
152
- {
153
- "text": "In the next sections, we present the design of the core tree transduction module (Section 2), describe the representations that it uses (Section 3) and the linguistic resources (Section 4). We then discuss the processing performed by the tree transduction module (Section 5) and its instantiation for different applications (Section 6). Finally, we discuss lessons learned from developing and using the framework (Section 7) and describe the history of the framework comparing it to other systems (Section 8).",
154
- "cite_spans": [],
155
- "ref_spans": [],
156
- "eq_spans": [],
157
- "section": "Introduction",
158
- "sec_num": "1"
159
- },
160
- {
161
- "text": "The core processing engine of the framework is a generic tree transduction module for lexicostructural processing, shown in Figure 1 . The module has dependency stuctures as input and output, expressed in the same tree formalism, although not necessarily at the same level (see Section 3). This design facilitates the pipelining of modules for stratificational transformation. In fact, in an application, there are usually several instantiations of this module.",
162
- "cite_spans": [],
163
- "ref_spans": [
164
- {
165
- "start": 124,
166
- "end": 132,
167
- "text": "Figure 1",
168
- "ref_id": null
169
- }
170
- ],
171
- "eq_spans": [],
172
- "section": "The Framework's Tree Transduction Module",
173
- "sec_num": "2"
174
- },
175
- {
176
- "text": "The transduction module consists of three processing steps: lexico-structural preprocessing, main lexico-structural processing, and lexico-structural post-processing. Each of these steps is driven by a separate grammar, and all three steps draw on a common feature data base and lexicon. The grammars, the lexicon and the feature data base are referred to as the linguistic resources (even if they sometimes apply to a conceptual representation). The representations used by all instantiations of the tree transduction module in the framework are dependency tree structures. The main characteristics of all the dependency tree structures are:",
177
- "cite_spans": [],
178
- "ref_spans": [],
179
- "eq_spans": [],
180
- "section": "The Framework's Tree Transduction Module",
181
- "sec_num": "2"
182
- },
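A minimal sketch of the three-step, grammar-driven control flow described above, under the assumption that each grammar is an ordered list of tree-to-tree rules; in the framework itself the rules are declarative specifications, not Python functions.

    # Pre-processing, main processing, and post-processing applied in order;
    # every rule maps a dependency tree to a dependency tree. Illustrative only.
    def transduce(tree, pre_rules, main_rules, post_rules):
        for grammar in (pre_rules, main_rules, post_rules):
            for rule in grammar:
                tree = rule(tree)
        return tree

    # Trivial demonstration rule on a (lexeme, children) tuple encoding.
    add_period = lambda t: (t[0], t[1] + [("PUNC", (".", []))])
    print(transduce(("LOW", []), [], [add_period], []))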
183
- {
184
- "text": "\u2022 A dependency tree is unordered (in contrast with phrase structure trees, there is no ordering between the branches of the tree). \u2022 All the nodes in the tree correspond to lexemes (i.e., lexical heads) or concepts depending on the level of representation. In contrast with a phrase structure representation, there are no phrase-structure nodes labeled with nonterminal symbols. Labelled arcs indicate the dependency relationships between the lexemes.",
185
- "cite_spans": [],
186
- "ref_spans": [],
187
- "eq_spans": [],
188
- "section": "The Framework's Tree Transduction Module",
189
- "sec_num": "2"
190
- },
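The characteristics listed above suggest a very small data structure. The sketch below, with invented field names, encodes unordered labeled dependencies; the example anticipates the Figure 2 structure shown later in this section.

    # Minimal dependency-tree node: a lexeme or concept plus labeled,
    # unordered dependents. Illustrative only.
    from dataclasses import dataclass, field

    @dataclass
    class DepNode:
        lex: str                                    # lexeme or concept name
        feats: dict = field(default_factory=dict)   # e.g. {"class": "verb"}
        deps: list = field(default_factory=list)    # (relation, DepNode) pairs;
                                                    # their order carries no meaning

    tree = DepNode("LOW", deps=[
        ("ATTR", DepNode("-5")),
        ("ATTR", DepNode("TO", deps=[
            ("II", DepNode("HIGH", deps=[("ATTR", DepNode("20"))]))]))])
    print(tree.lex, [rel for rel, _ in tree.deps])  # LOW ['ATTR', 'ATTR']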
191
- {
192
- "text": "The first of these characteristics makes a dependency tree structure a very useful representation for MT and multilingual NLG, since it gives linguists a representation that allows them to abstract over numerous crosslinguistic divergences due to language specific ordering (Polgu~re, 1991) .",
193
- "cite_spans": [
194
- {
195
- "start": 274,
196
- "end": 290,
197
- "text": "(Polgu~re, 1991)",
198
- "ref_id": "BIBREF5"
199
- }
200
- ],
201
- "ref_spans": [],
202
- "eq_spans": [],
203
- "section": "The Framework's Tree Transduction Module",
204
- "sec_num": "2"
205
- },
206
- {
207
- "text": "We have implemented 4 different types of dependency tree structures that can be used for NLG, MT or both: The DSyntSs and SSyntSs correspond closely to the equivalent structures of the Meaning-Text Theory (MTT; Mel'cuk, 1988) : both structures are unordered syntactic representations, but a DSyntS only includes full meaning-bearing lexemes while a SSyntS also contains function words such as determiners, auxiliaries, and strongly governed prepositions.",
208
- "cite_spans": [
209
- {
210
- "start": 205,
211
- "end": 210,
212
- "text": "(MTT;",
213
- "ref_id": null
214
- },
215
- {
216
- "start": 211,
217
- "end": 225,
218
- "text": "Mel'cuk, 1988)",
219
- "ref_id": "BIBREF9"
220
- }
221
- ],
222
- "ref_spans": [],
223
- "eq_spans": [],
224
- "section": "The Framework's Tree Transduction Module",
225
- "sec_num": "2"
226
- },
227
- {
228
- "text": "In the implemented applications, the DSyntSs are the pivotal representations involved in most transformations, as this is also often the case in practice in linguistic-based MT (Hutchins and Somers, 1997) . Figure 2 illustrates a DSyntS from a meteorological application, MeteoCogent (Kittredge and Lavoie, 1998) , represented using the standard graphical notation and also the RealPro ASCII notation used internally in the framework .",
229
- "cite_spans": [
230
- {
231
- "start": 177,
232
- "end": 204,
233
- "text": "(Hutchins and Somers, 1997)",
234
- "ref_id": "BIBREF2"
235
- },
236
- {
237
- "start": 284,
238
- "end": 312,
239
- "text": "(Kittredge and Lavoie, 1998)",
240
- "ref_id": "BIBREF4"
241
- }
242
- ],
243
- "ref_spans": [
244
- {
245
- "start": 207,
246
- "end": 215,
247
- "text": "Figure 2",
248
- "ref_id": "FIGREF3"
249
- }
250
- ],
251
- "eq_spans": [],
252
- "section": "The Framework's Tree Transduction Module",
253
- "sec_num": "2"
254
- },
255
- {
256
- "text": "As Figure 2 illustrates, there is a straightforward mapping between the graphical notation and the ASCII notation supported in the framework. This also applies for all the transformation rules in the framework which illustrates the declarative nature of our approach, The ConcSs correspond to the standard framelike structures used in knowledge representation, with labeled arcs corresponding to slots. We have used them only for a very limited meteorological domain (in MeteoCogent), and we imagine that they will typically be defined in a domain-specific manner. Figure 3 illustrates the mapping between an interlingua defined as a ConcS and a corresponding English DSyntS. This example, also taken from MeteoCogent, illustrates that the conceptual interlingua in NLG can be closer to a database representation of domain data than to its linguistic representations.",
257
- "cite_spans": [],
258
- "ref_spans": [
259
- {
260
- "start": 3,
261
- "end": 11,
262
- "text": "Figure 2",
263
- "ref_id": "FIGREF3"
264
- },
265
- {
266
- "start": 565,
267
- "end": 573,
268
- "text": "Figure 3",
269
- "ref_id": "FIGREF4"
270
- }
271
- ],
272
- "eq_spans": [],
273
- "section": "The Framework's Tree Transduction Module",
274
- "sec_num": "2"
275
- },
276
- {
277
- "text": "I 1 LOW -5 TO ' t LOw ( A'I~R -5 ATTR TO ( il HIGH ( A']I~R 20 ) ) ) Low -S to high 20",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "The Framework's Tree Transduction Module",
282
- "sec_num": "2"
283
- },
284
- {
285
- "text": "As mentioned in (Polgu~re, 1991) , the high level of abstraction of the ConcSs makes them a suitable interlingua for multilingual NLG since they bridge the semantic discrepancies between languages, and they can be produced easily from the domain data. However, most off-the-shelf parsers available for MT produce only syntactic structures, thus the DSyntS level is often more suitable for transfer. Finally, the PSyntSs correspond to the parser outputs represented using RealPro's dependency structure formalism. The PSyntSs may not be valid directly for realization or transfer since they may contain unsupported features or dependency relations. However, the PSyntSs are represented in a way to allow the framework to convert them into valid DSyntS via lexicostructural processing. This conversion is done via conversion grammars customized for each parser. There is a practical need to convert one syntactic formalism to another and so far we have implemented converters for three off-theshelf parsers (Palmer et al., 1998) .",
286
- "cite_spans": [
287
- {
288
- "start": 16,
289
- "end": 32,
290
- "text": "(Polgu~re, 1991)",
291
- "ref_id": "BIBREF5"
292
- },
293
- {
294
- "start": 1005,
295
- "end": 1026,
296
- "text": "(Palmer et al., 1998)",
297
- "ref_id": "BIBREF11"
298
- }
299
- ],
300
- "ref_spans": [],
301
- "eq_spans": [],
302
- "section": "The Framework's Tree Transduction Module",
303
- "sec_num": "2"
304
- },
305
- {
306
- "text": "As mentioned previously, the framework is composed of instantiations of the tree transduction module shown in Figure 1 . Each module has the following resources: This consists of lexico-structural mapping rules for transforming the output structures before they can be processed by the next module. As for the preprocessing rules, these rules can be used to fix some discrepancies between modules.",
307
- "cite_spans": [],
308
- "ref_spans": [
309
- {
310
- "start": 110,
311
- "end": 118,
312
- "text": "Figure 1",
313
- "ref_id": null
314
- }
315
- ],
316
- "eq_spans": [],
317
- "section": "The Framework's Linguistic Resources",
318
- "sec_num": "4"
319
- },
320
- {
329
- "text": "Our representation of the lexicon at the lexical level (as opposed to conceptual) is similar to the one found in RealPro. Figure 4 shows a specification for the lexeme SELL. This lexeme is defined as a verb of regular morphology with two lexical-structural mappings, the first one introducing the preposition TO for its 3 r\u00b0 actant, and the preposition FOR for its 4 th actant: (a seller) X1 sells (merchandise) X2 to (a buyer) X3 for (a price) X4. What is important is that each mapping specifies a transformation between structures at different levels of representation but that are represented in one and the same representation formalism (DSyntS and SSyntS in this case).",
330
- "cite_spans": [],
331
- "ref_spans": [
332
- {
333
- "start": 122,
334
- "end": 130,
335
- "text": "Figure 4",
336
- "ref_id": "FIGREF5"
337
- }
338
- ],
339
- "eq_spans": [],
340
- "section": "The Framework's Linguistic Resources",
341
- "sec_num": "4"
342
- },
343
- {
344
- "text": "As we will see below, grammar rules are also expressed in a similar way. At the conceptual level, the conceptual lexicon associates lexical-structural mapping with concepts in a similar way. Figure 5 illustrates the mapping at the deep-syntactic level associated with the concept #TEMPERATURE. Except for the slight differences in the labelling, this type of specification is similar to the one used on the lexical level. The first mapping rule corresponds to one of the lexico-structural transformations used to convert the interlingual ConcS of Figure 3 to the corresponding DSyntS.",
345
- "cite_spans": [],
346
- "ref_spans": [
347
- {
348
- "start": 191,
349
- "end": 199,
350
- "text": "Figure 5",
351
- "ref_id": null
352
- },
353
- {
354
- "start": 547,
355
- "end": 555,
356
- "text": "Figure 3",
357
- "ref_id": "FIGREF4"
358
- }
359
- ],
360
- "eq_spans": [],
361
- "section": "The Framework's Linguistic Resources",
362
- "sec_num": "4"
363
- },
364
- {
365
- "text": "ZONCEPT: #TEMPERATURE 5EXICAL:",
366
- "cite_spans": [],
367
- "ref_spans": [],
368
- "eq_spans": [],
369
- "section": "The Framework's Linguistic Resources",
370
- "sec_num": "4"
371
- },
372
- {
373
- "text": "[ L~-RULE: #TEMPERATURE ( #minimum SX #maxim~ $Y <--> LOW ( ATTR $X ATTR TO ( II HIGH ( ATTR SY ) ) ) LEX-RULE: #TEMPERATURE ( #minim~ SX <--> LOW ( ATTR $X ) LEX-RULE: #TEMPE~TURE ( #maximum $X <--> HIGH ( ATTR SX ) ]",
374
- "cite_spans": [],
375
- "ref_spans": [],
376
- "eq_spans": [],
377
- "section": "The Framework's Linguistic Resources",
378
- "sec_num": "4"
379
- },
380
- {
381
- "text": "Figure 5: Specification of Concept #TEMPERATURE Note that since each lexicon entry can have more than one lexical-structural mapping rule, the list of these rules represents a small grammar specific to this lexeme or concept.",
382
- "cite_spans": [],
383
- "ref_spans": [],
384
- "eq_spans": [],
385
- "section": "The Framework's Linguistic Resources",
386
- "sec_num": "4"
387
- },
388
- {
389
- "text": "Realization grammar rules of the main grammar include generic mapping rules (which are not lexeme-specific) such as the DSyntS-rule illustrated in Figure 6 , for inserting a determiner. The lexicon formalism has also been extended to implement lexeme-specific lexico-structural transfer rules. Figure 7 shows the lexicostructural transfer of the English verb lexeme MOVE to French implemented for a military and weather domain (Nasr et al., 1998 ):",
390
- "cite_spans": [
391
- {
392
- "start": 427,
393
- "end": 445,
394
- "text": "(Nasr et al., 1998",
395
- "ref_id": "BIBREF10"
396
- }
397
- ],
398
- "ref_spans": [
399
- {
400
- "start": 147,
401
- "end": 155,
402
- "text": "Figure 6",
403
- "ref_id": "FIGREF6"
404
- },
405
- {
406
- "start": 294,
407
- "end": 302,
408
- "text": "Figure 7",
409
- "ref_id": "FIGREF7"
410
- }
411
- ],
412
- "eq_spans": [],
413
- "section": "The Framework's Linguistic Resources",
414
- "sec_num": "4"
415
- },
416
- {
417
- "text": "Cloud will move into the western regions.",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "The Framework's Linguistic Resources",
422
- "sec_num": "4"
423
- },
424
- {
425
- "text": "Des nuages envahiront les rdgions ouest.",
426
- "cite_spans": [],
427
- "ref_spans": [],
428
- "eq_spans": [],
429
- "section": "The Framework's Linguistic Resources",
430
- "sec_num": "4"
431
- },
432
- {
433
- "text": "They moved the assets forward.",
434
- "cite_spans": [],
435
- "ref_spans": [],
436
- "eq_spans": [],
437
- "section": "The Framework's Linguistic Resources",
438
- "sec_num": "4"
439
- },
440
- {
441
- "text": "-.9 lls ont amen~ les ressources vers l 'avant. More general lexico-structural rules for transfer can also be implemented using our grammar rule formalism. Figure 8 gives an English-French transfer rule applied to a weather domain for the transfer of a verb modified by the adverb ALMOST: More details on how the structural divergences described in (Dorr, 1994) can be accounted for using our formalism can be found in .",
442
- "cite_spans": [
443
- {
444
- "start": 349,
445
- "end": 361,
446
- "text": "(Dorr, 1994)",
447
- "ref_id": "BIBREF0"
448
- }
449
- ],
450
- "ref_spans": [
451
- {
452
- "start": 156,
453
- "end": 164,
454
- "text": "Figure 8",
455
- "ref_id": "FIGREF8"
456
- }
457
- ],
458
- "eq_spans": [],
459
- "section": "The Framework's Linguistic Resources",
460
- "sec_num": "4"
461
- },
462
- {
463
- "text": "Before being processed, the rules are first compiled and indexed for optimisation. Each module applies the following processing.",
464
- "cite_spans": [],
465
- "ref_spans": [],
466
- "eq_spans": [],
467
- "section": "The Rule Processing",
468
- "sec_num": "5"
469
- },
470
- {
471
- "text": "The rules are assumed to be ordered from most specific to least specific. The application of the rules to the structures is top-down in a recursive way from the f'n-st rule to the last. For the main grammar, before applying a grammar rule to a given node, dictionary lookup is carried out in order to first apply the lexeme-or conceptspecific rules associated with this node. These are also assumed to be ordered from the most specific to the least specific.",
472
- "cite_spans": [],
473
- "ref_spans": [],
474
- "eq_spans": [],
475
- "section": "The Rule Processing",
476
- "sec_num": "5"
477
- },
478
- {
479
- "text": "If a lexico-structural transformation involves switching a governor node with one of its dependents in the tree, the process is reapplied with the new node governor. When no more rules can be applied, the same process is applied to each dependent of the current governor. When all nodes have been processed, the processing is completed, 6 Using the Framework to build Applications Figure 9 shows how different instantiations of the tree transduction module can be combined to build NLP applications. The diagram does not represent a particular system, but rather shows the kind of transformations that have been implemented using the framework, and how they interact. Each arrow represents one type of processing implemented by an instantiation of the tree transduction module. Each triangle represents a different level of representation. For example, in Figure 9 , starting with the \"Input Sentence LI\" and passing through Parsing, Conversion, Transfer, DSyntS Realization and SSyntS Realization to \"Generated Sentence L2\" we obtain an Ll-to-L2 MT system. Starting with \"Sentence Planning\" and passing through DSyntS Realization, and SSyntS Realization (including linearization and inflection) to \"Generated Sentence LI\", we obtain a monolingual NLG system for L1.",
480
- "cite_spans": [],
481
- "ref_spans": [
482
- {
483
- "start": 381,
484
- "end": 389,
485
- "text": "Figure 9",
486
- "ref_id": "FIGREF9"
487
- },
488
- {
489
- "start": 856,
490
- "end": 864,
491
- "text": "Figure 9",
492
- "ref_id": "FIGREF9"
493
- }
494
- ],
495
- "eq_spans": [],
496
- "section": "The Rule Processing",
497
- "sec_num": "5"
498
- },
499
- {
500
- "text": "So far the framework has been used successfully for building a wide variety of applications in different domains and for different languages:",
501
- "cite_spans": [],
502
- "ref_spans": [],
503
- "eq_spans": [],
504
- "section": "Scope of the Framework ~Conversion bl Parsed",
505
- "sec_num": null
506
- },
507
- {
508
- "text": "level for the domains of meteorology (MeteoCogent; Kittredge and Lavoie, 1998) and object modeling (ModelExplainer; . \u2022 Generation of English text from conceptual interlingua for the meteorology domain (MeteoCogent). (The design of the interlingua can also support the generation of French but this functionality has not yet been implemented.)",
509
- "cite_spans": [
510
- {
511
- "start": 51,
512
- "end": 78,
513
- "text": "Kittredge and Lavoie, 1998)",
514
- "ref_id": "BIBREF4"
515
- }
516
- ],
517
- "ref_spans": [],
518
- "eq_spans": [],
519
- "section": "NLG: \u2022 Realization of English DSyntSs via SSyntS",
520
- "sec_num": null
521
- },
522
- {
523
- "text": "MT:",
524
- "cite_spans": [],
525
- "ref_spans": [],
526
- "eq_spans": [],
527
- "section": "NLG: \u2022 Realization of English DSyntSs via SSyntS",
528
- "sec_num": null
529
- },
530
- {
531
- "text": "\u2022 Transfer on the DSyntS level and realization via SSyntS level for English--French, English--Arabic, English---Korean and Korean--English. Translation in the meteorology and battlefield domains (Nasr et al., 1998) .",
532
- "cite_spans": [
533
- {
534
- "start": 195,
535
- "end": 214,
536
- "text": "(Nasr et al., 1998)",
537
- "ref_id": "BIBREF10"
538
- }
539
- ],
540
- "ref_spans": [],
541
- "eq_spans": [],
542
- "section": "NLG: \u2022 Realization of English DSyntSs via SSyntS",
543
- "sec_num": null
544
- },
545
- {
546
- "text": "\u2022 Conversion of the output structures from off-the-shelf English, French and Korean parsers to DSyntS level before their processing by the other components in the framework (Palmer et al., 1998) .",
547
- "cite_spans": [
548
- {
549
- "start": 173,
550
- "end": 194,
551
- "text": "(Palmer et al., 1998)",
552
- "ref_id": "BIBREF11"
553
- }
554
- ],
555
- "ref_spans": [],
556
- "eq_spans": [],
557
- "section": "NLG: \u2022 Realization of English DSyntSs via SSyntS",
558
- "sec_num": null
559
- },
560
- {
561
- "text": "Empirical results obtained from the applications listed in Section 6 have shown that the approach used in the framework is flexible enough and easily portable to new domains, new languages, and new applications. Moreover, the time spent for development was relatively short compared to that formerly required in developing similar types of applications. Finally, as intended, the limited computational power of the transduction module, as well as careful implementation, including the compilation of declarative linguistic knowledge to Java, have ensured efficient run-time behavior. For example, in the MT domain we did not originally plan for a separate conversion step from the parser output to DSyntS. However, it quickly became apparent that there was a considerable gap between the output of the parsers we were using and the DSyntS representation that was required, and furthermore, that we could use the tree transduction module to quickly bridge this gap.",
562
- "cite_spans": [],
563
- "ref_spans": [],
564
- "eq_spans": [],
565
- "section": "Lessons Learned Using the Framework",
566
- "sec_num": "7"
567
- },
568
- {
569
- "text": "Nevertheless, our tree transduction-based approach has some important limitations. In particular, the framework requires the developer of the transformation rules to maintain them and specify the order in which the rules must be applied. For a small or a stable grammar, this does not pose a problem. However, for large or rapidly changing grammar (such as a transfer grammar in MT that may need to be adjusted when switching from one parser to another), the burden of the developer's task may be quite heavy. In practice, a considerable amount of time can be spent in testing a grammar after its revision.",
570
- "cite_spans": [],
571
- "ref_spans": [],
572
- "eq_spans": [],
573
- "section": "Lessons Learned Using the Framework",
574
- "sec_num": "7"
575
- },
576
- {
577
- "text": "Another major problem is related to the maintenance of both the grammar and the lexicon. On several occasions during the development of these resources, the developer in charge of adding lexical and grammatical data must make some decisions that are domain specific. For example, in MT, writing transfer rules for terms that can have several meanings or uses, they may simplify the problem by choosing a solution based on the context found in the current corpus, which is a perfectly natural strategy. However, later, when porting the transfer resources to other domains, the chosen strategy may need to be revised because the context has changed, and other meanings or uses are found in the new corpora. Because the current approach is based on handcrafted rules, maintenance problems of this sort cannot be avoided when porting the resources to new domains.",
578
- "cite_spans": [],
579
- "ref_spans": [],
580
- "eq_spans": [],
581
- "section": "Lessons Learned Using the Framework",
582
- "sec_num": "7"
583
- },
584
- {
585
- "text": "An approach such as the one described in (Nasr et al., 1998; seems to be solving a part of the problem when it uses corpus analysis techniques for automatically creating a first draft of the lexical transfer dictionary using statistical methods. However, the remaining work is still based on handcrafting because the developer must refine the rules manually. The current framework offers no support for merging handcrafted rules with new lexical rules obtained statistically while preserving the valid handcrafted changes and deleting the invalid ones. In general, a better integration of linguistically based and statistical methods during all the development phases is greatly needed.",
586
- "cite_spans": [
587
- {
588
- "start": 41,
589
- "end": 60,
590
- "text": "(Nasr et al., 1998;",
591
- "ref_id": "BIBREF10"
592
- }
593
- ],
594
- "ref_spans": [],
595
- "eq_spans": [],
596
- "section": "Lessons Learned Using the Framework",
597
- "sec_num": "7"
598
- },
599
- {
600
- "text": "The framework represents a generalization of several predecessor NLG systems based on Meaning-Text Theory: FoG (Kittredge and Polgu~re, 1991) , LFS (Iordanskaja et al., 1992) , and JOYCE (Rambow and Korelsky, 1992) . The framework was originally developed for the realization of deep-syntactic structures in NLG .",
601
- "cite_spans": [
602
- {
603
- "start": 111,
604
- "end": 141,
605
- "text": "(Kittredge and Polgu~re, 1991)",
606
- "ref_id": "BIBREF5"
607
- },
608
- {
609
- "start": 148,
610
- "end": 174,
611
- "text": "(Iordanskaja et al., 1992)",
612
- "ref_id": "BIBREF3"
613
- },
614
- {
615
- "start": 187,
616
- "end": 214,
617
- "text": "(Rambow and Korelsky, 1992)",
618
- "ref_id": "BIBREF13"
619
- }
620
- ],
621
- "ref_spans": [],
622
- "eq_spans": [],
623
- "section": "Lessons Learned Using the Framework",
624
- "sec_num": "7"
625
- },
626
- {
627
- "text": "It was later extended for generation of deep-syntactic structures from conceptual interlingua (Kittredge and Lavoie, 1998) . Finally, it was applied to MT for transfer between deep-syntactic structures of different languages (Palmer et al., 1998) . The current framework encompasses the full spectrum of such transformations, i.e. from the processing of conceptual structures to the processing of deep-syntactic structures, either for NLG or MT.",
628
- "cite_spans": [
629
- {
630
- "start": 94,
631
- "end": 122,
632
- "text": "(Kittredge and Lavoie, 1998)",
633
- "ref_id": "BIBREF4"
634
- },
635
- {
636
- "start": 225,
637
- "end": 246,
638
- "text": "(Palmer et al., 1998)",
639
- "ref_id": "BIBREF11"
640
- }
641
- ],
642
- "ref_spans": [],
643
- "eq_spans": [],
644
- "section": "Lessons Learned Using the Framework",
645
- "sec_num": "7"
646
- },
647
- {
648
- "text": "Compared to its predecessors (Fog, LFS, JOYCE), our approach has obvious advantages in uniformity, declarativity and portability. The framework has been used in a wider variety of domains, for more languages, and for more applications (NLG as well as MT). The framework uses the same engine for all the transformations at all levels because all the syntactic and conceptual structures are represented as dependency tree structures.",
649
- "cite_spans": [],
650
- "ref_spans": [],
651
- "eq_spans": [],
652
- "section": "Lessons Learned Using the Framework",
653
- "sec_num": "7"
654
- },
655
- {
656
- "text": "In contrast, the predecessor systems were not designed to be rapidly portable. These systems used programming languages or scripts for the implementation of the transformation rules, and used different types of processing at different levels of representation. For instance, in LFS conceptual structures were represented as graphs, whereas syntactic structures were represented as trees which required different types of processing at these two levels.",
657
- "cite_spans": [],
658
- "ref_spans": [],
659
- "eq_spans": [],
660
- "section": "Lessons Learned Using the Framework",
661
- "sec_num": "7"
662
- },
663
- {
664
- "text": "Our approach also has some disadvantages compared with the systems mentioned above.",
665
- "cite_spans": [],
666
- "ref_spans": [],
667
- "eq_spans": [],
668
- "section": "Lessons Learned Using the Framework",
669
- "sec_num": "7"
670
- },
671
- {
672
- "text": "Our lexico-structural transformations are far less powerful than those expressible using an arbitrary programming language. In practice, the formalism that we are using for expressing the transformations is inadequate for long-range phenomena (inter-sentential or intra-sentential), including syntactic phenomena such as longdistance wh-movement and discourse phenomena such as anaphora and ellipsis. The formalism could be extended to handle intrasentential syntactic effects, but inter-sentential discourse phenomena probably require procedural rules in order to access lexemes in other sentences.",
673
- "cite_spans": [],
674
- "ref_spans": [],
675
- "eq_spans": [],
676
- "section": "Lessons Learned Using the Framework",
677
- "sec_num": "7"
678
- },
679
- {
680
- "text": "In fact, LFS and JOYCE include a specific module for elliptical structure processing.",
681
- "cite_spans": [],
682
- "ref_spans": [],
683
- "eq_spans": [],
684
- "section": "Lessons Learned Using the Framework",
685
- "sec_num": "7"
686
- },
687
- {
688
- "text": "Similarly, the limited power of the tree transformation rule formalism distinguishes the framework from other NLP frameworks based on more general processing paradigms such as unification of FUF/SURGE in the generation domain (Elhadad and Robin, 1992) .",
689
- "cite_spans": [
690
- {
691
- "start": 226,
692
- "end": 251,
693
- "text": "(Elhadad and Robin, 1992)",
694
- "ref_id": "BIBREF1"
695
- }
696
- ],
697
- "ref_spans": [],
698
- "eq_spans": [],
699
- "section": "Lessons Learned Using the Framework",
700
- "sec_num": "7"
701
- },
702
- {
703
- "text": "The framework is currently being improved in order to use XML-based specifications for representing the dependency structures and the transformation rules in order to offer a more standard development environment and to facilitate the framework extension and maintenance.",
704
- "cite_spans": [],
705
- "ref_spans": [],
706
- "eq_spans": [],
707
- "section": "Status",
708
- "sec_num": "9"
709
- },
710
- {
711
- "text": "History of the Framework and Comparison with Other Systems",
712
- "cite_spans": [],
713
- "ref_spans": [],
714
- "eq_spans": [],
715
- "section": "",
716
- "sec_num": null
717
- }
718
- ],
719
- "back_matter": [
720
- {
721
- "text": "A first implementation of the framework (C++ processor and ASCII formalism for expressing the lexico-structural transformation rules) applied to NLG was developed under SBIR F30602-92-C-0015 awarded by USAF Rome Laboratory.The extensions to MT were developed under SBIR DAAL01-97-C-0016 awarded by the Army Research Laboratory. The Java implementation and general improvements of the framework were developed under SBIR DAAD17-99-C-0008 awarded by the Army Research Laboratory. We are thankful to Ted Caldwell, Daryl McCullough, Alexis Nasr and Mike White for their comments and criticism on the work reported in this paper.",
722
- "cite_spans": [],
723
- "ref_spans": [],
724
- "eq_spans": [],
725
- "section": "Acknowledgements",
726
- "sec_num": null
727
- }
728
- ],
729
- "bib_entries": {
730
- "BIBREF0": {
731
- "ref_id": "b0",
732
- "title": "Machine translation divergences: A formal description and proposed solution",
733
- "authors": [
734
- {
735
- "first": "B",
736
- "middle": [
737
- "J"
738
- ],
739
- "last": "Dorr",
740
- "suffix": ""
741
- }
742
- ],
743
- "year": 1994,
744
- "venue": "Computational Linguistics",
745
- "volume": "20",
746
- "issue": "",
747
- "pages": "597--635",
748
- "other_ids": {},
749
- "num": null,
750
- "urls": [],
751
- "raw_text": "Dorr, B. J. (1994) Machine translation divergences: A formal description and proposed solution. In Computational Linguistics, vol. 20, no. 4, pp. 597- 635.",
752
- "links": null
753
- },
754
- "BIBREF1": {
755
- "ref_id": "b1",
756
- "title": "Controlling Content Realization with Functional Unification Grammars",
757
- "authors": [
758
- {
759
- "first": "M",
760
- "middle": [],
761
- "last": "Elhadad",
762
- "suffix": ""
763
- },
764
- {
765
- "first": "J",
766
- "middle": [],
767
- "last": "Robin",
768
- "suffix": ""
769
- },
770
- {
771
- "first": "R",
772
- "middle": [],
773
- "last": "Dale",
774
- "suffix": ""
775
- },
776
- {
777
- "first": "E",
778
- "middle": [],
779
- "last": "Hovy",
780
- "suffix": ""
781
- }
782
- ],
783
- "year": 1992,
784
- "venue": "Aspects of Automated Natural Language Generation",
785
- "volume": "",
786
- "issue": "",
787
- "pages": "89--104",
788
- "other_ids": {},
789
- "num": null,
790
- "urls": [],
791
- "raw_text": "Elhadad, M. and Robin, J. (1992) Controlling Content Realization with Functional Unification Grammars. In Aspects of Automated Natural Language Generation, Dale, R., Hovy, E., Rosner, D. and Stock, O. Eds., Springer Verlag, pp. 89- 104.",
792
- "links": null
793
- },
794
- "BIBREF2": {
795
- "ref_id": "b2",
796
- "title": "An Introduction to Machine Translation",
797
- "authors": [
798
- {
799
- "first": "W",
800
- "middle": [
801
- "J"
802
- ],
803
- "last": "Hutchins",
804
- "suffix": ""
805
- },
806
- {
807
- "first": "H",
808
- "middle": [
809
- "L"
810
- ],
811
- "last": "Somers",
812
- "suffix": ""
813
- }
814
- ],
815
- "year": 1997,
816
- "venue": "",
817
- "volume": "",
818
- "issue": "",
819
- "pages": "",
820
- "other_ids": {},
821
- "num": null,
822
- "urls": [],
823
- "raw_text": "Hutchins, W. J. and Somers, H. L. (1997) An Introduction to Machine Translation. Academic Press, second edition.",
824
- "links": null
825
- },
826
- "BIBREF3": {
827
- "ref_id": "b3",
828
- "title": "Generation of Extended Bilingual Statistical Reports",
829
- "authors": [
830
- {
831
- "first": "L",
832
- "middle": [],
833
- "last": "Iordanskaja",
834
- "suffix": ""
835
- },
836
- {
837
- "first": "M",
838
- "middle": [],
839
- "last": "Kim",
840
- "suffix": ""
841
- },
842
- {
843
- "first": "R",
844
- "middle": [],
845
- "last": "Kittredge",
846
- "suffix": ""
847
- },
848
- {
849
- "first": "B",
850
- "middle": [],
851
- "last": "Lavoie",
852
- "suffix": ""
853
- },
854
- {
855
- "first": "A",
856
- "middle": [],
857
- "last": "Polgu6re",
858
- "suffix": ""
859
- }
860
- ],
861
- "year": 1992,
862
- "venue": "Proceedings of the 15th International Conference on Computational Linguistics",
863
- "volume": "",
864
- "issue": "",
865
- "pages": "1019--1023",
866
- "other_ids": {},
867
- "num": null,
868
- "urls": [],
869
- "raw_text": "Iordanskaja, L., Kim, M., Kittredge, R., Lavoie, B. and Polgu6re, A. (1992) Generation of Extended Bilingual Statistical Reports. In Proceedings of the 15th International Conference on Computational Linguistics, Nantes, France, pp. 1019-1023.",
870
- "links": null
871
- },
872
- "BIBREF4": {
873
- "ref_id": "b4",
874
- "title": "MeteoCogent: A Knowledge-Based Tool For Generating Weather Forecast Texts",
875
- "authors": [
876
- {
877
- "first": "R",
878
- "middle": [],
879
- "last": "Kittredge",
880
- "suffix": ""
881
- },
882
- {
883
- "first": "B",
884
- "middle": [],
885
- "last": "Lavoie",
886
- "suffix": ""
887
- }
888
- ],
889
- "year": 1998,
890
- "venue": "Proceedings of the American Meteorological Society AI Conference",
891
- "volume": "",
892
- "issue": "",
893
- "pages": "80--83",
894
- "other_ids": {},
895
- "num": null,
896
- "urls": [],
897
- "raw_text": "Kittredge, R. and Lavoie, B. (1998) MeteoCogent: A Knowledge-Based Tool For Generating Weather Forecast Texts. In Proceedings of the American Meteorological Society AI Conference (AMS-98), Phoenix, Arizona, pp. 80--83.",
898
- "links": null
899
- },
900
- "BIBREF5": {
901
- "ref_id": "b5",
902
- "title": "Dependency Grammars for Bilingual Text Generation: Inside FoG's Stratificational Models",
903
- "authors": [
904
- {
905
- "first": "R",
906
- "middle": [],
907
- "last": "Kittredge",
908
- "suffix": ""
909
- },
910
- {
911
- "first": "A",
912
- "middle": [],
913
- "last": "Polgu~re",
914
- "suffix": ""
915
- }
916
- ],
917
- "year": 1991,
918
- "venue": "Proceedings of the International Conference on Current Issues in Computational Linguistics",
919
- "volume": "",
920
- "issue": "",
921
- "pages": "318--330",
922
- "other_ids": {},
923
- "num": null,
924
- "urls": [],
925
- "raw_text": "Kittredge, R. and Polgu~re, A. (1991) Dependency Grammars for Bilingual Text Generation: Inside FoG's Stratificational Models. In Proceedings of the International Conference on Current Issues in Computational Linguistics, Penang, Malaysia, pp. 318-330.",
926
- "links": null
927
- },
928
- "BIBREF6": {
929
- "ref_id": "b6",
930
- "title": "Interlingua for Bilingual Statistical Reports",
931
- "authors": [
932
- {
933
- "first": "B",
934
- "middle": [],
935
- "last": "Lavoie",
936
- "suffix": ""
937
- }
938
- ],
939
- "year": 1995,
940
- "venue": "Notes of IJCAI-95 Workshop on Multilingual Text Generation, Montr6al, Canada",
941
- "volume": "",
942
- "issue": "",
943
- "pages": "84--94",
944
- "other_ids": {},
945
- "num": null,
946
- "urls": [],
947
- "raw_text": "Lavoie, B. (1995) Interlingua for Bilingual Statistical Reports. In Notes of IJCAI-95 Workshop on Multilingual Text Generation, Montr6al, Canada, pp. 84---94.",
948
- "links": null
949
- },
950
- "BIBREF7": {
951
- "ref_id": "b7",
952
- "title": "A Fast and Portable Realizer for Text Generation Systems",
953
- "authors": [
954
- {
955
- "first": "B",
956
- "middle": [],
957
- "last": "Lavoie",
958
- "suffix": ""
959
- },
960
- {
961
- "first": "O",
962
- "middle": [],
963
- "last": "Rambow",
964
- "suffix": ""
965
- }
966
- ],
967
- "year": 1997,
968
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
969
- "volume": "",
970
- "issue": "",
971
- "pages": "265--268",
972
- "other_ids": {},
973
- "num": null,
974
- "urls": [],
975
- "raw_text": "Lavoie, B. and Rambow, O. (1997) A Fast and Portable Realizer for Text Generation Systems. In Proceedings of the Fifth Conference on Applied Natural Language Processing, Washington, DC., pp. 265-268.",
976
- "links": null
977
- },
978
- "BIBREF8": {
979
- "ref_id": "b8",
980
- "title": "Customizable Descriptions of Object-Oriented Models",
981
- "authors": [
982
- {
983
- "first": "B",
984
- "middle": [],
985
- "last": "Lavoie",
986
- "suffix": ""
987
- },
988
- {
989
- "first": "O",
990
- "middle": [],
991
- "last": "Rambow",
992
- "suffix": ""
993
- },
994
- {
995
- "first": "E",
996
- "middle": [],
997
- "last": "Reiter",
998
- "suffix": ""
999
- }
1000
- ],
1001
- "year": 1997,
1002
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
1003
- "volume": "",
1004
- "issue": "",
1005
- "pages": "253--256",
1006
- "other_ids": {},
1007
- "num": null,
1008
- "urls": [],
1009
- "raw_text": "Lavoie, B., Rambow, O. and Reiter, E. (1997) Customizable Descriptions of Object-Oriented Models. In Proceedings of the Fifth Conference on Applied Natural Language Processing, Washington, DC., pp. 253-256.",
1010
- "links": null
1011
- },
1012
- "BIBREF9": {
1013
- "ref_id": "b9",
1014
- "title": "Dependency Syntax",
1015
- "authors": [
1016
- {
1017
- "first": "I",
1018
- "middle": [],
1019
- "last": "Mel'cuk",
1020
- "suffix": ""
1021
- }
1022
- ],
1023
- "year": 1988,
1024
- "venue": "",
1025
- "volume": "",
1026
- "issue": "",
1027
- "pages": "",
1028
- "other_ids": {},
1029
- "num": null,
1030
- "urls": [],
1031
- "raw_text": "Mel'cuk, I. (1988) Dependency Syntax. State University of New York Press, Albany, NY.",
1032
- "links": null
1033
- },
1034
- "BIBREF10": {
1035
- "ref_id": "b10",
1036
- "title": "Enriching lexical transfer with crosslinguistic semantic features",
1037
- "authors": [
1038
- {
1039
- "first": "A",
1040
- "middle": [],
1041
- "last": "Nasr",
1042
- "suffix": ""
1043
- },
1044
- {
1045
- "first": "O",
1046
- "middle": [],
1047
- "last": "Rambow",
1048
- "suffix": ""
1049
- },
1050
- {
1051
- "first": "M",
1052
- "middle": [],
1053
- "last": "Palmer",
1054
- "suffix": ""
1055
- },
1056
- {
1057
- "first": "J",
1058
- "middle": [],
1059
- "last": "Rosenzweig",
1060
- "suffix": ""
1061
- }
1062
- ],
1063
- "year": 1998,
1064
- "venue": "Proceedings of the Interlingua Workshop at the MT Summit",
1065
- "volume": "",
1066
- "issue": "",
1067
- "pages": "",
1068
- "other_ids": {},
1069
- "num": null,
1070
- "urls": [],
1071
- "raw_text": "Nasr, A., Rambow, O., Palmer, M. and Rosenzweig, J. (1998) Enriching lexical transfer with cross- linguistic semantic features. In Proceedings of the Interlingua Workshop at the MT Summit, San Diego, California.",
1072
- "links": null
1073
- },
1074
- "BIBREF11": {
1075
- "ref_id": "b11",
1076
- "title": "Rapid Prototyping of Domain-Specific Machine Translation Systems",
1077
- "authors": [
1078
- {
1079
- "first": "M",
1080
- "middle": [],
1081
- "last": "Palmer",
1082
- "suffix": ""
1083
- },
1084
- {
1085
- "first": "O",
1086
- "middle": [],
1087
- "last": "Rambow",
1088
- "suffix": ""
1089
- },
1090
- {
1091
- "first": "A",
1092
- "middle": [],
1093
- "last": "Nasr",
1094
- "suffix": ""
1095
- }
1096
- ],
1097
- "year": 1998,
1098
- "venue": "Proceedings of the Third Conference on Machine Translation in the Americas (AMTA-98)",
1099
- "volume": "",
1100
- "issue": "",
1101
- "pages": "95--102",
1102
- "other_ids": {},
1103
- "num": null,
1104
- "urls": [],
1105
- "raw_text": "Palmer, M., Rambow, O. and Nasr, A. (1998) Rapid Prototyping of Domain-Specific Machine Translation Systems. In Proceedings of the Third Conference on Machine Translation in the Americas (AMTA-98), PA, USA, pp. 95-102.",
1106
- "links": null
1107
- },
1108
- "BIBREF12": {
1109
- "ref_id": "b12",
1110
- "title": "Everything has not been said about interlinguae: the case of multi-lingual text generation system",
1111
- "authors": [
1112
- {
1113
- "first": "A",
1114
- "middle": [],
1115
- "last": "Polgu6re",
1116
- "suffix": ""
1117
- }
1118
- ],
1119
- "year": 1991,
1120
- "venue": "Proc. of Natural Language Processing Pacific Rim Symposium",
1121
- "volume": "",
1122
- "issue": "",
1123
- "pages": "",
1124
- "other_ids": {},
1125
- "num": null,
1126
- "urls": [],
1127
- "raw_text": "Polgu6re, A. (1991) Everything has not been said about interlinguae: the case of multi-lingual text generation system. In Proc. of Natural Language Processing Pacific Rim Symposium, Singapore.",
1128
- "links": null
1129
- },
1130
- "BIBREF13": {
1131
- "ref_id": "b13",
1132
- "title": "Applied Text Generation",
1133
- "authors": [
1134
- {
1135
- "first": "O",
1136
- "middle": [],
1137
- "last": "Rambow",
1138
- "suffix": ""
1139
- },
1140
- {
1141
- "first": "T",
1142
- "middle": [],
1143
- "last": "Korelsky",
1144
- "suffix": ""
1145
- }
1146
- ],
1147
- "year": 1992,
1148
- "venue": "Proceedings of the 6th International Workshop on Natural Language Generation",
1149
- "volume": "",
1150
- "issue": "",
1151
- "pages": "40--47",
1152
- "other_ids": {},
1153
- "num": null,
1154
- "urls": [],
1155
- "raw_text": "Rambow, O. and Korelsky, T. (1992) Applied Text Generation. In Proceedings of the 6th International Workshop on Natural Language Generation, Trento, Italy, pp. 40--47.",
1156
- "links": null
1157
- },
1158
- "BIBREF14": {
1159
- "ref_id": "b14",
1160
- "title": "Automated translation at Grenoble University",
1161
- "authors": [
1162
- {
1163
- "first": "B",
1164
- "middle": [],
1165
- "last": "Vauquois",
1166
- "suffix": ""
1167
- },
1168
- {
1169
- "first": "C",
1170
- "middle": [],
1171
- "last": "Boitet",
1172
- "suffix": ""
1173
- }
1174
- ],
1175
- "year": 1985,
1176
- "venue": "Computational Linguistics",
1177
- "volume": "11",
1178
- "issue": "",
1179
- "pages": "28--36",
1180
- "other_ids": {},
1181
- "num": null,
1182
- "urls": [],
1183
- "raw_text": "Vauquois, B. and Boitet C. (1985) Automated translation at Grenoble University. In Computational Linguistics, Vol. 11, pp. 28-36.",
1184
- "links": null
1185
- }
1186
- },
1187
- "ref_entries": {
1188
- "FIGREF0": {
1189
- "uris": null,
1190
- "type_str": "figure",
1191
- "num": null,
1192
- "text": "All linguistic resources are represented in a declarative manner. An instantiation of the tree transduction module consists of a specification of the linguistic resources."
1193
- },
1194
- "FIGREF1": {
1195
- "uris": null,
1196
- "type_str": "figure",
1197
- "num": null,
1198
- "text": "Figure 1: Design of the Tree Transduction Module 3 The Framework's Representations"
1199
- },
1200
- "FIGREF2": {
1201
- "uris": null,
1202
- "type_str": "figure",
1203
- "num": null,
1204
- "text": "\u2022 Deep-syntactic structures (DSyntSs); \u2022 Surface syntactic structures (SSyntSs); \u2022 Conceptual structures (ConcSs); \u2022 Parsed syntactic structures (PSyntSs)."
1205
- },
1206
- "FIGREF3": {
1207
- "uris": null,
1208
- "type_str": "figure",
1209
- "num": null,
1210
- "text": "DSyntS (Graphical and ASCII Notation)"
1211
- },
1212
- "FIGREF4": {
1213
- "uris": null,
1214
- "type_str": "figure",
1215
- "num": null,
1216
- "text": "ConcS Interlingua and English DSyntS"
1217
- },
1218
- "FIGREF5": {
1219
- "uris": null,
1220
- "type_str": "figure",
1221
- "num": null,
1222
- "text": "Specification of Lexeme SELL"
1223
- },
1224
- "FIGREF6": {
1225
- "uris": null,
1226
- "type_str": "figure",
1227
- "num": null,
1228
- "text": "Deep-Syntactic Rule for Determiner Insertion"
1229
- },
1230
- "FIGREF7": {
1231
- "uris": null,
1232
- "type_str": "figure",
1233
- "num": null,
1234
- "text": "Lexico-Structural Transfer of English Lexerne MOVE to French"
1235
- },
1236
- "FIGREF8": {
1237
- "uris": null,
1238
- "type_str": "figure",
1239
- "num": null,
1240
- "text": "English to French Lexico-Structural Transfer Rule with Verb Modifier ALMOST"
1241
- },
1242
- "FIGREF9": {
1243
- "uris": null,
1244
- "type_str": "figure",
1245
- "num": null,
1246
- "text": "Scope of the Framework's Transformations"
1247
- },
1248
- "TABREF0": {
1249
- "content": "<table><tr><td>\u2022 Lexicon: This consists of the available</td></tr><tr><td>lexemes or concepts, depending on whether</td></tr><tr><td>the module works at syntactic or conceptual</td></tr><tr><td>level. Each lexeme and concept is defined</td></tr><tr><td>with its features, and may contain specific</td></tr><tr><td>lexico-structural rules: transfer rules for MT,</td></tr><tr><td>mapping rules to the next level of</td></tr><tr><td>representation for surface realization of</td></tr><tr><td>DSyntS or lexicalization of ConcS.</td></tr><tr><td>\u2022 Main Grammar: This consists of the lexico-</td></tr><tr><td>structural mapping rules that apply at this</td></tr><tr><td>level and which are not lexeme-or concept-</td></tr><tr><td>specific (e.g. DSynt-rules for the DSynt-</td></tr><tr><td>module, Transfer-rules for the Transfer</td></tr><tr><td>module, etc.)</td></tr><tr><td>\u2022 Preprocessing grammar: This consists of</td></tr><tr><td>the lexico-structural mapping rules for</td></tr><tr><td>transforming the input structures in order to</td></tr><tr><td>make them compliant with the main</td></tr><tr><td>grammar, if this is necessary. Such rules are</td></tr><tr><td>used to integrate new modules together</td></tr><tr><td>when discrepancies in the formalism need to</td></tr><tr><td>be fixed. This grammar can also be used</td></tr><tr><td>for adding default features (e.g. setting the</td></tr><tr><td>default number of nouns to singular) or for</td></tr><tr><td>applying default transformations (e.g.</td></tr><tr><td>replacing non meaning-bearing lexemes</td></tr><tr><td>with features).</td></tr><tr><td>Postprocessing grammar:</td></tr></table>",
1250
- "type_str": "table",
1251
- "num": null,
1252
- "text": "Feature Data-Base: This consists of the feature system defining available features and their possible values in the module.",
1253
- "html": null
1254
- }
1255
- }
1256
- }
1257
- }
Full_text_JSON/prefixA/json/A00/A00-1010.json DELETED
@@ -1,1055 +0,0 @@
1
- {
2
- "paper_id": "A00-1010",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:13.798898Z"
6
- },
7
- "title": "TALK'N'TRAVEL: A CONVERSATIONAL SYSTEM FOR AIR TRAVEL PLANNING",
8
- "authors": [
9
- {
10
- "first": "David",
11
- "middle": [],
12
- "last": "Stallard",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "BBN Technologies",
17
- "location": {
18
- "addrLine": "GTE 70 Fawcett St",
19
- "settlement": "Cambridge",
20
- "region": "MA",
21
- "country": "USA"
22
- }
23
- },
24
- "email": "[email protected]"
25
- }
26
- ],
27
- "year": "",
28
- "venue": null,
29
- "identifiers": {},
30
- "abstract": "We describe Talk'n'Travel, a spoken dialogue language system for making air travel plans over the telephone. Talk'n'Travel is a fully conversational, mixed-initiative system that allows the user to specify the constraints on his travel plan in arbitrary order, ask questions, etc., in general spoken English. The system operates according to a plan-based agenda mechanism, rather than a finite state network, and attempts to negotiate with the user when not all of his constraints can be met.",
31
- "pdf_parse": {
32
- "paper_id": "A00-1010",
33
- "_pdf_hash": "",
34
- "abstract": [
35
- {
36
- "text": "We describe Talk'n'Travel, a spoken dialogue language system for making air travel plans over the telephone. Talk'n'Travel is a fully conversational, mixed-initiative system that allows the user to specify the constraints on his travel plan in arbitrary order, ask questions, etc., in general spoken English. The system operates according to a plan-based agenda mechanism, rather than a finite state network, and attempts to negotiate with the user when not all of his constraints can be met.",
37
- "cite_spans": [],
38
- "ref_spans": [],
39
- "eq_spans": [],
40
- "section": "Abstract",
41
- "sec_num": null
42
- }
43
- ],
44
- "body_text": [
45
- {
46
- "text": "This paper describes Talk'n'Travel, a spoken language dialogue system for making complex air travel plans over the telephone. Talk'n'Travel is a research prototype system sponsored under the DARPA Communicator program (MITRE, 1999) . Some other systems in the program are Ward and Pellom (1999) , Seneff and Polifroni (2000) and . The common task of this program is a mixed-initiative dialogue over the telephone, in which the user plans a multi-city trip by air, including all flights, hotels, and rental cars, all in conversational English over the telephone.",
47
- "cite_spans": [
48
- {
49
- "start": 218,
50
- "end": 231,
51
- "text": "(MITRE, 1999)",
52
- "ref_id": null
53
- },
54
- {
55
- "start": 272,
56
- "end": 294,
57
- "text": "Ward and Pellom (1999)",
58
- "ref_id": "BIBREF1"
59
- },
60
- {
61
- "start": 297,
62
- "end": 324,
63
- "text": "Seneff and Polifroni (2000)",
64
- "ref_id": "BIBREF7"
65
- }
66
- ],
67
- "ref_spans": [],
68
- "eq_spans": [],
69
- "section": "Introduction",
70
- "sec_num": null
71
- },
72
- {
73
- "text": "The Communicator common task presents special challenges. It is a complex task with many subtasks, including the booking of each flight, hotel, and car reservation. Because the number of legs of the trip may be arbitrary, the number of such subtasks is not known in advance. Furthermore, the user has complete freedom to say anything at any time. His utterances can affect just the current subtask, or multiple subtasks at once (\"I want to go from Denver to Chicago and then to San Diego\"). He can go back and change the specifications for completed subtasks. And there are important constraints, such as temporal relationships between flights, that must be maintained for the solution to the whole task to be coherent.",
74
- "cite_spans": [],
75
- "ref_spans": [],
76
- "eq_spans": [],
77
- "section": "Introduction",
78
- "sec_num": null
79
- },
80
- {
81
- "text": "In order to meet this challenge, we have sought to develop dialogue techniques for Talk'n'Travel that go beyond the rigid systemdirected style of familiar IVR systems. Talk'n'Travel is instead a mixed initiative system that allows the user to specify constraints on his travel plan in arbitrary order. At any point in the dialogue, the user can supply information other than what the system is currently prompting for, change his mind about information he has previously given and even ask questions himself. The system also tries to be helpful, eliciting constraints from the user when necessary. Furthermore, if at any point the constraints the user has specified cannot all be met, the system steps in and offers a relaxation of them in an attempt to negotiate a partial solution with the user.",
82
- "cite_spans": [],
83
- "ref_spans": [],
84
- "eq_spans": [],
85
- "section": "Introduction",
86
- "sec_num": null
87
- },
88
- {
89
- "text": "The next section gives a brief overview of the system. Relevant components are discussed in subsequent sections.",
90
- "cite_spans": [],
91
- "ref_spans": [],
92
- "eq_spans": [],
93
- "section": "Introduction",
94
- "sec_num": null
95
- },
96
- {
97
- "text": "The system consists of the following modules: speech recognizer, language understander, dialogue manager, state manager, language generator, and speech synthesizer. The modules interact with each other via the central hub module of the Communicator Common Architecture.",
98
- "cite_spans": [],
99
- "ref_spans": [],
100
- "eq_spans": [],
101
- "section": "I System Overview",
102
- "sec_num": null
103
- },
104
- {
105
- "text": "The speech recognizer is the Byblos system (Nguyen, 1995) . It uses an acoustic model trained from the Macrophone telephone corpus, and a bigram/trigram language model trained from -40K utterances derived from various sources, including data collected under the previous ATIS program (Dahl et al, 1994) .",
106
- "cite_spans": [
107
- {
108
- "start": 43,
109
- "end": 57,
110
- "text": "(Nguyen, 1995)",
111
- "ref_id": "BIBREF8"
112
- },
113
- {
114
- "start": 284,
115
- "end": 302,
116
- "text": "(Dahl et al, 1994)",
117
- "ref_id": "BIBREF3"
118
- }
119
- ],
120
- "ref_spans": [],
121
- "eq_spans": [],
122
- "section": "I System Overview",
123
- "sec_num": null
124
- },
125
- {
126
- "text": "The speech synthesizer is Lucent's commercial system.",
127
- "cite_spans": [],
128
- "ref_spans": [],
129
- "eq_spans": [],
130
- "section": "I System Overview",
131
- "sec_num": null
132
- },
133
- {
134
- "text": "Synthesizer and recognizer both interface to the telephone via Dialogics telephony board. The database is currently a frozen snapshot of actual flights between 40 different US cities (we are currently engaged in interfacing to a commercial air travel website). The various language components are written in Java. The complete system runs on Windows NT, and is compliant with the DARPA Communicator Common architecture.",
135
- "cite_spans": [],
136
- "ref_spans": [],
137
- "eq_spans": [],
138
- "section": "I System Overview",
139
- "sec_num": null
140
- },
141
- {
142
- "text": "The present paper is concerned with the dialogue and discourse management, language generation and language understanding components. In the remainder of the paper, we present more detailed discussion of these components, beginning with the language understander in Section 2. Section 3 discusses the discourse and dialogue components, and Section 4, the language generator.",
143
- "cite_spans": [],
144
- "ref_spans": [],
145
- "eq_spans": [],
146
- "section": "I System Overview",
147
- "sec_num": null
148
- },
149
- {
150
- "text": "Semantic frames have proven useful as a meaning representation for many applications. Their simplicity and useful computational properties have often been seen as more important than their limitations in expressive power, especially in simpler domains.",
151
- "cite_spans": [],
152
- "ref_spans": [],
153
- "eq_spans": [],
154
- "section": "Meaning Representation",
155
- "sec_num": "2.1"
156
- },
157
- {
158
- "text": "Even in such domains, however, flames still have some shortcomings. While most naturally representing equalities between slot and filler, flames have a harder time with inequalities, such as 'the departure time is before 10 AM', or 'the airline is not Delta'. These require the slot-filler to be some sort of predicate, interval, or set object, at a cost to simplicity uniformity. Other problematic cases include n-ary relations ('3 miles from Denver'), and disjunctions of properties on different slots.",
159
- "cite_spans": [],
160
- "ref_spans": [],
161
- "eq_spans": [],
162
- "section": "Meaning Representation",
163
- "sec_num": "2.1"
164
- },
165
- {
166
- "text": "In our Talk'n'Travel work, we have developed a meaning representation formalism called path constraints, which overcomes these problems, while retaining the computational advantages that made frames attractive in the first place. A path constraint is an expression of the form :",
167
- "cite_spans": [],
168
- "ref_spans": [],
169
- "eq_spans": [],
170
- "section": "Meaning Representation",
171
- "sec_num": "2.1"
172
- },
173
- {
174
- "text": "(<path> <relation> <arguments>*)",
175
- "cite_spans": [],
176
- "ref_spans": [],
177
- "eq_spans": [],
178
- "section": "Meaning Representation",
179
- "sec_num": "2.1"
180
- },
181
- {
182
- "text": "The path is a compositional chain of one or more attributes, and relations are 1-place or higher predicates, whose first argument is implicitly the path. The relation is followed by zero or more other arguments. In the simplest case, path constraints can be thought of as flattenings of a tree of frames. The following represents the constraint that the departure time of the first leg of the itinerary is the city Boston :",
183
- "cite_spans": [],
184
- "ref_spans": [],
185
- "eq_spans": [],
186
- "section": "Meaning Representation",
187
- "sec_num": "2.1"
188
- },
189
- {
190
- "text": "Because this syntax generalizes to any relation, however, the constraint \"departing before 10 AM\" can be represented in a syntactically equivalent way:",
191
- "cite_spans": [],
192
- "ref_spans": [],
193
- "eq_spans": [],
194
- "section": "LEGS.0.ORIG_CITY EQ BoSToN",
195
- "sec_num": null
196
- },
197
- {
198
- "text": "LEGS.0.DEPART_TIME LT 1000",
199
- "cite_spans": [],
200
- "ref_spans": [],
201
- "eq_spans": [],
202
- "section": "LEGS.0.ORIG_CITY EQ BoSToN",
203
- "sec_num": null
204
- },
205
- {
206
- "text": "Because the number of arguments is arbitrary, it is equally straightforward to represent a oneplace property like \"x is nonstop\" and a three place predicate like \"x is 10 miles from Denver\".",
207
- "cite_spans": [],
208
- "ref_spans": [],
209
- "eq_spans": [],
210
- "section": "LEGS.0.ORIG_CITY EQ BoSToN",
211
- "sec_num": null
212
- },
213
- {
214
- "text": "Like flames, path constraints have a fixed format that is indexed in a computationally useful way, and are simpler than logical forms. Unlike flames, however, path constraints can be combined in arbitrary conjunctions, disjunctions, and negations, even across different paths. Path constraint meaning representations are also flat lists of constraints rather than trees, making matching rules, etc, easier to write for them.",
215
- "cite_spans": [],
216
- "ref_spans": [],
217
- "eq_spans": [],
218
- "section": "LEGS.0.ORIG_CITY EQ BoSToN",
219
- "sec_num": null
220
- },
221
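To make the computational appeal concrete, the following minimal Python sketch (ours, not the system's implementation; the relation inventory and attribute values are illustrative) stores path constraints as flat (path, relation, args) tuples and checks an itinerary against them, including the inequality and negation cases that plain slot-filler frames handle awkwardly:

    # Minimal sketch: path constraints as flat (path, relation, args) tuples.
    # The relation set (EQ, NEQ, LT) is illustrative, not the paper's full set.

    def lookup(state, path):
        """Follow a dotted attribute chain like 'LEGS.0.DEPART_TIME'."""
        value = state
        for step in path.split("."):
            value = value[int(step)] if step.isdigit() else value[step]
        return value

    RELATIONS = {
        "EQ": lambda v, x: v == x,
        "NEQ": lambda v, x: v != x,   # 'the airline is not Delta'
        "LT": lambda v, x: v < x,     # 'departing before 10 AM'
    }

    def satisfies(state, constraint):
        path, relation, *args = constraint
        return RELATIONS[relation](lookup(state, path), *args)

    itinerary = {"LEGS": [{"ORIG_CITY": "BOSTON", "DEPART_TIME": 930,
                           "AIRLINE": "UNITED"}]}
    constraints = [
        ("LEGS.0.ORIG_CITY", "EQ", "BOSTON"),
        ("LEGS.0.DEPART_TIME", "LT", 1000),
        ("LEGS.0.AIRLINE", "NEQ", "DELTA"),
    ]
    assert all(satisfies(itinerary, c) for c in constraints)

Because the representation is a flat list of uniform tuples, conjunction is list membership and negation is just another relation, which is the computational advantage the text claims over trees of frames.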
- {
222
- "text": "Language understanding in Talk'n'Travel is carried out using a system called GEM (for Generative Extraction Model). GEM (Miller, 1998) is a probabilistic semantic grammar that is an outgrowth of the work on the HUM system (Miller, 1996) , but uses hand-specified knowledge in addition to probability. The handspecified knowledge is quite simple, and is expressed by a two-level semantic dictionary. In the first level, the entries map alternative word strings to a single word class. For example, the following entry maps several alternative forms to the word class DEPART:",
223
- "cite_spans": [
224
- {
225
- "start": 120,
226
- "end": 134,
227
- "text": "(Miller, 1998)",
228
- "ref_id": "BIBREF2"
229
- },
230
- {
231
- "start": 222,
232
- "end": 236,
233
- "text": "(Miller, 1996)",
234
- "ref_id": null
235
- }
236
- ],
237
- "ref_spans": [],
238
- "eq_spans": [],
239
- "section": "The GEM Understanding System",
240
- "sec_num": "2.2"
241
- },
242
- {
243
- "text": "Leave, depart, get out of => DEPART",
244
- "cite_spans": [],
245
- "ref_spans": [],
246
- "eq_spans": [],
247
- "section": "The GEM Understanding System",
248
- "sec_num": "2.2"
249
- },
250
- {
251
- "text": "In the second level, entries map sequences of word classes to constraints:",
252
- "cite_spans": [],
253
- "ref_spans": [],
254
- "eq_spans": [],
255
- "section": "The GEM Understanding System",
256
- "sec_num": "2.2"
257
- },
258
- {
259
- "text": "Name: DepartCity 1 Head: DEPART Classes: [DEPART FROM CITY] Meaning: (DEST_CITY EQ <CITY>)",
260
- "cite_spans": [],
261
- "ref_spans": [],
262
- "eq_spans": [],
263
- "section": "The GEM Understanding System",
264
- "sec_num": "2.2"
265
- },
266
- {
267
- "text": "The \"head\" feature allows the entry to pass one of its constituent word classes up to a higher level pattern, allowing the given pattern to be a constituent of others.",
268
- "cite_spans": [],
269
- "ref_spans": [],
270
- "eq_spans": [],
271
- "section": "The GEM Understanding System",
272
- "sec_num": "2.2"
273
- },
274
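A toy Python rendering of the two-level dictionary idea, assuming invented entries and a greedy longest-match tokenizer; the real GEM grammar is probabilistic and far richer than this sketch:

    # Illustrative two-level semantic dictionary; entry contents and the
    # matching loop are ours, not GEM's actual implementation.

    LEVEL1 = {  # word strings -> word classes
        "leave": "DEPART", "depart": "DEPART", "get out of": "DEPART",
        "from": "FROM", "boston": "CITY",
    }

    LEVEL2 = [  # word-class sequences -> meaning templates
        {"name": "DepartCity", "head": "DEPART",
         "classes": ["DEPART", "FROM", "CITY"],
         "meaning": ("ORIG_CITY", "EQ", "<CITY>")},
    ]

    def classify(tokens):
        # Greedy longest-match against level-1 entries (a simplification).
        classes, i = [], 0
        while i < len(tokens):
            for n in (3, 2, 1):
                phrase = " ".join(tokens[i:i + n])
                if phrase in LEVEL1:
                    classes.append((LEVEL1[phrase], tokens[i:i + n]))
                    i += n
                    break
            else:
                i += 1  # unknown word; GEM maps these to a noise state
        return classes

    def interpret(tokens):
        classes = classify(tokens)
        tags = [c for c, _ in classes]
        for entry in LEVEL2:
            if tags == entry["classes"]:
                city = " ".join(classes[tags.index("CITY")][1]).upper()
                path, rel, _ = entry["meaning"]
                return (path, rel, city)
        return None

    print(interpret("leave from boston".split()))  # ('ORIG_CITY', 'EQ', 'BOSTON')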
- {
275
- "text": "The dictionary entries generate a probabilistic recursive transition network (PRTN), whose specific structure is determined by dictionary entries. Paths through this network correspond one-to-one with parse trees, so that given a path, there is exactly one corresponding tree. The probabilities for the arcs in this network can be estimated from training data using the EM (Expectation-Maximization) procedure.",
276
- "cite_spans": [],
277
- "ref_spans": [],
278
- "eq_spans": [],
279
- "section": "The GEM Understanding System",
280
- "sec_num": "2.2"
281
- },
282
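The toy sketch below scores one path through a small probabilistic transition network; the states, labels, and arc probabilities are invented, and a real decoder would search over all paths (and train the probabilities with EM) rather than follow a single labeled one:

    # Toy probabilistic transition network; topology and probabilities are
    # invented for illustration, not GEM's generated network.
    import math

    ARCS = {  # (state, label) -> (next_state, probability)
        ("S", "DEPART"): ("V", 0.6),
        ("S", "NOISE"):  ("S", 0.4),
        ("V", "FROM"):   ("P", 0.7),
        ("V", "NOISE"):  ("V", 0.3),
        ("P", "CITY"):   ("END", 1.0),
    }

    def path_log_prob(labels, state="S"):
        """Score one label sequence; each path maps to exactly one tree."""
        total = 0.0
        for label in labels:
            state, p = ARCS[(state, label)]
            total += math.log(p)
        return total

    print(path_log_prob(["DEPART", "FROM", "CITY"]))  # log(0.6 * 0.7 * 1.0)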
- {
283
- "text": "GEM also includes a noise state to which arbitrary input between patterns can be mapped, making the system quite robust to ill-formed input. There is no separate phase for handling ungrammatical input, nor any distinction between grammatical and ungrammatical input.",
284
- "cite_spans": [],
285
- "ref_spans": [],
286
- "eq_spans": [],
287
- "section": "The GEM Understanding System",
288
- "sec_num": "2.2"
289
- },
290
- {
291
- "text": "A key feature of the Communicator task is that the user can say anything at any time, adding or changing information at will. He may add new subtasks (e.g. trip legs) or modifying existing ones. A conventional dialogue state network approach would be therefore infeasible, as the network would be almost unboundedly large and complex.",
292
- "cite_spans": [],
293
- "ref_spans": [],
294
- "eq_spans": [],
295
- "section": "Discourse and Dialogue Processing",
296
- "sec_num": "3"
297
- },
298
- {
299
- "text": "A signifigant additional problem is that changes need not be monotonic. In particular, when changing his mind, or correcting the system's misinterpretations, the user may delete subtask structures altogether, as in the subdialog:",
300
- "cite_spans": [],
301
- "ref_spans": [],
302
- "eq_spans": [],
303
- "section": "Discourse and Dialogue Processing",
304
- "sec_num": "3"
305
- },
306
- {
307
- "text": "S: What day are you returning to Chicago? U: No, I don't want a return flight.",
308
- "cite_spans": [],
309
- "ref_spans": [],
310
- "eq_spans": [],
311
- "section": "Discourse and Dialogue Processing",
312
- "sec_num": "3"
313
- },
314
- {
315
- "text": "Because they take information away rather than add it, scenarios like this one make it problematic to view discourse processing as producing a contextualized, or \"thick frame\", version of the user's utterance. In our system, therefore, we have chosen a somewhat different approach.",
316
- "cite_spans": [],
317
- "ref_spans": [],
318
- "eq_spans": [],
319
- "section": "Discourse and Dialogue Processing",
320
- "sec_num": "3"
321
- },
322
- {
323
- "text": "The discourse processor, called the state manager, computes the most likely new task state, based on the user's input and the current task state. It also computes a discourse event, representing its interpretation of what happened in the conversation as a result of the user's utterance.",
324
- "cite_spans": [],
325
- "ref_spans": [],
326
- "eq_spans": [],
327
- "section": "Discourse and Dialogue Processing",
328
- "sec_num": "3"
329
- },
330
- {
331
- "text": "The dialogue manager is a separate module, as has no state managing responsibilities at all. Rather, it simply computes the next action to take, based on its current goal agenda, the discourse event returned by the state manager, and the new state. This design has the advantage of making the dialogue manager considerably simpler. The discourse event also becomes available to convey to the user as confirmation.",
332
- "cite_spans": [],
333
- "ref_spans": [],
334
- "eq_spans": [],
335
- "section": "Discourse and Dialogue Processing",
336
- "sec_num": "3"
337
- },
338
- {
339
- "text": "We discuss these two modules in more detail below.",
340
- "cite_spans": [],
341
- "ref_spans": [],
342
- "eq_spans": [],
343
- "section": "Discourse and Dialogue Processing",
344
- "sec_num": "3"
345
- },
346
- {
347
- "text": "The state manager is responsible for computing and maintaining the current task state. The task state is simply the set of path constraints which currently constrain the user's itinerary. Also included in the task state are the history of user and system utterances, and the current subtask and object in focus, if any. At any of these steps, zero or more alternative new states can result, and are fed to the next step. If zero states result at any step, the new meaning representation is rejected, and another one requested from the understander. If no more hypotheses are available, the entire utterance is rejected, and a DONT_UNDERSTAND event is returned to the dialogue manager.",
348
- "cite_spans": [],
349
- "ref_spans": [],
350
- "eq_spans": [],
351
- "section": "State Manager",
352
- "sec_num": "3.1"
353
- },
354
- {
355
- "text": "Step 1 resolves ellipses. Ellipses include both short responses like \"Boston\" and yes/no responses. In this step, a complete meaning representation such as '(ORIQCITY EQ BOSTON)' is generated based on the system's prompt and the input meaning. The hypothesis is rejected if this cannot be done.",
356
- "cite_spans": [],
357
- "ref_spans": [],
358
- "eq_spans": [],
359
- "section": "State Manager",
360
- "sec_num": "3.1"
361
- },
362
- {
363
- "text": "Step 2 matches the input meaning to one or more of the subtasks of the problem. For the Communicator problem, the subtasks are legs of the user's itinerary, and matching is done based on cities mentioned in the input meaning. The default is the subtask currently in focus in the dialogue.",
364
- "cite_spans": [],
365
- "ref_spans": [],
366
- "eq_spans": [],
367
- "section": "State Manager",
368
- "sec_num": "3.1"
369
- },
370
- {
371
- "text": "A match to a subtask is represented by adding the prefix for the subtask to the path of the constraint. For example, \"I want to arrive in Denver by 4 PM\" and then continue on to Chicago would be : Step 3, local ambiguities are expanded into their different possibilities. These include partially specified times such as \"2 o'clock\"",
372
- "cite_spans": [],
373
- "ref_spans": [],
374
- "eq_spans": [],
375
- "section": "State Manager",
376
- "sec_num": "3.1"
377
- },
378
- {
387
- "text": "Step 4 applies inference and coherency rules. These rules will vary from application to application. They are written in the path constraint formalism, augmented with variables that can range over attributes and other values.",
388
- "cite_spans": [],
389
- "ref_spans": [],
390
- "eq_spans": [],
391
- "section": "State Manager",
392
- "sec_num": "3.1"
393
- },
394
- {
395
- "text": "The following is an example, representing the constraint a flight leg cannot be scheduled to depart until after the preceding flight arrives:",
396
- "cite_spans": [],
397
- "ref_spans": [],
398
- "eq_spans": [],
399
- "section": "State Manager",
400
- "sec_num": "3.1"
401
- },
402
- {
403
- "text": "LEGS.$N.ARRIVE LT LEGS. $N+ 1 .DEPART",
404
- "cite_spans": [],
405
- "ref_spans": [],
406
- "eq_spans": [],
407
- "section": "State Manager",
408
- "sec_num": "3.1"
409
- },
410
- {
411
- "text": "States that violate coherency constraints are discarded.",
412
- "cite_spans": [],
413
- "ref_spans": [],
414
- "eq_spans": [],
415
- "section": "State Manager",
416
- "sec_num": "3.1"
417
- },
418
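A Python sketch of how such a rule, with its $N variable instantiated over adjacent legs, can filter candidate states; the field names follow the rule above, while the candidate states themselves are invented:

    # Sketch of expanding the coherency rule over $N and discarding
    # violating states; candidate data is invented for illustration.

    def coherent(state):
        legs = state["LEGS"]
        # LEGS.$N.ARRIVE LT LEGS.$N+1.DEPART for every adjacent pair
        return all(legs[n]["ARRIVE"] < legs[n + 1]["DEPART"]
                   for n in range(len(legs) - 1))

    candidates = [
        {"LEGS": [{"ARRIVE": 1544, "DEPART": 1010},
                  {"ARRIVE": 1342, "DEPART": 801}]},   # departs before arrival
        {"LEGS": [{"ARRIVE": 1544, "DEPART": 1010},
                  {"ARRIVE": 1342, "DEPART": 1700}]},  # coherent ordering
    ]
    survivors = [s for s in candidates if coherent(s)]
    print(len(survivors))  # 1: only the state with the 17:00 departure survives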
- {
419
- "text": "Step 5 computes the set of objects in the database that satisfy the constraints on the current subtask. This set will be empty when the constraints are not all satisfiable, in which case the relaxation of Step 6 is invoked. This relaxation is a best-first search for the satisfiable subset of the constraints that are deemed closest to what the user originally wanted. Alternative relaxations are scored according to a sum of penalty scores for each relaxed constraint, derived from earlier work by Stallard (1995) . The penalty score is the sum of two terms: one for the relative importance of the attribute concerned (e.g. relaxations of DEPART_DATE are penalised more than relaxations of AIRLINE) and the other for the nearness of the satisfiers to the original constraint (relevant for number-like attributes like departure time).",
420
- "cite_spans": [
421
- {
422
- "start": 499,
423
- "end": 514,
424
- "text": "Stallard (1995)",
425
- "ref_id": "BIBREF9"
426
- }
427
- ],
428
- "ref_spans": [],
429
- "eq_spans": [],
430
- "section": "State Manager",
431
- "sec_num": "3.1"
432
- },
433
- {
434
- "text": "The latter allows the system to give credit to solutions that are near fits to the user's goals, even if they relax strongly desired constraints. For example, suppose the user has expressed a desire to fly on Delta and arrive by 3 PM, while the system is only able to find a flight on Delta that arrives at 3:15 PM. In this case, this flight, which meets one constraint and almost meets the other, may well satisfy the user more than a flight on a different airline that happens to meet the time constraint exactly.",
435
- "cite_spans": [],
436
- "ref_spans": [],
437
- "eq_spans": [],
438
- "section": "State Manager",
439
- "sec_num": "3.1"
440
- },
441
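The following best-first relaxation sketch reproduces the flavor of this scoring; the importance weights, the nearness term, and the two-flight database are invented stand-ins for the actual penalty scores:

    # Penalty-based relaxation sketch; weights and data are illustrative.
    from itertools import combinations

    IMPORTANCE = {"DEPART_DATE": 10.0, "AIRLINE": 6.0, "ARRIVE_TIME": 4.0}

    FLIGHTS = [
        {"AIRLINE": "DELTA",  "ARRIVE_TIME": 1515, "DEPART_DATE": 1118},
        {"AIRLINE": "UNITED", "ARRIVE_TIME": 1500, "DEPART_DATE": 1118},
    ]

    def penalty(flight, wanted, relaxed):
        # importance of each relaxed attribute, plus how far the flight misses it
        score = 0.0
        for attr in relaxed:
            if isinstance(wanted[attr], int):
                nearness = abs(flight[attr] - wanted[attr]) / 100.0
            else:
                nearness = 1.0
            score += IMPORTANCE[attr] + nearness
        return score

    def best_relaxation(wanted):
        # search subsets of constraints to relax, keeping the cheapest penalty
        options = []
        for r in range(len(wanted) + 1):
            for relaxed in combinations(wanted, r):
                for flight in FLIGHTS:
                    if all(flight[a] == v for a, v in wanted.items()
                           if a not in relaxed):
                        options.append((penalty(flight, wanted, relaxed),
                                        relaxed, flight))
        return min(options, key=lambda o: o[0]) if options else None

    # Delta arriving 15:15 (a near miss on 'arrive by 3 PM') beats switching
    # airline, because the nearness term keeps its penalty small.
    print(best_relaxation({"AIRLINE": "DELTA", "ARRIVE_TIME": 1500,
                           "DEPART_DATE": 1118}))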
- {
442
- "text": "In the final step, the alternative new states are rank-ordered according to a pragmatic score, and the highest-scoring alternative is chosen. The pragmatic score is computed based on a number of factors, including the plausibility of disambiguated times and whether or not the state interpreted the user as responding to the system prompt.",
443
- "cite_spans": [],
444
- "ref_spans": [],
445
- "eq_spans": [],
446
- "section": "State Manager",
447
- "sec_num": "3.1"
448
- },
449
- {
450
- "text": "The appropriate discourse event is then deterministicaUy computed and returned. There are several types of discourse event. The most common is UPDATE, which specifies the constraints that have been added, removed, or relaxed. Another type is REPEAT, which is generated when the user has simply repeated constraints the system already knows. Other types include QUESTION, TIMEOUT, and DONT UNDERSTAND.",
451
- "cite_spans": [],
452
- "ref_spans": [],
453
- "eq_spans": [],
454
- "section": "State Manager",
455
- "sec_num": "3.1"
456
- },
457
- {
458
- "text": "Upon receiving the new discourse event from the state manager, the dialogue manager determines what next action to take. Actions can be external, such as speaking to the user or asking him a question, or internal, such as querying the database or other elements of the system state. The current action is determined by consulting a stack-based agenda of goals and actions.",
459
- "cite_spans": [],
460
- "ref_spans": [],
461
- "eq_spans": [],
462
- "section": "Dialogue Manager",
463
- "sec_num": "3.1"
464
- },
465
- {
466
- "text": "The agenda stack is in turn determined by an application-dependent library of plans. Plans are tree structures whose root is the name of the goal the plan is designed to solve, and whose leaves are either other goal names or actions. An example of a plan is the following: The system begins the interaction with the highlevel goal START on its stack. At each step, the system examines the top of its goal stack and either executes it if it is an action suitable for execution, or replaces it on the stack with its plan steps if it is a goal.",
467
- "cite_spans": [],
468
- "ref_spans": [],
469
- "eq_spans": [],
470
- "section": "Dialogue Manager",
471
- "sec_num": "3.1"
472
- },
473
- {
474
- "text": "Actions are objects with success and relevancy predicates and an execute method, somewhat similar to the \"handlers\" of . An action has an underlying goal, such as finding out the user's constraints on some attribute. The action's success predicate will return true if this underlying goal has been achieved, and its relevancy predicate will return true if it is still relevant to the current situation. Before carrying out an action, the dialogue manager first checks to see if its success predicate returns false and its relevancy predicate returns true. If either condition is not met, the action is popped off the stack and disposed of without being executed. Otherwise, the action's execute method is invoked.",
475
- "cite_spans": [],
476
- "ref_spans": [],
477
- "eq_spans": [],
478
- "section": "Dialogue Manager",
479
- "sec_num": "3.1"
480
- },
481
- {
482
- "text": "The system includes a set of actions that are built in, and may be parameterized for each each domain. For example, the action type ELICIT is parameterized by an attribute A, a path prefix P, and verbalization string S. Its success predicate returns true if the path 'P.A' is constrained in the current state. Its execute method generates a meaning frame that is passed to the language generator, ultimately prompting the user with a question such as \"What city are you flying to?\"",
483
- "cite_spans": [],
484
- "ref_spans": [],
485
- "eq_spans": [],
486
- "section": "Dialogue Manager",
487
- "sec_num": "3.1"
488
- },
489
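A minimal reconstruction (ours, not the actual system's code) of an ELICIT-style action and the test-execute-retest control loop described above:

    # Sketch of an agenda-stack action with success/relevancy predicates.

    class Elicit:
        def __init__(self, attribute, prefix, verbalization):
            self.path = f"{prefix}.{attribute}"
            self.verbalization = verbalization

        def succeeded(self, state):      # success predicate
            return self.path in state    # true once the path is constrained

        def relevant(self, state):       # relevancy predicate
            return True                  # a real test would inspect the state

        def execute(self, state):
            print(self.verbalization)    # would go to the language generator

    def run(agenda, state):
        while agenda:
            action = agenda[-1]          # top of the goal/action stack
            if action.succeeded(state) or not action.relevant(state):
                agenda.pop()             # pop without executing
                continue
            action.execute(state)
            state[action.path] = "BOSTON"   # simulate the user's reply
            # the action stays on the stack and is re-tested next cycle

    run([Elicit("DEST_CITY", "LEGS.0", "What city are you flying to?")], {})

Note how the success predicate, not the act of asking, removes the action: if the user had already supplied the destination, the question would never be uttered.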
- {
490
- "text": "Once an action's execute method is invoked, it remains on the stack for the next cycle, where it is tested again for success and relevancy. In this case, if the success condition is met -that is, if the user did indeed reply with a specification of his destination city -the action is popped off the stack.",
491
- "cite_spans": [],
492
- "ref_spans": [],
493
- "eq_spans": [],
494
- "section": "Dialogue Manager",
495
- "sec_num": "3.1"
496
- },
497
- {
498
- "text": "If the system did not receive this information, either because the user made a stipulation about some different attribute, asked a question, or simply was not understood, the action remains on the stack to be executed again. Of course, the user may have already specified the destination city in a previous utterance. In this case, the action is already satisfied, and is not executed.",
499
- "cite_spans": [],
500
- "ref_spans": [],
501
- "eq_spans": [],
502
- "section": "Dialogue Manager",
503
- "sec_num": "3.1"
504
- },
505
- {
506
- "text": "In this way, the user has flexibility in how he actually carries out the dialogue.",
507
- "cite_spans": [],
508
- "ref_spans": [],
509
- "eq_spans": [],
510
- "section": "Dialogue Manager",
511
- "sec_num": "3.1"
512
- },
513
- {
514
- "text": "In certain situations, other goals and actions may be pushed onto the stack, temporarily interrupting the execution of the current plan. For example, the user himself may ask a question. In this case, an action to answer the question is created, and pushed onto the stack. The dialogue manager then executes this action to answer the user's question before continuing on with the plan. Or the state manager may generate a clarification question, which the dialogue manager seeks to have the user answer.",
515
- "cite_spans": [],
516
- "ref_spans": [],
517
- "eq_spans": [],
518
- "section": "Dialogue Manager",
519
- "sec_num": "3.1"
520
- },
521
- {
522
- "text": "Actions can also have a set of conditional branchings that are tested after the action is executed. If present, these determine the next action to execute or goal to work on. For example, the action that asks the user \"Do you want a return flight to X?\" specifies the branch to be taken when the user replies in the negative. This branch includes an action that asks the user \"Is Y your final destination?\", an action that is executed if the user did not specify an additional destination along with his negative reply.",
523
- "cite_spans": [],
524
- "ref_spans": [],
525
- "eq_spans": [],
526
- "section": "Dialogue Manager",
527
- "sec_num": "3.1"
528
- },
529
- {
530
- "text": "Unlike the approach taken by Ward and Pellom (1999) , which seeks to avoid scripting entirely by driving the dialogue off the current status of the itinerary, the Talk'n'Travel dialogue manager thus seeks to allow partially scripted dialogue where appropriate to the situation.",
531
- "cite_spans": [
532
- {
533
- "start": 29,
534
- "end": 51,
535
- "text": "Ward and Pellom (1999)",
536
- "ref_id": "BIBREF1"
537
- }
538
- ],
539
- "ref_spans": [],
540
- "eq_spans": [],
541
- "section": "Dialogue Manager",
542
- "sec_num": "3.1"
543
- },
544
- {
545
- "text": "The language generator takes a meaning frame from the dialogue manager, and generates a text string in English for it. It uses a set of patternbased rules that map constraints into alternative syntactic realisations.",
546
- "cite_spans": [],
547
- "ref_spans": [],
548
- "eq_spans": [],
549
- "section": "Language Generation",
550
- "sec_num": "4"
551
- },
552
- {
553
- "text": "For example, the following rule allows a constraint on departure time to be realized as \"leave at 3 PM\" or \"3 PM flight\":",
554
- "cite_spans": [],
555
- "ref_spans": [],
556
- "eq_spans": [],
557
- "section": "Language Generation",
558
- "sec_num": "4"
559
- },
560
- {
561
- "text": "LEG.$N.DEPART_TIME EQ $X =~ [leave at $X], [nom-comp $X]",
562
- "cite_spans": [],
563
- "ref_spans": [],
564
- "eq_spans": [],
565
- "section": "Language Generation",
566
- "sec_num": "4"
567
- },
568
- {
569
- "text": "Different realization rules can be selected for depending upon whether the constraint is to be realized as an assertion or as a description. The generation algorithm assembles the selected realizations for each constraint into a simplified syntax tree, selecting appropriate inflections of verb and noun heads as it does so. Terminal values in constraints are realized as type-specific nominals, such as \"3 PM\" or \"Delta\".",
570
- "cite_spans": [],
571
- "ref_spans": [],
572
- "eq_spans": [],
573
- "section": "Language Generation",
574
- "sec_num": "4"
575
- },
576
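An illustrative Python rendering of one realization rule with its two surface forms; the rule table and the time-rendering helper are our own simplifications of the pattern-based rules described above:

    # Sketch of pattern-based realization: one rule, two realizations,
    # chosen by assertion vs. description mode. Illustrative only.

    RULES = {
        ("DEPART_TIME", "EQ"): {
            "assertion":   "leave at {X}",   # 'leave at 3 PM'
            "description": "{X} flight",     # nominal compound: '3 PM flight'
        },
    }

    def nominal(value):
        """Type-specific rendering of terminal values, e.g. 1500 -> '3 PM'."""
        hours, minutes = divmod(value, 100)
        suffix = "AM" if hours < 12 else "PM"
        hours = hours if 1 <= hours <= 12 else abs(hours - 12)
        return f"{hours}:{minutes:02d} {suffix}" if minutes else f"{hours} {suffix}"

    def realize(constraint, mode):
        path, relation, value = constraint
        attribute = path.split(".")[-1]   # LEG.$N.DEPART_TIME -> DEPART_TIME
        return RULES[(attribute, relation)][mode].format(X=nominal(value))

    print(realize(("LEGS.0.DEPART_TIME", "EQ", 1500), "assertion"))    # leave at 3 PM
    print(realize(("LEGS.0.DEPART_TIME", "EQ", 1500), "description"))  # 3 PM flight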
- {
577
- "text": "A crucial feature of the generation process is that it adds to each prompt a paraphrase of the most recent discourse event, corresponding to what the system thinks the user just said. This helps keep the conversation grounded in terms of mutual understanding between the participants.",
578
- "cite_spans": [],
579
- "ref_spans": [],
580
- "eq_spans": [],
581
- "section": "Language Generation",
582
- "sec_num": "4"
583
- },
584
- {
585
- "text": "The following is an example dialog with the system: ",
586
- "cite_spans": [],
587
- "ref_spans": [],
588
- "eq_spans": [],
589
- "section": "Example Scenario",
590
- "sec_num": "5"
591
- },
592
- {
593
- "text": "The Talk'n'Travel system described here was successfully demonstrated at the DARPA Communicator Compare and Contrast Workshop in June 1999. We are currently collecting data with test subjects and are using the results to improve the system's performance in all areas, in preparation for the forthcoming common evaluation of Communicator systems in June 2000. 8 of the subjects were successful. Of successful sessions, the average duration was 387 seconds, with a minimum of 272 and a maximum of 578. The average number of user utterances was 25, with a minimum of 18 and a maximum of 37. The word error rate of the recognizer was 11.8%.",
594
- "cite_spans": [],
595
- "ref_spans": [],
596
- "eq_spans": [],
597
- "section": "Current Status and Conclusions",
598
- "sec_num": "6"
599
- },
600
- {
601
- "text": "The primary cause of failure to complete the scenario, as well as excessive time spent on completing it, was corruption of the discourse state due to recognition or interpretation errors. While the system informs the user of the change in state after every utterance, the user was not always successful in correcting it when it made errors, and sometimes the user did not even notice when the system had made an error. If the user is not attentive at the time, or happens not to understand what the synthesizer said, there is no implicit way for him to find out afterwards what the system thinks his constraints are.",
602
- "cite_spans": [],
603
- "ref_spans": [],
604
- "eq_spans": [],
605
- "section": "Current Status and Conclusions",
606
- "sec_num": "6"
607
- },
608
- {
609
- "text": "While preliminary, these results point to two directions for future work. One is that the system needs to be better able to recognize and deal with problem situations in which the dialogue is not advancing. The other is that the system needs to be more communicative about its current understanding of the user's goals, even at points in the dialogue at which it might be assumed that user and system were in agreement.",
610
- "cite_spans": [],
611
- "ref_spans": [],
612
- "eq_spans": [],
613
- "section": "Current Status and Conclusions",
614
- "sec_num": "6"
615
- }
616
- ],
617
- "back_matter": [
618
- {
619
- "text": "This work was sponsored by DARPA and monitored by SPAWAR Systems Center under Contract No. N66001-99-D-8615.To determine the performance of the system, we ran an informal experiment in which 11 different subjects called into the system and attempted to use it to solve a travel problem. None of the subjects were system developers. Each subject had a single session in which he was given a three-city trip to plan, including dates of travel, constraints on departure and arrival times, airline preferences.The author wishes to thank Scott Miller for the use of his GEM system.",
620
- "cite_spans": [],
621
- "ref_spans": [],
622
- "eq_spans": [],
623
- "section": "Acknowledgements",
624
- "sec_num": null
625
- }
626
- ],
627
- "bib_entries": {
628
- "BIBREF0": {
629
- "ref_id": "b0",
630
- "title": "DARPA Communicator homepage",
631
- "authors": [],
632
- "year": 1999,
633
- "venue": "MITRE",
634
- "volume": "",
635
- "issue": "",
636
- "pages": "",
637
- "other_ids": {},
638
- "num": null,
639
- "urls": [],
640
- "raw_text": "MITRE (1999) DARPA Communicator homepage http://fofoca.mitre.org].",
641
- "links": null
642
- },
643
- "BIBREF1": {
644
- "ref_id": "b1",
645
- "title": "The CU Communicator System",
646
- "authors": [
647
- {
648
- "first": "W",
649
- "middle": [],
650
- "last": "Ward",
651
- "suffix": ""
652
- },
653
- {
654
- "first": "B",
655
- "middle": [],
656
- "last": "Pellom",
657
- "suffix": ""
658
- }
659
- ],
660
- "year": 1999,
661
- "venue": "IEEE Workshop on Automatic Speech Recognition and Understanding",
662
- "volume": "",
663
- "issue": "",
664
- "pages": "",
665
- "other_ids": {},
666
- "num": null,
667
- "urls": [],
668
- "raw_text": "Ward W., and Pellom, B. (1999) The CU Communicator System. In 1999 IEEE Workshop on Automatic Speech Recognition and Understanding, Keystone, Colorado.",
669
- "links": null
670
- },
671
- "BIBREF2": {
672
- "ref_id": "b2",
673
- "title": "The Generative Extraction Model",
674
- "authors": [
675
- {
676
- "first": "S",
677
- "middle": [],
678
- "last": "Miller",
679
- "suffix": ""
680
- }
681
- ],
682
- "year": 1998,
683
- "venue": "",
684
- "volume": "",
685
- "issue": "",
686
- "pages": "",
687
- "other_ids": {},
688
- "num": null,
689
- "urls": [],
690
- "raw_text": "Miller S. (1998) The Generative Extraction Model. Unpublished manuscript.",
691
- "links": null
692
- },
693
- "BIBREF3": {
694
- "ref_id": "b3",
695
- "title": "Expanding the scope of the ATIS task",
696
- "authors": [
697
- {
698
- "first": "D",
699
- "middle": [],
700
- "last": "Dahl",
701
- "suffix": ""
702
- },
703
- {
704
- "first": "M",
705
- "middle": [],
706
- "last": "Bates",
707
- "suffix": ""
708
- },
709
- {
710
- "first": "M",
711
- "middle": [],
712
- "last": "Brown",
713
- "suffix": ""
714
- },
715
- {
716
- "first": "W",
717
- "middle": [],
718
- "last": "Fisher",
719
- "suffix": ""
720
- },
721
- {
722
- "first": "K",
723
- "middle": [],
724
- "last": "Hunicke-Smith",
725
- "suffix": ""
726
- },
727
- {
728
- "first": "D",
729
- "middle": [],
730
- "last": "Pallet",
731
- "suffix": ""
732
- },
733
- {
734
- "first": "C",
735
- "middle": [],
736
- "last": "Pao",
737
- "suffix": ""
738
- },
739
- {
740
- "first": "A",
741
- "middle": [],
742
- "last": "Rudnicky",
743
- "suffix": ""
744
- },
745
- {
746
- "first": "Shriberg",
747
- "middle": [
748
- "E"
749
- ],
750
- "last": "",
751
- "suffix": ""
752
- }
753
- ],
754
- "year": 1994,
755
- "venue": "Proceedings of the ARPA Spoken Language Technology Workshop",
756
- "volume": "",
757
- "issue": "",
758
- "pages": "3--8",
759
- "other_ids": {},
760
- "num": null,
761
- "urls": [],
762
- "raw_text": "Dahl D., Bates M., Brown M., Fisher, W. Hunicke- Smith K., Pallet D., Pao C., Rudnicky A., and Shriberg E. (1994) Expanding the scope of the ATIS task. In Proceedings of the ARPA Spoken Language Technology Workshop, Plainsboro, NJ., pp 3-8.",
763
- "links": null
764
- },
765
- "BIBREF4": {
766
- "ref_id": "b4",
767
- "title": "A schema-based approach to dialog control",
768
- "authors": [
769
- {
770
- "first": "P",
771
- "middle": [],
772
- "last": "Constantinides",
773
- "suffix": ""
774
- },
775
- {
776
- "first": "S",
777
- "middle": [],
778
- "last": "Hansma",
779
- "suffix": ""
780
- },
781
- {
782
- "first": "C",
783
- "middle": [],
784
- "last": "Tchou",
785
- "suffix": ""
786
- },
787
- {
788
- "first": "A",
789
- "middle": [],
790
- "last": "Rudnicky",
791
- "suffix": ""
792
- }
793
- ],
794
- "year": 1999,
795
- "venue": "Proceedings oflCSLP",
796
- "volume": "",
797
- "issue": "",
798
- "pages": "",
799
- "other_ids": {},
800
- "num": null,
801
- "urls": [],
802
- "raw_text": "Constantinides P., Hansma S., Tchou C. and Rudnicky, A. (1999) A schema-based approach to dialog control. Proceedings oflCSLP, Paper 637.",
803
- "links": null
804
- },
805
- "BIBREF5": {
806
- "ref_id": "b5",
807
- "title": "Creating natural dialogs in the Carnegie Mellon Communicator system",
808
- "authors": [
809
- {
810
- "first": "A",
811
- "middle": [],
812
- "last": "Rudnicky",
813
- "suffix": ""
814
- },
815
- {
816
- "first": "E",
817
- "middle": [],
818
- "last": "Thayer",
819
- "suffix": ""
820
- },
821
- {
822
- "first": "P",
823
- "middle": [],
824
- "last": "Constantinides",
825
- "suffix": ""
826
- },
827
- {
828
- "first": "C",
829
- "middle": [],
830
- "last": "Tchou",
831
- "suffix": ""
832
- },
833
- {
834
- "first": "R",
835
- "middle": [],
836
- "last": "Shern",
837
- "suffix": ""
838
- },
839
- {
840
- "first": "K",
841
- "middle": [],
842
- "last": "Lenzo",
843
- "suffix": ""
844
- },
845
- {
846
- "first": "W",
847
- "middle": [],
848
- "last": "Xu",
849
- "suffix": ""
850
- },
851
- {
852
- "first": "A",
853
- "middle": [],
854
- "last": "Oh",
855
- "suffix": ""
856
- }
857
- ],
858
- "year": 1999,
859
- "venue": "Proceedings of Eurospeech",
860
- "volume": "4",
861
- "issue": "",
862
- "pages": "1531--1534",
863
- "other_ids": {},
864
- "num": null,
865
- "urls": [],
866
- "raw_text": "Rudnicky A., Thayer, E., Constantinides P., Tchou C., Shern, R., Lenzo K., Xu W., Oh A. (1999) Creating natural dialogs in the Carnegie Mellon Communicator system. Proceedings of Eurospeech, 1999, Vol 4, pp. 1531-1534",
867
- "links": null
868
- },
869
- "BIBREF6": {
870
- "ref_id": "b6",
871
- "title": "An agenda-based dialog management architecture for soken language systems",
872
- "authors": [
873
- {
874
- "first": "A",
875
- "middle": [],
876
- "last": "Rudnicky",
877
- "suffix": ""
878
- },
879
- {
880
- "first": "W",
881
- "middle": [],
882
- "last": "Xu",
883
- "suffix": ""
884
- }
885
- ],
886
- "year": 1999,
887
- "venue": "IEEE Workshop on Automatic Speech Recognition and Understanding",
888
- "volume": "",
889
- "issue": "",
890
- "pages": "",
891
- "other_ids": {},
892
- "num": null,
893
- "urls": [],
894
- "raw_text": "Rudnicky A., and Xu W. (1999) An agenda-based dialog management architecture for soken language systems. In 1999 IEEE Workshop on Automatic Speech Recognition and Understanding, Keystone, Colorado.",
895
- "links": null
896
- },
897
- "BIBREF7": {
898
- "ref_id": "b7",
899
- "title": "Dialogue Management in the Mercury Flight Reservation System",
900
- "authors": [
901
- {
902
- "first": "S",
903
- "middle": [],
904
- "last": "Seneff",
905
- "suffix": ""
906
- },
907
- {
908
- "first": "J",
909
- "middle": [],
910
- "last": "Polifroni",
911
- "suffix": ""
912
- }
913
- ],
914
- "year": 2000,
915
- "venue": "ANLP Conversational Systems Workshop",
916
- "volume": "",
917
- "issue": "",
918
- "pages": "",
919
- "other_ids": {},
920
- "num": null,
921
- "urls": [],
922
- "raw_text": "Seneff S., and Polifroni, J. (2000) Dialogue Management in the Mercury Flight Reservation System. ANLP Conversational Systems Workshop.",
923
- "links": null
924
- },
925
- "BIBREF8": {
926
- "ref_id": "b8",
927
- "title": "The 1994 BBN/BYBLOS Speech Recognition System",
928
- "authors": [
929
- {
930
- "first": "L",
931
- "middle": [],
932
- "last": "Nguyen",
933
- "suffix": ""
934
- },
935
- {
936
- "first": "T",
937
- "middle": [],
938
- "last": "Anastasakos",
939
- "suffix": ""
940
- },
941
- {
942
- "first": "F",
943
- "middle": [],
944
- "last": "Kubala",
945
- "suffix": ""
946
- },
947
- {
948
- "first": "C",
949
- "middle": [],
950
- "last": "Lapre",
951
- "suffix": ""
952
- },
953
- {
954
- "first": "J",
955
- "middle": [],
956
- "last": "Makhoul",
957
- "suffix": ""
958
- },
959
- {
960
- "first": "R",
961
- "middle": [],
962
- "last": "Schwartz",
963
- "suffix": ""
964
- },
965
- {
966
- "first": "N",
967
- "middle": [],
968
- "last": "Yuan",
969
- "suffix": ""
970
- },
971
- {
972
- "first": "G",
973
- "middle": [],
974
- "last": "Zavaliagkos",
975
- "suffix": ""
976
- },
977
- {
978
- "first": "Y",
979
- "middle": [],
980
- "last": "Zhao",
981
- "suffix": ""
982
- }
983
- ],
984
- "year": 1995,
985
- "venue": "Proc of ARPA Spoken Language Systems Technology Workshop",
986
- "volume": "",
987
- "issue": "",
988
- "pages": "77--81",
989
- "other_ids": {},
990
- "num": null,
991
- "urls": [],
992
- "raw_text": "Nguyen L., Anastasakos T., Kubala F., LaPre C., Makhoul J., Schwartz R., Yuan N., Zavaliagkos G., and Zhao Y. (1995) The 1994 BBN/BYBLOS Speech Recognition System, In Proc of ARPA Spoken Language Systems Technology Workshop, Austin, Texas, pp. 77-81.",
993
- "links": null
994
- },
995
- "BIBREF9": {
996
- "ref_id": "b9",
997
- "title": "The Initial Implementation of the BBN ATIS4 Dialog System",
998
- "authors": [
999
- {
1000
- "first": "D",
1001
- "middle": [],
1002
- "last": "Stallard",
1003
- "suffix": ""
1004
- }
1005
- ],
1006
- "year": 1995,
1007
- "venue": "Proc of ARPA Spoken Language Systems Technology Workshop",
1008
- "volume": "",
1009
- "issue": "",
1010
- "pages": "208--211",
1011
- "other_ids": {},
1012
- "num": null,
1013
- "urls": [],
1014
- "raw_text": "Stallard D. (1995) The Initial Implementation of the BBN ATIS4 Dialog System, In Proc of ARPA Spoken Language Systems Technology Workshop, Austin, Texas, pp. 208-211.",
1015
- "links": null
1016
- },
1017
- "BIBREF10": {
1018
- "ref_id": "b10",
1019
- "title": "i996) A Fully Statistical Approach to Natural Language Interfaces",
1020
- "authors": [
1021
- {
1022
- "first": "S",
1023
- "middle": [],
1024
- "last": "Miller",
1025
- "suffix": ""
1026
- },
1027
- {
1028
- "first": "D",
1029
- "middle": [],
1030
- "last": "Stallard",
1031
- "suffix": ""
1032
- }
1033
- ],
1034
- "year": null,
1035
- "venue": "Proc of the 34 th Annual Meeting of the Association for Computational Linguistics",
1036
- "volume": "",
1037
- "issue": "",
1038
- "pages": "",
1039
- "other_ids": {},
1040
- "num": null,
1041
- "urls": [],
1042
- "raw_text": "Miller S. and Stallard D. (i996) A Fully Statistical Approach to Natural Language Interfaces, In Proc of the 34 th Annual Meeting of the Association for Computational Linguistics, Santa Cruz, California.",
1043
- "links": null
1044
- }
1045
- },
1046
- "ref_entries": {
1047
- "FIGREF0": {
1048
- "num": null,
1049
- "type_str": "figure",
1050
- "text": "Welcome to Talk'n'Travel. How can I help you? U: I need a flight from Boston to Denver tomorrow. S: On Thursday, November 18, flying from Boston to Denver. What time do you want to leave? U: I want to leave before 10 AM. S: Leaving before 10 AM. Do you have an airline preference? U: Continental. S: Continental. I'm sorry, there are no flights on Continental leaving before 10 AM. I can get you a flight on Continental leaving at 10:10 AM instead. Would that be OK? U: Yes. S: OK, I've got you on Continental flight 429, departing Boston at 10:10 AM, arriving at Denver at 3:44 PM. Do you want to book a return flight from Denver to Boston? U: Yes, on Monday. S: Alright, we'll add a flight on Monday, November 22, from Boston to Denver to your itinerary. I have a Continental flight at 7 AM. Is that OK? U. Anything later? S: Leaving after 7 AM. There are no flights on Continental after 7 AM. I can get you a flight after 7 AM on Frontier, Northwest, and United instead. Would that be OK? U: How about United. S: United. I have a United flight at 8:01 AM. Is that flight OK? U: Yes. S: OK, I've got you on United flight 1762, departing Denver at 8:01 AM, arriving at Boston at 1:42 PM.",
1051
- "uris": null
1052
- }
1053
- }
1054
- }
1055
- }
Full_text_JSON/prefixA/json/A00/A00-1011.json DELETED
@@ -1,739 +0,0 @@
1
- {
2
- "paper_id": "A00-1011",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:39.382616Z"
6
- },
7
- "title": "REES: A Large-Scale Relation and Event Extraction System",
8
- "authors": [
9
- {
10
- "first": "Chinatsu",
11
- "middle": [],
12
- "last": "Aone",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "[email protected]"
16
- },
17
- {
18
- "first": "Mila",
19
- "middle": [],
20
- "last": "Ramos-Santacruz",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": ""
24
- }
- ],
50
- "year": "",
51
- "venue": null,
52
- "identifiers": {},
53
- "abstract": "This paper reports on a large-scale, end-toend relation and event extraction system. At present, the system extracts a total of 100 types of relations and events, which represents a much wider coverage than is typical of extraction systems. The system consists of three specialized pattem-based tagging modules, a high-precision coreference resolution module, and a configurable template generation module. We report quantitative evaluation results, analyze the results in detail, and discuss future directions.",
54
- "pdf_parse": {
55
- "paper_id": "A00-1011",
56
- "_pdf_hash": "",
57
- "abstract": [
58
- {
59
- "text": "This paper reports on a large-scale, end-toend relation and event extraction system. At present, the system extracts a total of 100 types of relations and events, which represents a much wider coverage than is typical of extraction systems. The system consists of three specialized pattem-based tagging modules, a high-precision coreference resolution module, and a configurable template generation module. We report quantitative evaluation results, analyze the results in detail, and discuss future directions.",
60
- "cite_spans": [],
61
- "ref_spans": [],
62
- "eq_spans": [],
63
- "section": "Abstract",
64
- "sec_num": null
65
- }
66
- ],
67
- "body_text": [
68
- {
69
- "text": "One major goal of information extraction (IE) technology is to help users quickly identify a variety of relations and events and their key players in a large volume of documents. In contrast with this goal, state-of-the-art information extraction systems, as shown in the various Message Understanding Conferences (MUCs), extract a small number of relations and events. For instance, the most recent MUC, MUC-7, called for the extraction of 3 relations (person-employer, maker-product, and organization-location) and 1 event (spacecraft launches). Our goal is to develop an IE system which scales up to extract as many types of relations and events as possible with a minimum amount of porting effort combined with high accuracy. Currently, REES handles 100 types of relations and events, and it does so in a modular, configurable, and scalable manner.",
70
- "cite_spans": [],
71
- "ref_spans": [],
72
- "eq_spans": [],
73
- "section": "Introduction",
74
- "sec_num": null
75
- },
76
- {
77
- "text": "Below, Section 1 presents the ontologies of relations and events that we have developed.",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "Introduction",
82
- "sec_num": null
83
- },
84
- {
85
- "text": "Section 2 describes REES' system architecture. Section 3 evaluates the system's performance, and offers a qualitative analysis of system errors. Section 4 discusses future directions.",
86
- "cite_spans": [],
87
- "ref_spans": [],
88
- "eq_spans": [],
89
- "section": "Introduction",
90
- "sec_num": null
91
- },
92
- {
93
- "text": "As the first step in building a large-scale relation and event extraction system, we developed ontologies of the relations and events to be extracted. These ontologies represent a wide variety of domains: political, financial, business, military, and life-related events and relations. \"Relations\" covers what in MUC-7 are called Template Elements (TEs) and Template Relations (TRs). There are 39 types of relations. While MUC TE's only dealt with singular entities, REES extracts both singular and plural entities (e.g., \"five executives\"). The TR relations are shown in italic in the table below. Table 1 : Relation Ontology \"Events\" are extracted along with their event participants, e.g., \"who did what to whom when and where?\" For example, for a BUYING event, REES extracts the buyer, the artifact, the seller, and the time and location of the BUYING event.",
94
- "cite_spans": [],
95
- "ref_spans": [
96
- {
97
- "start": 599,
98
- "end": 606,
99
- "text": "Table 1",
100
- "ref_id": null
101
- }
102
- ],
103
- "eq_spans": [],
104
- "section": "Relation and Event Ontologies",
105
- "sec_num": "1"
106
- },
107
- {
108
- "text": "REES currently covers 61 types of events, as shown below. \"May 17, 1987\" PLACE:",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "Person-OtherRelative Person-BirthPlace Person-BirthDate",
113
- "sec_num": null
114
- },
115
- {
116
- "text": "[TE for \"the gulf'] COMMENT: \"attacked\" REES consists of three main components: a tagging component (cf. Section 2.1), a co-reference resolution module (cf. Section 2.2), and a template generation module (cf. Section 2.3). Figure 3 also illustrates that the user may run REES from a Graphical User Interface (GUI) called TemplateTool (cf. Section 2.4).",
117
- "cite_spans": [],
118
- "ref_spans": [
119
- {
120
- "start": 223,
121
- "end": 231,
122
- "text": "Figure 3",
123
- "ref_id": null
124
- }
125
- ],
126
- "eq_spans": [],
127
- "section": "Events",
128
- "sec_num": null
129
- },
130
- {
131
- "text": "The tagging component consists of three modules as shown in Figure 3 : NameTagger, NPTagger and EventTagger. Each module relies on the same pattern-based extraction engine, but uses different sets of patterns. The NameTagger recognizes names of people, organizations, places, and artifacts (currently only vehicles).",
132
- "cite_spans": [],
133
- "ref_spans": [
134
- {
135
- "start": 60,
136
- "end": 68,
137
- "text": "Figure 3",
138
- "ref_id": null
139
- }
140
- ],
141
- "eq_spans": [],
142
- "section": "Tagging Modules",
143
- "sec_num": "2.1"
144
- },
145
- {
146
- "text": "remplateroot //v -':.v\" . .......",
147
- "cite_spans": [],
148
- "ref_spans": [],
149
- "eq_spans": [],
150
- "section": "Tagging Modules",
151
- "sec_num": "2.1"
152
- },
153
- {
154
- "text": "The NPTagger then takes the XML-tagged output of the NameTagger through two phases. First, it recognizes non-recursive Base Noun Phrase (BNP) (our specifications for BNP resemble those in Ramshaw and Marcus 1995) . Second, it recognizes complex NPs for only the four main semantic types of NPs, i.e., Person, Organization, Location, and Artifact (vehicle, drug and weapon). It makes postmodifier attachment decisions only for those NPs that are crucial to the extraction at hand. During this second phase, relations which can be recognized locally (e.g., Age, Affiliation, Maker) are also recognized and stored using the XML attributes for the NPs. For instance, the XML tag for \"President of XYZ Corp.\"",
155
- "cite_spans": [
156
- {
157
- "start": 188,
158
- "end": 212,
159
- "text": "Ramshaw and Marcus 1995)",
160
- "ref_id": "BIBREF2"
161
- }
162
- ],
163
- "ref_spans": [],
164
- "eq_spans": [],
165
- "section": "Figure 3: The REES System Architecture",
166
- "sec_num": null
167
- },
168
- {
169
- "text": "below holds an AFFILIATION attribute with the ID for \"XYZ Corp.\"",
170
- "cite_spans": [],
171
- "ref_spans": [],
172
- "eq_spans": [],
173
- "section": "Figure 3: The REES System Architecture",
174
- "sec_num": null
175
- },
176
- {
177
- "text": "<PNP ID=\"03\" AFFILIATION=\"O4\">President of <ENTITY ID=\"04\">XYZ Corp.</ENTITY>",
178
- "cite_spans": [],
179
- "ref_spans": [],
180
- "eq_spans": [],
181
- "section": "Figure 3: The REES System Architecture",
182
- "sec_num": null
183
- },
184
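A small sketch, using only the Python standard library, of how a locally recognized relation can be read back off such XML attributes; the printed relation label is our own choice:

    # Reading a locally stored relation off the XML attributes shown above.
    import xml.etree.ElementTree as ET

    snippet = ('<PNP ID="03" AFFILIATION="04">President of '
               '<ENTITY ID="04">XYZ Corp.</ENTITY></PNP>')

    root = ET.fromstring(snippet)
    entities = {e.get("ID"): e.text for e in root.iter("ENTITY")}

    # A PNP carrying an AFFILIATION attribute yields a Person-Affiliation relation.
    if root.get("AFFILIATION"):
        print("PERSON-AFFILIATION:", "".join(root.itertext()).strip(),
              "->", entities[root.get("AFFILIATION")])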
- {
185
- "text": "Building upon the XML output of the NPTagger, the EventTagger recognizes events applying its lexicon-driven, syntactically-based generic patterns. These patterns tag events in the presence of at least one of the arguments specified in the lexical entry for a predicate. Subsequent pattems try to find additional arguments as well as place and time adjunct information for the tagged event. As an example of the EventTagger's generic patterns, consider the simplified pattern below. This pattem matches on an event-denoting verb that requires a direct object of type weapon (e.g., \"fire a gun\")",
186
- "cite_spans": [],
187
- "ref_spans": [],
188
- "eq_spans": [],
189
- "section": "</PNP>",
190
- "sec_num": null
191
- },
192
- {
193
- "text": "(& {AND $VP {ARG2_SYN=DO} {ARG2_SEM=WEAPON } } {AND $ARTIFACT {SUBTYPE=WEAPON} })1",
194
- "cite_spans": [],
195
- "ref_spans": [],
196
- "eq_spans": [],
197
- "section": "</PNP>",
198
- "sec_num": null
199
- },
200
- {
201
- "text": "The important aspect of REES is its declarative, lexicon-driven approach. This approach requires a lexicon entry for each event-denoting word, which is generally a I &=concatenation, AND=Boolean operator, $VP and SARTIFACT are macro references for complex phrases.",
202
- "cite_spans": [],
203
- "ref_spans": [],
204
- "eq_spans": [],
205
- "section": "</PNP>",
206
- "sec_num": null
207
- },
208
- {
209
- "text": "71:1 verb. The lexicon entry specifies the syntactic and semantic restrictions on the verb's arguments. For instance, the following lexicon entry is for the verb \"attack.\" It indicates that the verb \"attack\" belongs to the CONFLICT ontology and to the ATTACK_TARGET type. The first argument for the verb \"attack\" is semantically an organization, location, person, or artifact (ARGI_SEM), and syntactically a subject (ARGI_SYN). The second argument is semantically an organization, location, person or artifact, and syntactically a direct object. The third argument is semantically a weapon and syntactically a prepositional phrase introduced by the preposition \"with\". This generic, lexicon-driven event extraction approach makes REES easily portable because new types of events can be extracted by just adding new verb entries to the lexicon. No new patterns are required. Moreover, this approach allows for easy customization capability: a person with no knowledge of the pattern language would be able to configure the system to extract new events.",
210
- "cite_spans": [],
211
- "ref_spans": [],
212
- "eq_spans": [],
213
- "section": "</PNP>",
214
- "sec_num": null
215
- },
216
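A simplified reconstruction of such a declarative entry and of the at-least-one-argument tagging policy; the entry format, semantic labels, and candidate arguments below are our own illustration, not REES's actual lexicon:

    # Sketch of a declarative, lexicon-driven event entry for 'attack'.
    # Adding a new verb means adding a new entry; no new patterns needed.

    LEXICON = {
        "attack": {
            "ontology": "CONFLICT",
            "type": "ATTACK_TARGET",
            "args": [
                {"sem": {"ORG", "LOC", "PERSON", "ARTIFACT"}, "syn": "SUBJECT"},
                {"sem": {"ORG", "LOC", "PERSON", "ARTIFACT"}, "syn": "DO"},
                {"sem": {"WEAPON"}, "syn": "PP", "prep": "with"},
            ],
        },
    }

    def tag_event(verb, candidates):
        """Accept an event if at least one argument satisfies its restrictions."""
        entry = LEXICON.get(verb)
        if entry is None:
            return None
        filled = {}
        for i, spec in enumerate(entry["args"], start=1):
            for cand in candidates:
                if cand["syn"] == spec["syn"] and cand["sem"] in spec["sem"]:
                    filled[f"ARG{i}"] = cand["text"]
        return {"type": entry["type"], **filled} if filled else None

    print(tag_event("attack", [
        {"syn": "SUBJECT", "sem": "LOC", "text": "Iran"},
        {"syn": "DO", "sem": "LOC", "text": "the gulf"},
    ]))  # {'type': 'ATTACK_TARGET', 'ARG1': 'Iran', 'ARG2': 'the gulf'}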
- {
217
- "text": "While the tagging component is similar to other pattern-based IE systems (e.g., Appelt et al. 1995; Aone et al. 1998, Yangarber and Grishman 1998) , our EventTagger is more portable through a lexicon-driven approach.",
218
- "cite_spans": [
219
- {
220
- "start": 80,
221
- "end": 99,
222
- "text": "Appelt et al. 1995;",
223
- "ref_id": "BIBREF1"
224
- },
225
- {
226
- "start": 100,
227
- "end": 131,
228
- "text": "Aone et al. 1998, Yangarber and",
229
- "ref_id": null
230
- },
231
- {
232
- "start": 132,
233
- "end": 146,
234
- "text": "Grishman 1998)",
235
- "ref_id": "BIBREF3"
236
- }
237
- ],
238
- "ref_spans": [],
239
- "eq_spans": [],
240
- "section": "</PNP>",
241
- "sec_num": null
242
- },
243
- {
244
- "text": "After the tagging phase, REES sends the XML output through a rule-based co-reference resolution module that resolves:",
245
- "cite_spans": [],
246
- "ref_spans": [],
247
- "eq_spans": [],
248
- "section": "Co-reference Resolution",
249
- "sec_num": "2.2"
250
- },
251
- {
252
- "text": "\u2022 definite noun phrases of Organization, Person, and Location types, and",
253
- "cite_spans": [],
254
- "ref_spans": [],
255
- "eq_spans": [],
256
- "section": "Co-reference Resolution",
257
- "sec_num": "2.2"
258
- },
259
- {
260
- "text": "\u2022 singular person pronouns: he and she.",
261
- "cite_spans": [],
262
- "ref_spans": [],
263
- "eq_spans": [],
264
- "section": "Co-reference Resolution",
265
- "sec_num": "2.2"
266
- },
267
- {
268
- "text": "Only \"high-precision\" rules are currently applied to selected types of anaphora. That is, we resolve only those cases of anaphora whose antecedents the module can identify with high confidence. For example, the pronoun rules look for the antecedents only within 3 sentences, and the definite NP rules rely heavily on the head noun matches. Our highprecision approach results from our observation that unless the module is very accurate (above 80% precision), the coreference module can hurt the overall extraction results by over-merging templates.",
269
- "cite_spans": [],
270
- "ref_spans": [],
271
- "eq_spans": [],
272
- "section": "Co-reference Resolution",
273
- "sec_num": "2.2"
274
- },
275
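A sketch of this high-precision policy: resolve a definite NP only when its head noun matches an antecedent's head within a small sentence window, and otherwise leave it unresolved. The window size and the last-token head heuristic are our own illustration:

    # High-precision definite-NP coreference sketch (illustrative thresholds).

    def head(np):
        return np.split()[-1].lower()      # crude head = last token

    def resolve(mentions, window=3):
        """mentions: (sentence_index, np_text, is_definite) in document order."""
        links = []
        for i, (sent, np, definite) in enumerate(mentions):
            if not definite:
                continue
            for psent, pnp, _ in reversed(mentions[:i]):
                if sent - psent <= window and head(np) == head(pnp):
                    links.append((np, pnp))   # resolve with high confidence
                    break                     # otherwise leave unresolved
        return links

    mentions = [(0, "XYZ Corp.", False),
                (2, "the corp.", True),
                (9, "the nation", True)]
    print(resolve(mentions))  # [('the corp.', 'XYZ Corp.')]; 'the nation' stays open

Leaving "the nation" unresolved is the point of the policy: an uncertain merge is worse for template quality than no merge at all.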
- {
276
- "text": "A typical template generation module is a hard-coded post-processing module which has to be written for each type of template. By contrast, our Template Generation module is unique as it uses declarative rules to generate and merge templates automatically so as to achieve portability.",
277
- "cite_spans": [],
278
- "ref_spans": [],
279
- "eq_spans": [],
280
- "section": "Template Generation Module",
281
- "sec_num": "2.3"
282
- },
283
- {
284
- "text": "REES outputs the extracted information in the form of either MUC-style templates, as illustrated in Figure 1 and 2, or XML. A crucial part of a portable, scalable system is to be able to output different types of relations and events without changing the template generation code. REES maps XML-tagged output of the co-reference module to templates using declarative template definitions, which specifies the template label (e.g., ATTACK_TARGET), XML attribute names (e.g., ARGUMENT l), corresponding template slot names (e.g., ATTACKER), and the type restrictions on slot values (e.g., string).",
285
- "cite_spans": [],
286
- "ref_spans": [
287
- {
288
- "start": 100,
289
- "end": 108,
290
- "text": "Figure 1",
291
- "ref_id": null
292
- }
293
- ],
294
- "eq_spans": [],
295
- "section": "Declarative Template Generation",
296
- "sec_num": "2.3.1"
297
- },
298
- {
299
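A minimal sketch of a declarative template definition and the generic mapping loop it drives; the definition format below is our guess at the shape the text describes:

    # Declarative template definitions: (xml_attribute, slot_name, slot_type).
    # Definition contents are illustrative.

    TEMPLATE_DEFS = {
        "ATTACK_TARGET": [
            ("ARGUMENT1", "ATTACKER", str),
            ("ARGUMENT2", "TARGET", str),
            ("TIME", "TIME", str),
        ],
    }

    def generate(event_type, xml_attributes):
        slots = {}
        for attr, slot, slot_type in TEMPLATE_DEFS[event_type]:
            if attr in xml_attributes:
                slots[slot] = slot_type(xml_attributes[attr])
        return {"TEMPLATE": event_type, **slots}

    print(generate("ATTACK_TARGET",
                   {"ARGUMENT1": "Iraqi troops", "ARGUMENT2": "the gulf",
                    "TIME": "May 17, 1987"}))

Supporting a new event type then means adding a definition entry, not writing new post-processing code.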
- "text": "One of the challenges of event extraction is to be able to recognize and merge those event descriptions which refer to the same event. The Template Generation module uses a set of declarative, customizable rules to merge coreferring events into a single event. Often, the rules reflect pragmatic knowledge of the world. For example, consider the rule below for the DYING event type. This rule establishes that if two die events have the same subject, then they refer to the same event (i.e., a person cannot die more than once ",
300
- "cite_spans": [],
301
- "ref_spans": [],
302
- "eq_spans": [],
303
- "section": "Event Merging",
304
- "sec_num": "2.3.2"
305
- },
306
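A sketch of such a merge rule applied generically; the rule encoding below is ours, since the text does not show the actual rule language:

    # Declarative merge rules: event type -> slots that must match to unify.
    MERGE_RULES = {"DYING": ["DECEASED"]}   # a person cannot die more than once

    def merge(events):
        merged = []
        for event in events:
            keys = MERGE_RULES.get(event["type"], [])
            for prior in merged:
                if prior["type"] == event["type"] and keys \
                        and all(prior.get(k) == event.get(k) for k in keys):
                    # unify: fill in slots the earlier description lacked
                    prior.update({k: v for k, v in event.items()
                                  if k not in prior})
                    break
            else:
                merged.append(dict(event))
        return merged

    events = [{"type": "DYING", "DECEASED": "the dictator"},
              {"type": "DYING", "DECEASED": "the dictator", "PLACE": "Panama"}]
    print(len(merge(events)))  # 1: same subject, so the two descriptions unify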
- {
307
- "text": "For some applications such as database population, the user may want to validate the system output. REES is provided with a Javabased Graphical User Interface that allows the user to run REES and display, delete, or modify the system output. As illustrated in Figure 4 , the tool displays the templates on the bottom half of the screen, and the user can choose which template to display. The top half of the screen displays the input document with extracted phrases in different colors. The user can select any slot value, and the tool will highlight the portion of the input text responsible for the slot value. This feature is very useful in efficiently verifying system output. Once the system's output has been verified, the resulting templates can be saved and used to populate a database.",
308
- "cite_spans": [],
309
- "ref_spans": [
310
- {
311
- "start": 260,
312
- "end": 268,
313
- "text": "Figure 4",
314
- "ref_id": null
315
- }
316
- ],
317
- "eq_spans": [],
318
- "section": "Graphical User Interface (GUI)",
319
- "sec_num": "2.4"
320
- },
321
- {
322
- "text": "The The blind set F-Measure for 31 types of relations (73.95%) exceeded our initial goal of 70%. While the blind set F-Measure for 61 types of events was 53.75%, it is significant to note that 26 types of events achieved an F-Measure over 70%, and 37 types over 60% (cf . Table 4 ). For reference, though not exactly comparable, the best-performing MUC-7 system achieved 87% in TE, 76% in TR, and 51% in event extraction. Regarding relation extraction, the difference in the score between the training and blind sets was very small. In fact, the total F-Measure on the blind set is less than 2 points lower than that of the training set. It is also interesting to note that for 8 of the 12 relation types where the F-Measure dropped more than 10 points, the training set includes less than 20 instances. In other words, there seems to be a natural correlation between low number of instances in the training set and low performance in the blind set.",
323
- "cite_spans": [],
324
- "ref_spans": [
325
- {
326
- "start": 270,
327
- "end": 280,
328
- "text": ". Table 4",
329
- "ref_id": "TABREF7"
330
- }
331
- ],
332
- "eq_spans": [],
333
- "section": "Graphical User Interface (GUI)",
334
- "sec_num": "2.4"
335
- },
336
- {
337
- "text": "There was a significant drop between the training and blind sets in event extraction: 11 points. We believe that the main reason is that the total number of events in the training set is fairly low: 801 instances of 61 types of events (an average of 13/event), where 35 of the event types had fewer than 10 instances. In fact, 9 out of the 14 event types which scored lower than 40% F-Measure had fewer than I0 examples. In comparison, there were 34,000 instances of 39 types of relations in the training set.",
338
- "cite_spans": [],
339
- "ref_spans": [],
340
- "eq_spans": [],
341
- "section": "Graphical User Interface (GUI)",
342
- "sec_num": "2.4"
343
- },
344
- {
345
- "text": "The contribution of the co-reference module is illustrated in the table below. Co-reference resolution consistently improves F-Measures both in training and blind sets. Its impact is larger in relation than event extraction. In the next two sections, we analyze both false positives and false negatives.",
346
- "cite_spans": [],
347
- "ref_spans": [],
348
- "eq_spans": [],
349
- "section": "Graphical User Interface (GUI)",
350
- "sec_num": "2.4"
351
- },
352
- {
353
- "text": "REES produced precision errors following cases:",
354
- "cite_spans": [],
355
- "ref_spans": [],
356
- "eq_spans": [],
357
- "section": "False Positives (or Precision Errors)",
358
- "sec_num": "3.1"
359
- },
360
- {
361
- "text": "\u2022 Most of the errors were due in the to overgeneration of templates. These are mostly cases of co-referring noun phrases that the system failed to resolve. For example: \"Panama ... the nation ... this country.., his",
362
- "cite_spans": [],
363
- "ref_spans": [],
364
- "eq_spans": [],
365
- "section": "False Positives (or Precision Errors)",
366
- "sec_num": "3.1"
367
- },
368
- {
369
- "text": "Rules for the co-reference module are still under development, and at present REES handles only limited types of plural noun phrase anaphora. Spurious events resulted from verbs in conditional constructions (e.g., \"if ... then...\") or from ambiguous predicates. For instance, \"appoint\" as a POLITICAL event vs. a PERSONNEL CHANGE event. The subject of a verb was misidentified. This is particularly frequent in reduced relative clauses. Kabul radio said the latest deaths brought to 38 the number of people killed in the three car bomb explosions, (Wrong subject: \"the number of people\" as the KILLER instead of the victim)",
370
- "cite_spans": [],
371
- "ref_spans": [],
372
- "eq_spans": [],
373
- "section": "country\"",
374
- "sec_num": null
375
- },
376
- {
377
- "text": "Below, we list the most frequent recall errors in the training set.",
378
- "cite_spans": [],
379
- "ref_spans": [],
380
- "eq_spans": [],
381
- "section": "False Negatives (or Recall Errors)",
382
- "sec_num": "3.2"
383
- },
384
- {
385
- "text": "\u2022 Some event arguments are mentioned with event nouns instead of event verbs. The current system does not handle noun-based event extraction. India's acquisition last month of the nuclear submarine from the Soviet Union... (SELLER=\"Soviet Union\" and TIME=\"last month'\" come with the nounbased event \"acquisition.\") \u2022 Pronouns \"it\" and \"they,\" which carry little semantic information, are currently not resolved by the co-reference module. We asked a person who is not involved in the development of REES to review the event extraction output for the blind set. This person reported that:",
386
- "cite_spans": [],
387
- "ref_spans": [],
388
- "eq_spans": [],
389
- "section": "False Negatives (or Recall Errors)",
390
- "sec_num": "3.2"
391
- },
392
- {
393
- "text": "\u2022 In 35% of the cases where the REES system completely missed an event, it was because the lexicon was missing the predicate. REES's event predicate lexicon is rather small at present (a total of 140 verbs for 61 event types) and is mostly based on the examples found in the training set, \u2022 In 30% of the cases, the subject or object was elliptical. The system does not currently handle ellipsis.",
394
- "cite_spans": [],
395
- "ref_spans": [],
396
- "eq_spans": [],
397
- "section": "False Negatives (or Recall Errors)",
398
- "sec_num": "3.2"
399
- },
400
- {
401
- "text": "\u2022 In 25% of the cases, syntactic/semantic argument structures were missing from existing lexical entries. It is quite encouraging that simply adding additional predicates and predicate argument structures to the lexicon could significantly increase the blind set performance.",
402
- "cite_spans": [],
403
- "ref_spans": [],
404
- "eq_spans": [],
405
- "section": "False Negatives (or Recall Errors)",
406
- "sec_num": "3.2"
407
- },
408
- {
409
- "text": "We believe that improving co-reference resolution and adding noun-based event extraction capability are critical to achieving our ultimate goal of at least 80% F-Measure for relations and 70% for events.",
410
- "cite_spans": [],
411
- "ref_spans": [],
412
- "eq_spans": [],
413
- "section": "Future Directions",
414
- "sec_num": "4"
415
- },
416
- {
417
- "text": "As discussed in Section 3.1 and 3.2, accurate co-reference resolution is crucial to improving the accuracy of extraction, both in terms of recall and precision. In particular, we identified two types of high-payoff coreference resolution:",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "Co-reference Resolution",
422
- "sec_num": "4.1"
423
- },
424
- {
425
- "text": "\u2022 definite noun phrase resolution, especially plural noun phrases",
426
- "cite_spans": [],
427
- "ref_spans": [],
428
- "eq_spans": [],
429
- "section": "Co-reference Resolution",
430
- "sec_num": "4.1"
431
- },
432
- {
433
- "text": "\u2022 3 rd person neutral pronouns \"it\" and \"they.\"",
434
- "cite_spans": [],
435
- "ref_spans": [],
436
- "eq_spans": [],
437
- "section": "Co-reference Resolution",
438
- "sec_num": "4.1"
439
- },
440
- {
441
- "text": "REES currently handles only verb-based events. Noun-based event extraction adds more complexity because:",
442
- "cite_spans": [],
443
- "ref_spans": [],
444
- "eq_spans": [],
445
- "section": "Noun-based Event Extraction",
446
- "sec_num": "4.2"
447
- },
448
- {
449
- "text": "Nouns are often used in a generic, nonreferential manner (e.g., \"We see a merger as being in the consumer's interest\"), and",
450
- "cite_spans": [],
451
- "ref_spans": [],
452
- "eq_spans": [],
453
- "section": "Noun-based Event Extraction",
454
- "sec_num": "4.2"
455
- },
456
- {
457
- "text": "When referential, nouns often refer to verb-based events, thus requiring nounverb co-reference resolution (\"An F-14 crashed shortly after takeoff... The crash\").",
458
- "cite_spans": [],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "Noun-based Event Extraction",
462
- "sec_num": "4.2"
463
- },
464
- {
465
- "text": "However, noun-based events are crucial because they often introduce additional key information, as the underlined phrases below indicate: While Bush's meetings with prominent antiapartheid leaders such as Archbishop Desmond Tutu and Albertina Sisulu are important...",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Noun-based Event Extraction",
470
- "sec_num": "4.2"
471
- },
472
- {
473
- "text": "We plan to develop a generic set of patterns for noun-based event extraction to complement the set of generic verb-based extraction patterns.",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "Noun-based Event Extraction",
478
- "sec_num": "4.2"
479
- },
480
- {
481
- "text": "In this paper, we reported on a fast, portable, large-scale event and relation extraction system REES. To the best of our knowledge, this is the first attempt to develop an IE system which can extract such a wide range of relations and events with high accuracy.",
482
- "cite_spans": [],
483
- "ref_spans": [],
484
- "eq_spans": [],
485
- "section": "Conclusions",
486
- "sec_num": "5"
487
- },
488
- {
489
- "text": "It performs particularly well on relation extraction, and it achieves 70% or higher F-Measure for 26 types of events already. In addition, the design of REES is highly portable for future addition of new relations and events.",
490
- "cite_spans": [],
491
- "ref_spans": [],
492
- "eq_spans": [],
493
- "section": "Conclusions",
494
- "sec_num": "5"
495
- },
496
- {
497
- "text": "System Evaluation",
498
- "cite_spans": [],
499
- "ref_spans": [],
500
- "eq_spans": [],
501
- "section": "",
502
- "sec_num": null
503
- }
504
- ],
505
- "back_matter": [
506
- {
507
- "text": "This project would have not been possible without the contributions of Arcel Castillo, Lauren Halverson, and Sandy Shinn. Our thanks also to Brandon Kennedy, who prepared the hand-tagged data.",
508
- "cite_spans": [],
509
- "ref_spans": [],
510
- "eq_spans": [],
511
- "section": "Acknowledgements",
512
- "sec_num": null
513
- }
514
- ],
515
- "bib_entries": {
516
- "BIBREF0": {
517
- "ref_id": "b0",
518
- "title": "SRA: Description of the IE 2 System Used for MUC-7",
519
- "authors": [
520
- {
521
- "first": "Chinatsu",
522
- "middle": [],
523
- "last": "Aone",
524
- "suffix": ""
525
- },
526
- {
527
- "first": "Lauren",
528
- "middle": [],
529
- "last": "Halverson",
530
- "suffix": ""
531
- },
532
- {
533
- "first": "Tom",
534
- "middle": [],
535
- "last": "Hampton",
536
- "suffix": ""
537
- },
538
- {
539
- "first": "Mila",
540
- "middle": [],
541
- "last": "Ramos-Santacruz",
542
- "suffix": ""
543
- }
544
- ],
545
- "year": 1998,
546
- "venue": "Proceedings of the 7thMessage Understanding Conference",
547
- "volume": "",
548
- "issue": "",
549
- "pages": "",
550
- "other_ids": {},
551
- "num": null,
552
- "urls": [],
553
- "raw_text": "Aone, Chinatsu, Lauren Halverson, Tom Hampton, and Mila Ramos-Santacruz. 1998. \"SRA: Description of the IE 2 System Used for MUC-7.\" In Proceedings of the 7thMessage Understanding Conference (MUC-7).",
554
- "links": null
555
- },
556
- "BIBREF1": {
557
- "ref_id": "b1",
558
- "title": "SRI International FASTUS System: MUC-6 Test Results and Analysis",
559
- "authors": [
560
- {
561
- "first": "Douglas",
562
- "middle": [
563
- "E"
564
- ],
565
- "last": "Appelt",
566
- "suffix": ""
567
- },
568
- {
569
- "first": "R",
570
- "middle": [],
571
- "last": "Jerry",
572
- "suffix": ""
573
- },
574
- {
575
- "first": "John",
576
- "middle": [],
577
- "last": "Hobbs",
578
- "suffix": ""
579
- },
580
- {
581
- "first": "David",
582
- "middle": [],
583
- "last": "Bear",
584
- "suffix": ""
585
- },
586
- {
587
- "first": "Megumi",
588
- "middle": [],
589
- "last": "Israel",
590
- "suffix": ""
591
- },
592
- {
593
- "first": "Andy",
594
- "middle": [],
595
- "last": "Kameyama",
596
- "suffix": ""
597
- },
598
- {
599
- "first": "David",
600
- "middle": [],
601
- "last": "Kehler",
602
- "suffix": ""
603
- },
604
- {
605
- "first": "Karen",
606
- "middle": [],
607
- "last": "Martin",
608
- "suffix": ""
609
- },
610
- {
611
- "first": "Mabry",
612
- "middle": [],
613
- "last": "Myers",
614
- "suffix": ""
615
- },
616
- {
617
- "first": "",
618
- "middle": [],
619
- "last": "Tyson",
620
- "suffix": ""
621
- }
622
- ],
623
- "year": 1995,
624
- "venue": "Proceedings of the 6 th Message Understanding Conference",
625
- "volume": "",
626
- "issue": "",
627
- "pages": "",
628
- "other_ids": {},
629
- "num": null,
630
- "urls": [],
631
- "raw_text": "Appelt, Douglas E., Jerry R Hobbs, John Bear, David Israel, Megumi Kameyama, Andy Kehler, David Martin, Karen Myers, and Mabry Tyson. 1995. \"SRI International FASTUS System: MUC- 6 Test Results and Analysis.\" In Proceedings of the 6 th Message Understanding Conference (MUC-6).",
632
- "links": null
633
- },
634
- "BIBREF2": {
635
- "ref_id": "b2",
636
- "title": "Text Chunking Using Transformation-Based Learning",
637
- "authors": [
638
- {
639
- "first": "Lance",
640
- "middle": [
641
- "A"
642
- ],
643
- "last": "Ramshaw",
644
- "suffix": ""
645
- },
646
- {
647
- "first": "Mitchell",
648
- "middle": [
649
- "P"
650
- ],
651
- "last": "Marcus",
652
- "suffix": ""
653
- }
654
- ],
655
- "year": 1995,
656
- "venue": "Proceedings of the 3 rd ACL Workshop on Very Large Corpora (WVLC95)",
657
- "volume": "",
658
- "issue": "",
659
- "pages": "",
660
- "other_ids": {},
661
- "num": null,
662
- "urls": [],
663
- "raw_text": "Ramshaw, Lance A., and Mitchell P. Marcus. 1995. \"Text Chunking Using Transformation-Based Learning\". In Proceedings of the 3 rd ACL Workshop on Very Large Corpora (WVLC95).",
664
- "links": null
665
- },
666
- "BIBREF3": {
667
- "ref_id": "b3",
668
- "title": "NYU: Description of the Proteus~PET System as Used for MUC-7 ST",
669
- "authors": [
670
- {
671
- "first": "Roman",
672
- "middle": [],
673
- "last": "Yangarber",
674
- "suffix": ""
675
- },
676
- {
677
- "first": "Ralph",
678
- "middle": [],
679
- "last": "Grishman",
680
- "suffix": ""
681
- }
682
- ],
683
- "year": 1998,
684
- "venue": "Proceedings of the 6 th Message Understanding Conference",
685
- "volume": "",
686
- "issue": "",
687
- "pages": "",
688
- "other_ids": {},
689
- "num": null,
690
- "urls": [],
691
- "raw_text": "Yangarber, Roman and Ralph Grishman. 1998. \"NYU: Description of the Proteus~PET System as Used for MUC-7 ST.\" In Proceedings of the 6 th Message Understanding Conference (MUC-7).",
692
- "links": null
693
- }
694
- },
695
- "ref_entries": {
696
- "FIGREF0": {
697
- "type_str": "figure",
698
- "text": "Figure 2: Example of Event Template",
699
- "uris": null,
700
- "num": null
701
- },
702
- "TABREF1": {
703
- "text": "",
704
- "html": null,
705
- "type_str": "table",
706
- "content": "<table><tr><td colspan=\"3\">Figures 1 and 2 show sample relation and event</td></tr><tr><td colspan=\"3\">templates. Figure 1 shows a Person-Affiliation</td></tr><tr><td colspan=\"3\">relation template for \"Frank Ashley, a</td></tr><tr><td colspan=\"3\">spokesman for Occidental Petroleum Corp.'\"</td></tr><tr><td colspan=\"3\">&lt;PERSON AFFILIATION-AP8802230207-54&gt; :=</td></tr><tr><td>TYPE:</td><td colspan=\"2\">PERSON AFFILIATION</td></tr><tr><td colspan=\"3\">PERSON: [TE for\"Frank Ashley\"]</td></tr><tr><td>ORG:</td><td colspan=\"2\">[TE for \"Occidental Petroleum\"]</td></tr><tr><td colspan=\"3\">Figure 1: Example of Relation Template</td></tr><tr><td colspan=\"3\">Figure 2 shows an Attack Target event template</td></tr><tr><td colspan=\"3\">for the sentence \"an Iraqi warplane attacked the</td></tr><tr><td colspan=\"3\">frigate Stark with missiles May 17, 1987. \"</td></tr><tr><td colspan=\"3\">&lt;ATTACK TARGET-AP8804160078-12&gt;: = i</td></tr><tr><td>TYPE:</td><td/><td>CONFLICT</td></tr><tr><td colspan=\"2\">SUBTYPE:</td><td>ATTACK TARGET</td></tr><tr><td colspan=\"3\">ATTACKER: [TE for \"an Iraqi warplane\"]</td></tr><tr><td>TARGET:</td><td/><td>[TE for \"the frigate Stark\"]</td></tr><tr><td colspan=\"2\">WEAPON:</td><td>[TE for \"missiles\"]</td></tr><tr><td>TIME:</td><td/></tr></table>",
707
- "num": null
708
- },
709
- "TABREF4": {
710
- "text": "table below shows the system's recall, precision, and F-Measure scores for the training set (200 texts) and the blind set (208 texts) from about a dozen news sources. Each set contains at least 3 examples of each type of relations and events. As we mentioned earlier, \"relations\" includes MUC-style TEs and TRs.",
711
- "html": null,
712
- "type_str": "table",
713
- "content": "<table><tr><td>Text</td><td>Task</td><td>Templates</td><td>R</td><td>P</td><td>F-M</td></tr><tr><td>Set</td><td/><td>in keys</td><td/><td/><td/></tr><tr><td/><td>Rel.</td><td>9955</td><td colspan=\"3\">76 74 75.35</td></tr><tr><td colspan=\"2\">Train Events</td><td>2525</td><td colspan=\"3\">57 74 64.57</td></tr><tr><td/><td>Rel. &amp;</td><td>10707</td><td colspan=\"3\">74 74 73.95</td></tr><tr><td/><td>Events</td><td/><td/><td/><td/></tr><tr><td/><td>Rel.</td><td>8938</td><td colspan=\"3\">74 74 73.74</td></tr><tr><td colspan=\"2\">Blind Events</td><td>2020</td><td colspan=\"3\">42 75 53.75</td></tr><tr><td/><td>Rel. &amp;</td><td>9526</td><td colspan=\"3\">69 74 71.39</td></tr><tr><td/><td>Events</td><td/><td/><td/><td/></tr></table>",
714
- "num": null
715
- },
716
- "TABREF5": {
717
- "text": "",
718
- "html": null,
719
- "type_str": "table",
720
- "content": "<table/>",
721
- "num": null
722
- },
723
- "TABREF7": {
724
- "text": "",
725
- "html": null,
726
- "type_str": "table",
727
- "content": "<table/>",
728
- "num": null
729
- },
730
- "TABREF9": {
731
- "text": "",
732
- "html": null,
733
- "type_str": "table",
734
- "content": "<table/>",
735
- "num": null
736
- }
737
- }
738
- }
739
- }
Full_text_JSON/prefixA/json/A00/A00-1012.json DELETED
@@ -1,1015 +0,0 @@
1
- {
2
- "paper_id": "A00-1012",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:11.893957Z"
6
- },
7
- "title": "Experiments on Sentence Boundary Detection",
8
- "authors": [
9
- {
10
- "first": "Mark",
11
- "middle": [],
12
- "last": "Stevenson",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Sheffield Regent Court",
17
- "location": {
18
- "addrLine": "211 Portobello Street",
19
- "postCode": "S1 4DP",
20
- "settlement": "Sheffield",
21
- "country": "United Kingdom"
22
- }
23
- },
24
- "email": "[email protected]"
25
- },
26
- {
27
- "first": "Robert",
28
- "middle": [],
29
- "last": "Gaizauskas",
30
- "suffix": "",
31
- "affiliation": {
32
- "laboratory": "",
33
- "institution": "University of Sheffield Regent Court",
34
- "location": {
35
- "addrLine": "211 Portobello Street",
36
- "postCode": "S1 4DP",
37
- "settlement": "Sheffield",
38
- "country": "United Kingdom"
39
- }
40
- },
41
- "email": "robertg@[email protected]"
42
- }
43
- ],
44
- "year": "",
45
- "venue": null,
46
- "identifiers": {},
47
- "abstract": "This paper explores the problem of identifying sentence boundaries in the transcriptions produced by automatic speech recognition systems. An experiment which determines the level of human performance for this task is described as well as a memorybased computational approach to the problem.",
48
- "pdf_parse": {
49
- "paper_id": "A00-1012",
50
- "_pdf_hash": "",
51
- "abstract": [
52
- {
53
- "text": "This paper explores the problem of identifying sentence boundaries in the transcriptions produced by automatic speech recognition systems. An experiment which determines the level of human performance for this task is described as well as a memorybased computational approach to the problem.",
54
- "cite_spans": [],
55
- "ref_spans": [],
56
- "eq_spans": [],
57
- "section": "Abstract",
58
- "sec_num": null
59
- }
60
- ],
61
- "body_text": [
62
- {
63
- "text": "This paper addresses the problem of identifying sentence boundaries in the transcriptions produced by automatic speech recognition (ASR) systems. This is unusual in the field of text processing which has generally dealt with well-punctuated text: some of the most commonly used texts in NLP are machine readable versions of highly edited documents such as newspaper articles or novels. However, there are many types of text which are not so-edited and the example which we concentrate on in this paper is the output from ASR systems. These differ from the sort of texts normally used in NLP in a number of ways; the text is generally in single case (usually upper) , unpunctuated and may contain transcription errors. 1 Figure 1 compares a short text in the format which would be produced by an ASR system with a fully punctuated version which includes case information. For the remainder of this paper errorfree texts such as newspaper articles or novels shall be referred to as \"standard text\" and the output from a speech recognition system as \"ASR text\".",
64
- "cite_spans": [
65
- {
66
- "start": 649,
67
- "end": 664,
68
- "text": "(usually upper)",
69
- "ref_id": null
70
- }
71
- ],
72
- "ref_spans": [
73
- {
74
- "start": 720,
75
- "end": 728,
76
- "text": "Figure 1",
77
- "ref_id": null
78
- }
79
- ],
80
- "eq_spans": [],
81
- "section": "The Problem",
82
- "sec_num": "1"
83
- },
84
- {
85
- "text": "There are many possible situations in which an NLP system may be required to process ASR text. The most obvious examples are NLP systems which take speech input (eg. Moore et al. (1997) ). Also, dictation software programs do not punctuate or capitalise their output but, if this information could be added to ASR text, the results would be far more usable. One of the most important pieces of inform-1 Speech recognition systems are often evaluated in terms of word error rate (WER), the percentage of tokens which are wrongly transcribed. For large vocabulary tasks and speakerindependent systems, WER varies between 7% and 50%, depending upon the quality of the recording being recognised. See, e.g., Cole (1996) .",
86
- "cite_spans": [
87
- {
88
- "start": 166,
89
- "end": 185,
90
- "text": "Moore et al. (1997)",
91
- "ref_id": "BIBREF11"
92
- },
93
- {
94
- "start": 704,
95
- "end": 715,
96
- "text": "Cole (1996)",
97
- "ref_id": null
98
- }
99
- ],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "The Problem",
103
- "sec_num": "1"
104
- },
105
- {
106
- "text": "Gi~nni Versace, one of the world's leading fashion designers, has been murdered in Miami.",
107
- "cite_spans": [],
108
- "ref_spans": [],
109
- "eq_spans": [],
110
- "section": "Good evening.",
111
- "sec_num": null
112
- },
113
- {
114
- "text": "Police say it was a planned killing carried out like an execution. Schools inspections are going to be tougher to force bad teachers out. And the four thousand couples who shared the Queen's golden day. Figure 1 : Example text shown in standard and ASR format ation which is not available in ASR output is sentence boundary information. However, knowledge of sentence boundaries is required by many NLP technologies. Part of speech taggers typically require input in the format of a single sentence per line (for example Brill's tagger (Brill, 1992) ) and parsers generally aim to produce a tree spanning each sentence.",
115
- "cite_spans": [
116
- {
117
- "start": 536,
118
- "end": 549,
119
- "text": "(Brill, 1992)",
120
- "ref_id": "BIBREF1"
121
- }
122
- ],
123
- "ref_spans": [
124
- {
125
- "start": 203,
126
- "end": 211,
127
- "text": "Figure 1",
128
- "ref_id": null
129
- }
130
- ],
131
- "eq_spans": [],
132
- "section": "Good evening.",
133
- "sec_num": null
134
- },
135
- {
136
- "text": "Only the most trivial linguistic analysis can be carried out on text which is not split into sentences. It is worth mentioning that not all transcribed speech can be sensibly divided into sentences. It has been argued by Gotoh and Renals (2000) that the main unit in spoken language is the phrase rather than the sentence. However, there are situations in which it is appropriate to consider spoken language to be made up from sentences. One example is broadcast news: radio and television news programs. The DARPA HUB4 broadcast news evaluation (Chinchor et al., 1998) focussed on information extraction from ASR text from news programs. Although news programs are scripted there are often deviations from the script and they cannot be relied upon as accurate transcriptions of the news program. The spoken portion of the British National Corpus (Burnard, 1995) contains 10 million words and was manually marked with sentence boundaries. A technology which identifies sentence boundaries could be used to speed up the process of creating any future corpus of this type.",
137
- "cite_spans": [
138
- {
139
- "start": 221,
140
- "end": 244,
141
- "text": "Gotoh and Renals (2000)",
142
- "ref_id": "BIBREF9"
143
- },
144
- {
145
- "start": 546,
146
- "end": 569,
147
- "text": "(Chinchor et al., 1998)",
148
- "ref_id": "BIBREF4"
149
- },
150
- {
151
- "start": 847,
152
- "end": 862,
153
- "text": "(Burnard, 1995)",
154
- "ref_id": "BIBREF2"
155
- }
156
- ],
157
- "ref_spans": [],
158
- "eq_spans": [],
159
- "section": "Good evening.",
160
- "sec_num": null
161
- },
162
- {
163
- "text": "It is important to distinguish the problem just mentioned and another problem sometimes called \"sentence splitting\". This problem aims to identify sentence boundaries in standard text but since this includes punctuation the problem is effectively reduced to deciding which of the symbols which potentially denote sentence boundaries (., !, ?) actually do. This problem is not trivial since these punctuation symbols do not always occur at the end of sentences. For example in the sentence \"Dr. Jones lectures at U.C.L.A.\" only the final full stop denotes the end of a sentence. For the sake of clarity we shall refer to the process of discovering sentence boundaries in standard punctuated text as \"punctuation disambiguation\" and that of finding them in unpunctuated ASR text as \"sentence boundary detection\".",
164
- "cite_spans": [],
165
- "ref_spans": [],
166
- "eq_spans": [],
167
- "section": "Good evening.",
168
- "sec_num": null
169
- },
170
- {
171
- "text": "Despite the potential application of technology which can carry out the sentence boundary detection task, there has been little research into the area. However, there has been work in the related field of punctuation disambiguation. Palmer and Hearst (1994) applied a neural network to the problem. They used the Brown Corpus for training and evaluation, noting that 90% of the full stops in this text indicate sentence boundaries. They used the part of speech information for the words surrounding a punctuation symbol as the input to a feed-forward neural network. But, as we mentioned, most part of speech taggers require sentence boundaries to be pre-determined and this potential circularity is avoided by using the prior probabilities for each token, determined from the Brown corpus markup. The network was trained on 573 potential sentence ending marks from the Wall Street Journal and tested on 27,294 items from the same corpus. 98.5% of punctuation marks were correctly disambiguated.",
172
- "cite_spans": [
173
- {
174
- "start": 233,
175
- "end": 257,
176
- "text": "Palmer and Hearst (1994)",
177
- "ref_id": "BIBREF12"
178
- }
179
- ],
180
- "ref_spans": [],
181
- "eq_spans": [],
182
- "section": "Related Work",
183
- "sec_num": "2"
184
- },
185
- {
186
- "text": "Reynar and Ratnaparkhi (1997) applied a maximum entropy approach to the problem. Their system considered only the first word to the left and right of any potential sentence boundary and claimed that examining wider context did not help. For both these words the prefix, suffix, presence of particular characters in the prefix or suffix, whether the candidate is honorific (Mr., Dr. etc.) and whether the candidate is a corporate designator (eg. Corp.) are features that are considered. This system was tested on the same corpus as Palmer and Hearst's system and correctly identified 98.8% of sentence boundaries. Mikheev (1998) optimised this approach and evaluated it on the same test corpus. An accuracy of 99.2477% was reported, to our knowledge this is the highest quoted result for this test set.",
187
- "cite_spans": [
188
- {
189
- "start": 613,
190
- "end": 627,
191
- "text": "Mikheev (1998)",
192
- "ref_id": "BIBREF10"
193
- }
194
- ],
195
- "ref_spans": [],
196
- "eq_spans": [],
197
- "section": "Related Work",
198
- "sec_num": "2"
199
- },
200
- {
201
- "text": "These three systems achieve very high results for the punctuation disambiguation task. It would seem, then, that this problem has largely been solved. However, it is not clear that these techniques will be as successful for ASR text. We now go on to describe a system which attempts a task similar to sentence boundary detection of ASR text.",
202
- "cite_spans": [],
203
- "ref_spans": [],
204
- "eq_spans": [],
205
- "section": "Related Work",
206
- "sec_num": "2"
207
- },
208
- {
209
- "text": "Beeferman et al. (1998) produced a system, \"CY-BERPUNC\", which added intra-sentence punctuation (i.e. commas) to the output of an ASR system. They mention that the comma is the most frequently used punctuation symbol and its correct insertion can make a text far more legible. CYBERPUNC operated by augmenting a standard trigram speech recognition model with information about commas; it accesses only lexical information. CYBERPUNC was tested by separating the trigram model from the ASR system and applying it to 2,317 sentences from the Wall Street Journal. The system achieved a precision of 75.6% and recall of 65.6% compared against the original punctuation in the text. 2 A further qualitative evaluation was carried out using 100 randomly-drawn output sentences from the system and 100 from the Wall Street Journal. Six human judges blindly marked each sentence as either acceptable or unacceptable. It was found that the Penn TreeBank sentences were 86% correct and the system output 66% correct. It is interesting that the human judges do not agree completely on the acceptability of many sentences from the Wall Street Journal.",
210
- "cite_spans": [],
211
- "ref_spans": [],
212
- "eq_spans": [],
213
- "section": "Related Work",
214
- "sec_num": "2"
215
- },
216
- {
217
- "text": "In the next section we go on to describe experiments which quantify the level of agreement that can be expected when humans carry out sentence boundary detection. Section 4 goes on to describe a computational approach to the problem.",
218
- "cite_spans": [],
219
- "ref_spans": [],
220
- "eq_spans": [],
221
- "section": "Related Work",
222
- "sec_num": "2"
223
- },
224
- {
225
- "text": "Beeferman et. al.'s experiments demonstrated that humans do not always agree on the acceptability of comma insertion and therefore it may be useful to determine how often they agree on the placing of sentence boundaries. To do this we carried out experiments using transcriptions of news programmes, specifically the transcriptions of two editions of the ~Precision and recall are complementary evaluation metrics commonly used in Information Retrieval (van Rijsbergen, 1979) . In this case precision is the percentage of commas proposed by the system which are correct while recall is the percentage of the commas occurring in the test corpus which the system identified.",
226
- "cite_spans": [
227
- {
228
- "start": 453,
229
- "end": 475,
230
- "text": "(van Rijsbergen, 1979)",
231
- "ref_id": "BIBREF16"
232
- }
233
- ],
234
- "ref_spans": [],
235
- "eq_spans": [],
236
- "section": "Determining Human Ability",
237
- "sec_num": "3"
238
- },
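The footnote's definitions translate directly into code. A minimal sketch, assuming boundaries are represented as sets of integer token positions and using the standard equally weighted F-measure; the helper name prf is ours, not the paper's:

```python
# Minimal sketch of precision, recall and F-measure for boundary tasks.
# `proposed` and `reference` are non-empty sets of token-boundary positions.
def prf(proposed, reference):
    correct = len(proposed & reference)
    precision = correct / len(proposed)   # fraction of proposals that are right
    recall = correct / len(reference)     # fraction of true boundaries found
    f_measure = 2 * precision * recall / (precision + recall)
    return precision, recall, f_measure

# e.g. 2 of 3 proposed boundaries correct, 2 of 4 true boundaries found:
print(prf({3, 9, 14}, {3, 9, 20, 27}))  # (0.666..., 0.5, 0.571...)
```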
239
- {
240
- "text": "BBC television program \"The Nine O'Clock News\" .3 The transcriptions consisted of punctuated mixed case text with sentences boundaries marked using a reserved character (\"; \"). These texts were produced by trained transcribers listening to the original program broadcast.",
241
- "cite_spans": [],
242
- "ref_spans": [],
243
- "eq_spans": [],
244
- "section": "Determining Human Ability",
245
- "sec_num": "3"
246
- },
247
- {
248
- "text": "Six experimental subjects were recruited. All subjects were educated to at least Bachelor's degree level and are either native English speakers or fluent second language speakers. Each subject was presented with the same text from which the sentence boundaries had been removed. The texts were transcriptions of two editions of the news program from 1997, containing 534 sentences and represented around 50 minutes of broadcast news. The subjects were randomly split into two groups. The subjects in the first group (subjects 1-3) were presented with the text stripped of punctuation and converted to upper case. This text simulated ASR text with no errors in the transcription. The remaining three subjects (4-6) were presented with the same text with punctuation removed but case information retained (i.e. mixed case text). This simulated unpunctuated standard text. All subjects were asked to add sentence boundaries to the text whenever they thought they occurred.",
249
- "cite_spans": [],
250
- "ref_spans": [],
251
- "eq_spans": [],
252
- "section": "Determining Human Ability",
253
- "sec_num": "3"
254
- },
255
- {
256
- "text": "The process of determining human ability at some linguistic task is generally made difficult by the lack of an appropriate reference. Often all we have to compare one person's judgement with is that of another. For example, there have been attempts to determine the level of performance which can be expected when humans perform word sense disambiguation (Fellbaum et al., 1998) but these have simply compared some human judgements against others with one being chosen as the \"expert\". We have already seen, in Section 2, that there is a significant degree of human disagreement over the acceptability of intra-sentential punctuation. The human transcribers of the \"Nine O'Clock News\" have access to the original news story which contains more information than just the transcription. Under these conditions it is reasonable to consider their opinion as expert. Table 1 shows the performance of the human subjects compared to the reference transcripts. 4",
257
- "cite_spans": [
258
- {
259
- "start": 355,
260
- "end": 378,
261
- "text": "(Fellbaum et al., 1998)",
262
- "ref_id": "BIBREF7"
263
- }
264
- ],
265
- "ref_spans": [
266
- {
267
- "start": 862,
268
- "end": 869,
269
- "text": "Table 1",
270
- "ref_id": null
271
- }
272
- ],
273
- "eq_spans": [],
274
- "section": "Determining Human Ability",
275
- "sec_num": "3"
276
- },
277
- {
278
- "text": "An algorithm was implemented to provide a baseline tagging of the text. The average length of sentences in our text is 19 words and the baseline algorithm randomly assigns a sentence break at each word boundary with a probability of ~. The two annotators labelled \"random\" show the results when this algorithm is applied. This method produced a 3This is a 25 minute long television news program broadcast in the United Kingdom on Monday to Friday evenings.",
279
- "cite_spans": [],
280
- "ref_spans": [],
281
- "eq_spans": [],
282
- "section": "Determining Human Ability",
283
- "sec_num": "3"
284
- },
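A minimal sketch of this baseline, assuming the garbled probability above was 1/19 (the reciprocal of the 19-word average sentence length); the function name is illustrative:

```python
import random

# Random baseline: insert a sentence break after each word boundary with
# probability 1/19, so the expected sentence length matches the corpus
# average of 19 words.
def random_boundaries(n_tokens, p=1.0 / 19):
    return {i for i in range(1, n_tokens) if random.random() < p}

print(sorted(random_boundaries(100)))  # e.g. [13, 31, 64, 88]
```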
285
- {
286
- "text": "4F-measure (F) is a weighted harmonic combining precision (P) and recall (R) The performance of the human annotators on the upper case text is quite significantly lower than the reported performance of the algorithms which performed punctuation disambiguation on standard text as described in Section 2. This suggests that the performance which may be obtained for this task may be lower than has been achieved for standard text. ~Sarther insight into the task can be gained from determining the degree to which the subjects agreed. Carletta (1996) argues that the kappa statistic (a) should be adopted to judge annotator consistency for classification tasks in the area of discourse and dialogue analysis. It is worth noting that the problem of sentence boundary detection presented so far in this paper has been formulated as a classification task in which each token boundary has to be classifted as either being a sentence boundary or not. Carletta argues that several incompatible measures of annotator agreement have been used in discourse analysis, making comparison impossible. Her solution is to look to the field of content analysis, which has already experienced these problems, and adopt their solution of using the kappa statistic. This determines the difference between the observed agreement for a linguistic task and that which would be expected by chance. It is calculated according to formula 1, where Pr(A) is the proportion of times the annotators agree and Pr(E) the proportion which would be expected by chance. Detailed instructions on calculating these probabilities are described by Siegel and Castellan (1988) .",
287
- "cite_spans": [
288
- {
289
- "start": 73,
290
- "end": 76,
291
- "text": "(R)",
292
- "ref_id": null
293
- },
294
- {
295
- "start": 533,
296
- "end": 548,
297
- "text": "Carletta (1996)",
298
- "ref_id": "BIBREF3"
299
- },
300
- {
301
- "start": 1608,
302
- "end": 1635,
303
- "text": "Siegel and Castellan (1988)",
304
- "ref_id": "BIBREF14"
305
- }
306
- ],
307
- "ref_spans": [],
308
- "eq_spans": [],
309
- "section": "Determining Human Ability",
310
- "sec_num": "3"
311
- },
312
- {
313
- "text": "(1) 1 -Pr (E) The value of the kappa statistic ranges between 1 (perfect agreement) and 0 (the level which would be expected by chance). It has been claimed that content analysis researchers usually regard a > .8 to demonstrate good reliability and .67 < ~ < .8 al-lows tentative conclusions to be drawn (see Carletta (1996) ).",
314
- "cite_spans": [
315
- {
316
- "start": 10,
317
- "end": 13,
318
- "text": "(E)",
319
- "ref_id": null
320
- },
321
- {
322
- "start": 309,
323
- "end": 324,
324
- "text": "Carletta (1996)",
325
- "ref_id": "BIBREF3"
326
- }
327
- ],
328
- "ref_spans": [],
329
- "eq_spans": [],
330
- "section": "Pr(A) -Pr(E) =",
331
- "sec_num": null
332
- },
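A minimal sketch of formula 1 for the two-annotator case (the paper's multi-annotator calculation follows Siegel and Castellan (1988); the label sequences below are illustrative):

```python
from collections import Counter

# Kappa (formula 1) for two annotators who label every word boundary as
# no_boundary (0) or sentence_boundary (1).
def kappa(labels_a, labels_b):
    n = len(labels_a)
    # Pr(A): observed proportion of boundaries the annotators agree on.
    pr_a = sum(a == b for a, b in zip(labels_a, labels_b)) / n
    # Pr(E): agreement expected by chance, computed from each annotator's
    # marginal distribution over the two classes.
    dist_a, dist_b = Counter(labels_a), Counter(labels_b)
    pr_e = sum((dist_a[c] / n) * (dist_b[c] / n) for c in (0, 1))
    return (pr_a - pr_e) / (1 - pr_e)

# Mostly-0 label sequences agree often by chance alone, which is why raw
# agreement overstates reliability:
a = [0, 0, 0, 1, 0, 0, 0, 0, 1, 0]
b = [0, 0, 0, 1, 0, 0, 0, 1, 0, 0]
print(kappa(a, b))  # 0.375, despite 80% raw agreement
```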
333
- {
334
- "text": "We began to analyse the data by computing the kappa statistic for both sets of annotators. Among the two annotators who marked the mixed case (subjects 4 and 5) there was an observed kappa value of 0.98, while there was a measure of 0.91 for the three subjects who annotated the single case text. These values are high and suggest a strong level of agreement between the annotators. However, manual analysis of the annotated texts suggested that the subjects did not agree on many cases. We then added the texts annotated by the \"random\" annotation algorithm and calculated the new ~ values. It was found that the mixed case test produced a kappa value of 0.92 and the upper case text 0.91. These values would still suggest a high level of agreement although the sentences produced by our random algorithm were nonsensical.",
335
- "cite_spans": [],
336
- "ref_spans": [],
337
- "eq_spans": [],
338
- "section": "Pr(A) -Pr(E) =",
339
- "sec_num": null
340
- },
341
- {
342
- "text": "The problem seems to be that most word boundaries in a text are not sentence boundaries. Therefore we could compare the subjects' annotations who had not agreed on any sentence boundaries but find that they agreed most word boundaries were not sentence boundaries. The same problem will effect other standard measures of inter-annotator agreement such as the Cramer, Phi and Kendall coefficients (see Siegel and Castellan (1988) ). Carletta mentions this problem, asking what the difference would be if the kappa statistic were computed across \"clause boundaries, transcribed word boundaries, and transcribed phoneme boundaries\" (Carletta, 1996, p. 252) rather than the sentence boundaries she suggested. It seems likely that more meaningful ~ values would be obtained if we restricted to the boundaries between clauses rather than all token boundaries. However, it is difficult to imagine how clauses could be identified without parsing and most parsers require part of speech tagged input text. But, as we already mentioned, part of speech taggers often require input text split into sentences. Consequently, there is a lack of available systems for splitting ASR text into grammatical clauses.",
343
- "cite_spans": [
344
- {
345
- "start": 401,
346
- "end": 428,
347
- "text": "Siegel and Castellan (1988)",
348
- "ref_id": "BIBREF14"
349
- },
350
- {
351
- "start": 629,
352
- "end": 653,
353
- "text": "(Carletta, 1996, p. 252)",
354
- "ref_id": null
355
- }
356
- ],
357
- "ref_spans": [],
358
- "eq_spans": [],
359
- "section": "Pr(A) -Pr(E) =",
360
- "sec_num": null
361
- },
362
- {
363
- "text": "The remainder of this paper describes an implemented program which attempts sentence boundary detection. The approach is based around the Timbl memory-based learning algorithm (Daelemans et al., 1999) which we previously found to be very successful when applied to the word sense disambiguation problem (Stevenson and Wilks, 1999) . Memory-based learning, also known as case-based and lazy learning, operates by memorising a set of training examples and categorising new cases by assigning them the class of the most similar learned example. We apply this methodology to the sentence boundary detection task by presenting Timbl with examples of word boundaries from a training text, each of which is categorised as either sentence_boundary or no_boundary. Unseen examples are then compared and categorised with the class of the most similar example. We shall not discuss the method by which Timbl determines the most similar training example which is described by Daelemans et al. (1999) .",
364
- "cite_spans": [
365
- {
366
- "start": 176,
367
- "end": 200,
368
- "text": "(Daelemans et al., 1999)",
369
- "ref_id": "BIBREF6"
370
- },
371
- {
372
- "start": 303,
373
- "end": 330,
374
- "text": "(Stevenson and Wilks, 1999)",
375
- "ref_id": "BIBREF15"
376
- },
377
- {
378
- "start": 964,
379
- "end": 987,
380
- "text": "Daelemans et al. (1999)",
381
- "ref_id": "BIBREF6"
382
- }
383
- ],
384
- "ref_spans": [],
385
- "eq_spans": [],
386
- "section": "A Computational Approach to Sentence Boundary Detection",
387
- "sec_num": "4"
388
- },
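A minimal sketch of the memory-based idea, with a plain feature-overlap similarity standing in for Timbl's weighted metrics (Daelemans et al. (1999) describe the real ones); the toy feature vectors are illustrative:

```python
# Memory-based (lazy) learning: memorise training examples verbatim and
# label a new instance with the class of the most similar stored example.
def overlap(v1, v2):
    return sum(f1 == f2 for f1, f2 in zip(v1, v2))  # matching feature count

def classify(instance, training):
    # training is a list of (feature_vector, label) pairs.
    _, label = max(training, key=lambda example: overlap(instance, example[0]))
    return label

training = [
    (("said", "VBD", "the", "DT"), "no_boundary"),
    (("out", "RP", "and", "CC"), "sentence_boundary"),
]
print(classify(("left", "VBD", "a", "DT"), training))  # no_boundary
```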
389
- {
390
- "text": "Following the work done on punctuation disambiguation and that of Beeferman et. al. on comma insertion (Section 2), we used the Wall Street Journal text for this experiment. These texts are reliably part of speech tagged 5 and sentence boundaries can be easily derived from the corpus. This text was initially altered so as to remove all punctuation and map all characters into upper case. 90% of the corpus, containing 965 sentence breaks, was used as a training corpus with the remainder, which contained 107 sentence breaks, being held-back as unseen test data. The first stage was to extract some statistics from the training corpus. We examined the training corpus and computed, for each word in the text, the probability that it started a sentence and the probability that it ended a sentence. In addition, for each part of speech tag we also computed the probability that it is assigned to the first word in a sentence and the probability that it is assigned to the last word. 6 Each word boundary in the corpus was translated to a feature-vector representation consisting of 13 elements, shown in Table 2 . Vectors in the test corpus are in a similar format, the difference being that the classification (feature 13) is not included.",
391
- "cite_spans": [],
392
- "ref_spans": [
393
- {
394
- "start": 1105,
395
- "end": 1112,
396
- "text": "Table 2",
397
- "ref_id": "TABREF1"
398
- }
399
- ],
400
- "eq_spans": [],
401
- "section": "A Computational Approach to Sentence Boundary Detection",
402
- "sec_num": "4"
403
- },
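A minimal sketch of the corpus statistics and the twelve predictive features just described (feature 13, the classification, is appended during training). The input format, stop-word list and function names are our assumptions, not the paper's:

```python
from collections import Counter

# Corpus statistics: for every word and POS tag, the probability that it
# begins or ends a sentence, estimated from a sentence-split training corpus.
def boundary_stats(sentences):  # sentences: lists of (word, tag) pairs
    w_first, w_last, w_all = Counter(), Counter(), Counter()
    t_first, t_last, t_all = Counter(), Counter(), Counter()
    for sent in sentences:
        for i, (w, t) in enumerate(sent):
            w_all[w] += 1
            t_all[t] += 1
            if i == 0:
                w_first[w] += 1
                t_first[t] += 1
            if i == len(sent) - 1:
                w_last[w] += 1
                t_last[t] += 1
    ratio = lambda num, den: {k: num[k] / den[k] for k in den}
    return (ratio(w_first, w_all), ratio(w_last, w_all),
            ratio(t_first, t_all), ratio(t_last, t_all))

STOP_WORDS = {"THE", "A", "OF", "AND", "TO"}  # illustrative subset

# Features 1-12 for the boundary between `prev` and `nxt` (word, tag) pairs.
def boundary_vector(prev, nxt, p_w_start, p_w_end, p_t_start, p_t_end):
    (pw, pt), (nw, nt) = prev, nxt
    return (pw, p_w_end.get(pw, 0.0), pt, p_t_end.get(pt, 0.0),
            pw.upper() in STOP_WORDS, pw[:1].isupper(),
            nw, p_w_start.get(nw, 0.0), nt, p_t_start.get(nt, 0.0),
            nw.upper() in STOP_WORDS, nw[:1].isupper())
```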
404
- {
405
- "text": "The results obtained are shown in the top row of Table 3 . Both precision and recall are quite promising under these conditions. However, this text is different from ASR text in one important way: the text is mixed case. The experimented was repeated with capitalisation information removed; that is, features 6 and 12 were removed from the featurevectors. The results form this experiment are shown in the bottom row of Table 3 . It can be seen that the recorded performance is far lower when capitalisation information is not used, indicating that this is an important feature for the task.",
406
- "cite_spans": [],
407
- "ref_spans": [
408
- {
409
- "start": 49,
410
- "end": 56,
411
- "text": "Table 3",
412
- "ref_id": null
413
- },
414
- {
415
- "start": 421,
416
- "end": 428,
417
- "text": "Table 3",
418
- "ref_id": null
419
- }
420
- ],
421
- "eq_spans": [],
422
- "section": "A Computational Approach to Sentence Boundary Detection",
423
- "sec_num": "4"
424
- },
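Simulating single-case ASR text then only requires dropping the two capitalisation flags before training and testing. A sketch, assuming 0-based indexing into the 12-feature vector (indices 5 and 11 correspond to features 6 and 12):

```python
# Remove the capitalisation flags (features 6 and 12) from each vector to
# simulate upper-case ASR input.
def drop_case_features(vector):
    return tuple(f for i, f in enumerate(vector) if i not in (5, 11))
```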
425
- {
426
- "text": "These experiments have shown that it is much easier to add sentence boundary information to mixed case test, which is essentially standard text with punctuation removed, than ASR text, even as-5Applying a priori tag probability distributions could have been used rather than the tagging in the corpus as such reliable annotations may not be available for the output of an ASR system. Thus, the current experiments should be viewed as making an optimistic assumption. eWe attempted to smooth these probabilities using Good-Turing frequency estimation (Gale and Sampson, 1996) but found that it had no effect on the final results. Feature 1 2 3 4 5 6 7 8 9 10 11 12 13 Preceding word Probability preceding word ends a sentence Part of speech tag assigned to preceding word Probability that part of speech tag (feature 3) is assigned to last word in a sentence Flag indicating whether preceding word is a stop word Flag indicating whether preceding word is capitalised Following word Probability following word begins a sentence Part of speech tag assigned to following word Probability that part of speech (feature 9) is assigned to first word in a sentence Flag indicating whether following word is a stop word Flag indicating whether following word is capitalised word sentence_boundary or no_boundary Table 3 : Results of the sentence boundary detection program suming a zero word error rate. This result is in agreement with the results from the human annotation experiments described in Section 3. However, there is a far greater difference between the automatic system's performance on standard and ASR text than the human annotators.",
427
- "cite_spans": [
428
- {
429
- "start": 550,
430
- "end": 574,
431
- "text": "(Gale and Sampson, 1996)",
432
- "ref_id": "BIBREF8"
433
- }
434
- ],
435
- "ref_spans": [
436
- {
437
- "start": 629,
438
- "end": 679,
439
- "text": "Feature 1 2 3 4 5 6 7 8 9 10 11 12 13",
440
- "ref_id": "TABREF1"
441
- },
442
- {
443
- "start": 1315,
444
- "end": 1322,
445
- "text": "Table 3",
446
- "ref_id": null
447
- }
448
- ],
449
- "eq_spans": [],
450
- "section": "A Computational Approach to Sentence Boundary Detection",
451
- "sec_num": "4"
452
- },
453
- {
454
- "text": "Reynar and Ratnaparkhi (1997) (Section 2) argued that a context of one word either side is sufficient for the punctuation disambiguation problem. However, the results of our system suggest that this may be insufficient for the sentence boundary detection problem even assuming reliable part of speech tags (cf note 5).",
455
- "cite_spans": [],
456
- "ref_spans": [],
457
- "eq_spans": [],
458
- "section": "Position",
459
- "sec_num": null
460
- },
461
- {
462
- "text": "These experiments do not make use of prosodic information which may be included as part of the ASR output. Such information includes pause length, pre-pausal lengthening and pitch declination. If this information was made available in the form of extra features to a machine learning algorithm then it is possible that the results will improve.",
463
- "cite_spans": [],
464
- "ref_spans": [],
465
- "eq_spans": [],
466
- "section": "Position",
467
- "sec_num": null
468
- },
469
- {
470
- "text": "This paper has introduced the problem of sentence boundary detection on the text produced by an ASR system as an area of application for NLP technology. An attempt was made to determine the level of human performance which could be expected for the task. It was found that there was a noticeable difference between the observed performance for mixed and upper case text. It was found that the kappa statistic, a commonly used method for calculating inter-annotator agreement, could not be applied directly in this situation.",
471
- "cite_spans": [],
472
- "ref_spans": [],
473
- "eq_spans": [],
474
- "section": "Conclusion",
475
- "sec_num": "5"
476
- },
477
- {
478
- "text": "A memory-based system for identifying sentence boundaries in ASR text was implemented. There was a noticeable difference when the same system was applied to text which included case information demonstrating that this is an important feature for the problem.",
479
- "cite_spans": [],
480
- "ref_spans": [],
481
- "eq_spans": [],
482
- "section": "Conclusion",
483
- "sec_num": "5"
484
- },
485
- {
486
- "text": "This paper does not propose to offer a solution to the sentence boundary detection problem for ASR transcripts. However, our aim has been to highlight the problem as one worthy of further exploration within the field of NLP and to establish some baselines (human and algorithmic) against which further work may be compared.",
487
- "cite_spans": [],
488
- "ref_spans": [],
489
- "eq_spans": [],
490
- "section": "Conclusion",
491
- "sec_num": "5"
492
- }
493
- ],
494
- "back_matter": [
495
- {
496
- "text": "The authors would like to thank Steve Renals and Yoshihiko Gotoh for providing the data for human annotation experiments and for several useful conversations. They are also grateful to the following people who took part in the annotation experiment: Paul Clough, George Demetriou, Lisa Ferry, Michael Oakes and Andrea Setzer.",
497
- "cite_spans": [],
498
- "ref_spans": [],
499
- "eq_spans": [],
500
- "section": "Acknowledgements",
501
- "sec_num": null
502
- }
503
- ],
504
- "bib_entries": {
505
- "BIBREF0": {
506
- "ref_id": "b0",
507
- "title": "CY-BERPUNC: A lightweight punctuation annotation system for speech",
508
- "authors": [
509
- {
510
- "first": "D",
511
- "middle": [],
512
- "last": "Beeferman",
513
- "suffix": ""
514
- },
515
- {
516
- "first": "A",
517
- "middle": [],
518
- "last": "Berger",
519
- "suffix": ""
520
- },
521
- {
522
- "first": "J",
523
- "middle": [],
524
- "last": "Lafferty",
525
- "suffix": ""
526
- }
527
- ],
528
- "year": 1998,
529
- "venue": "Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing",
530
- "volume": "",
531
- "issue": "",
532
- "pages": "689--692",
533
- "other_ids": {},
534
- "num": null,
535
- "urls": [],
536
- "raw_text": "D. Beeferman, A. Berger, and J. Lafferty. 1998. CY- BERPUNC: A lightweight punctuation annota- tion system for speech. In Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing, pages 689-692, Seattle, WA.",
537
- "links": null
538
- },
539
- "BIBREF1": {
540
- "ref_id": "b1",
541
- "title": "A simple rule-based part of speech tagger",
542
- "authors": [
543
- {
544
- "first": "",
545
- "middle": [],
546
- "last": "Brill",
547
- "suffix": ""
548
- }
549
- ],
550
- "year": 1992,
551
- "venue": "Proceeding of the Third Conference on Applied Natural Language Processing (ANLP-92)",
552
- "volume": "",
553
- "issue": "",
554
- "pages": "152--155",
555
- "other_ids": {},
556
- "num": null,
557
- "urls": [],
558
- "raw_text": "Brill. 1992. A simple rule-based part of speech tagger. In Proceeding of the Third Conference on Applied Natural Language Processing (ANLP-92), pages 152-155, Trento, Italy.",
559
- "links": null
560
- },
561
- "BIBREF2": {
562
- "ref_id": "b2",
563
- "title": "Users Reference Guide for the British National Corpus",
564
- "authors": [
565
- {
566
- "first": "L",
567
- "middle": [],
568
- "last": "Burnard",
569
- "suffix": ""
570
- }
571
- ],
572
- "year": 1995,
573
- "venue": "",
574
- "volume": "",
575
- "issue": "",
576
- "pages": "",
577
- "other_ids": {},
578
- "num": null,
579
- "urls": [],
580
- "raw_text": "L. Burnard, 1995. Users Reference Guide for the British National Corpus. Oxford University Com- puting Services.",
581
- "links": null
582
- },
583
- "BIBREF3": {
584
- "ref_id": "b3",
585
- "title": "Assessing agreement on classification tasks: the kappa statistic",
586
- "authors": [
587
- {
588
- "first": "J",
589
- "middle": [],
590
- "last": "Carletta",
591
- "suffix": ""
592
- }
593
- ],
594
- "year": 1996,
595
- "venue": "Computational Linguistics",
596
- "volume": "22",
597
- "issue": "2",
598
- "pages": "249--254",
599
- "other_ids": {},
600
- "num": null,
601
- "urls": [],
602
- "raw_text": "J. Carletta. 1996. Assessing agreement on classific- ation tasks: the kappa statistic. Computational Linguistics, 22(2):249-254.",
603
- "links": null
604
- },
605
- "BIBREF4": {
606
- "ref_id": "b4",
607
- "title": "HUB-4 Named Entity Task Definition (version 4.8)",
608
- "authors": [
609
- {
610
- "first": "N",
611
- "middle": [],
612
- "last": "Chinchor",
613
- "suffix": ""
614
- },
615
- {
616
- "first": "P",
617
- "middle": [],
618
- "last": "Robinson",
619
- "suffix": ""
620
- },
621
- {
622
- "first": "E",
623
- "middle": [],
624
- "last": "Brown",
625
- "suffix": ""
626
- }
627
- ],
628
- "year": 1998,
629
- "venue": "",
630
- "volume": "",
631
- "issue": "",
632
- "pages": "",
633
- "other_ids": {},
634
- "num": null,
635
- "urls": [],
636
- "raw_text": "N. Chinchor, P. Robinson, and E. Brown. 1998. HUB-4 Named Entity Task Defini- tion (version 4.8). Technical report, SAIC. http ://www. nist. gov/speech/hub4-98.",
637
- "links": null
638
- },
639
- "BIBREF5": {
640
- "ref_id": "b5",
641
- "title": "Survey of the State of the Art in Human Language Technology",
642
- "authors": [],
643
- "year": 1996,
644
- "venue": "",
645
- "volume": "",
646
- "issue": "",
647
- "pages": "",
648
- "other_ids": {},
649
- "num": null,
650
- "urls": [],
651
- "raw_text": "R. Cole, editor. 1996. Survey of the State of the Art in Human Language Technology. Available at: http://cslu.cse.ogi.edu/HLTsurvey/HLTsurvey.html. Site visited 17/11/99.",
652
- "links": null
653
- },
654
- "BIBREF6": {
655
- "ref_id": "b6",
656
- "title": "TiMBL: Tilburg memory based learner version 2.0, reference guide",
657
- "authors": [
658
- {
659
- "first": "W",
660
- "middle": [],
661
- "last": "Daelemans",
662
- "suffix": ""
663
- },
664
- {
665
- "first": "J",
666
- "middle": [],
667
- "last": "Zavrel",
668
- "suffix": ""
669
- },
670
- {
671
- "first": "K",
672
- "middle": [],
673
- "last": "Van Der Sloot",
674
- "suffix": ""
675
- },
676
- {
677
- "first": "A",
678
- "middle": [],
679
- "last": "Van Den",
680
- "suffix": ""
681
- },
682
- {
683
- "first": "",
684
- "middle": [],
685
- "last": "Bosch",
686
- "suffix": ""
687
- }
688
- ],
689
- "year": 1999,
690
- "venue": "",
691
- "volume": "",
692
- "issue": "",
693
- "pages": "",
694
- "other_ids": {},
695
- "num": null,
696
- "urls": [],
697
- "raw_text": "W. Daelemans, J. Zavrel, K. van der Sloot, and A. van den Bosch. 1999. TiMBL: Tilburg memory based learner version 2.0, reference guide. Technical report, ILK Technical Report 98-03. ILK Reference Report 99-01, Available from http ://ilk. kub. nl/\" ilk/papers/ilk9901, ps. gz.",
698
- "links": null
699
- },
700
- "BIBREF7": {
701
- "ref_id": "b7",
702
- "title": "Matching words to senses in Word-Net: Naive vs. expert differentiation of senses",
703
- "authors": [
704
- {
705
- "first": "C",
706
- "middle": [],
707
- "last": "Fellbaum",
708
- "suffix": ""
709
- },
710
- {
711
- "first": "J",
712
- "middle": [],
713
- "last": "Grabowski",
714
- "suffix": ""
715
- },
716
- {
717
- "first": "S",
718
- "middle": [],
719
- "last": "Landes",
720
- "suffix": ""
721
- },
722
- {
723
- "first": "A",
724
- "middle": [],
725
- "last": "Banmann",
726
- "suffix": ""
727
- }
728
- ],
729
- "year": 1998,
730
- "venue": "",
731
- "volume": "",
732
- "issue": "",
733
- "pages": "",
734
- "other_ids": {},
735
- "num": null,
736
- "urls": [],
737
- "raw_text": "C. Fellbaum, J. Grabowski, S. Landes, and A. Ban- mann. 1998. Matching words to senses in Word- Net: Naive vs. expert differentiation of senses. In C. Fellbaum, editor, WordNet: An electronic lex- ieal database and some applications. MIT Press, Cambridge, MA.",
738
- "links": null
739
- },
740
- "BIBREF8": {
741
- "ref_id": "b8",
742
- "title": "Good-Turing frequency estimation without tears",
743
- "authors": [
744
- {
745
- "first": "W",
746
- "middle": [],
747
- "last": "Gale",
748
- "suffix": ""
749
- },
750
- {
751
- "first": "G",
752
- "middle": [],
753
- "last": "Sampson",
754
- "suffix": ""
755
- }
756
- ],
757
- "year": 1996,
758
- "venue": "Journal of Quantitave Linguistics",
759
- "volume": "2",
760
- "issue": "3",
761
- "pages": "217--254",
762
- "other_ids": {},
763
- "num": null,
764
- "urls": [],
765
- "raw_text": "W. Gale and G. Sampson. 1996. Good-Turing frequency estimation without tears. Journal of Quantitave Linguistics, 2(3):217-37.",
766
- "links": null
767
- },
768
- "BIBREF9": {
769
- "ref_id": "b9",
770
- "title": "Information extraction from broadcast news",
771
- "authors": [
772
- {
773
- "first": "Y",
774
- "middle": [],
775
- "last": "Gotoh",
776
- "suffix": ""
777
- },
778
- {
779
- "first": "S",
780
- "middle": [],
781
- "last": "Renals",
782
- "suffix": ""
783
- }
784
- ],
785
- "year": 2000,
786
- "venue": "Philosophical Transactions of the Royal Society of London, series A: Mathematical, Physical and Engineering Sciences",
787
- "volume": "",
788
- "issue": "",
789
- "pages": "",
790
- "other_ids": {},
791
- "num": null,
792
- "urls": [],
793
- "raw_text": "Y. Gotoh and S. Renals. 2000. Information extrac- tion from broadcast news. Philosophical Trans- actions of the Royal Society of London, series A: Mathematical, Physical and Engineering Sciences. (to appear).",
794
- "links": null
795
- },
796
- "BIBREF10": {
797
- "ref_id": "b10",
798
- "title": "Feature lattices for maximum entropy modelling",
799
- "authors": [
800
- {
801
- "first": "A",
802
- "middle": [],
803
- "last": "Mikheev",
804
- "suffix": ""
805
- }
806
- ],
807
- "year": 1998,
808
- "venue": "Proceedings of the 36th Meeting of the Association for Computational Linguistics (COLING-ACL-98)",
809
- "volume": "",
810
- "issue": "",
811
- "pages": "848--854",
812
- "other_ids": {},
813
- "num": null,
814
- "urls": [],
815
- "raw_text": "A. Mikheev. 1998. Feature lattices for maximum en- tropy modelling. In Proceedings of the 36th Meet- ing of the Association for Computational Linguist- ics (COLING-ACL-98), pages 848-854, Montreal, Canada.",
816
- "links": null
817
- },
818
- "BIBREF11": {
819
- "ref_id": "b11",
820
- "title": "CommandTalk: A Spokcaa-Language Interface to Battlefield Simulations",
821
- "authors": [
822
- {
823
- "first": "J",
824
- "middle": [],
825
- "last": "Moore",
826
- "suffix": ""
827
- },
828
- {
829
- "first": "H",
830
- "middle": [],
831
- "last": "Dowding",
832
- "suffix": ""
833
- },
834
- {
835
- "first": "J",
836
- "middle": [],
837
- "last": "Bratt",
838
- "suffix": ""
839
- },
840
- {
841
- "first": "Y",
842
- "middle": [],
843
- "last": "Gawron",
844
- "suffix": ""
845
- },
846
- {
847
- "first": "A",
848
- "middle": [],
849
- "last": "Gorfu",
850
- "suffix": ""
851
- },
852
- {
853
- "first": "",
854
- "middle": [],
855
- "last": "Cheyer",
856
- "suffix": ""
857
- }
858
- ],
859
- "year": 1997,
860
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
861
- "volume": "",
862
- "issue": "",
863
- "pages": "1--7",
864
- "other_ids": {},
865
- "num": null,
866
- "urls": [],
867
- "raw_text": "Moore, J. Dowding, H. Bratt, J. Gawron, Y. Gorfu, and A. Cheyer. 1997. CommandTalk: A Spokcaa-Language Interface to Battlefield Simu- lations. In Proceedings of the Fifth Conference on Applied Natural Language Processing, pages 1-7, Washington, DC.",
868
- "links": null
869
- },
870
- "BIBREF12": {
871
- "ref_id": "b12",
872
- "title": "Adaptive sentence boundary disambiguation",
873
- "authors": [
874
- {
875
- "first": "D",
876
- "middle": [],
877
- "last": "Palmer",
878
- "suffix": ""
879
- },
880
- {
881
- "first": "M",
882
- "middle": [],
883
- "last": "Hearst",
884
- "suffix": ""
885
- }
886
- ],
887
- "year": 1994,
888
- "venue": "Proceedings of the 1994 Conference on Applied Natural Language Processing",
889
- "volume": "",
890
- "issue": "",
891
- "pages": "78--83",
892
- "other_ids": {},
893
- "num": null,
894
- "urls": [],
895
- "raw_text": "D. Palmer and M. Hearst. 1994. Adaptive sen- tence boundary disambiguation. In Proceedings of the 1994 Conference on Applied Natural Language Processing, pages 78-83, Stutgart, Germany.",
896
- "links": null
897
- },
898
- "BIBREF13": {
899
- "ref_id": "b13",
900
- "title": "A maximum entropy approach to identifying sentence boundries",
901
- "authors": [
902
- {
903
- "first": "J",
904
- "middle": [],
905
- "last": "Reynar",
906
- "suffix": ""
907
- },
908
- {
909
- "first": "A",
910
- "middle": [],
911
- "last": "Ratnaparkhi",
912
- "suffix": ""
913
- }
914
- ],
915
- "year": 1997,
916
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
917
- "volume": "",
918
- "issue": "",
919
- "pages": "16--19",
920
- "other_ids": {},
921
- "num": null,
922
- "urls": [],
923
- "raw_text": "J. Reynar and A. Ratnaparkhi. 1997. A max- imum entropy approach to identifying sentence boundries. In Proceedings of the Fifth Conference on Applied Natural Language Processing, pages 16-19, Washington, D.C.",
924
- "links": null
925
- },
926
- "BIBREF14": {
927
- "ref_id": "b14",
928
- "title": "Nonparametrie Statistics for the Behavioural Sciences",
929
- "authors": [
930
- {
931
- "first": "S",
932
- "middle": [],
933
- "last": "Siegel",
934
- "suffix": ""
935
- },
936
- {
937
- "first": "N",
938
- "middle": [],
939
- "last": "Castellan",
940
- "suffix": ""
941
- }
942
- ],
943
- "year": 1988,
944
- "venue": "",
945
- "volume": "",
946
- "issue": "",
947
- "pages": "",
948
- "other_ids": {},
949
- "num": null,
950
- "urls": [],
951
- "raw_text": "S. Siegel and N. Castellan. 1988. Nonparametrie Statistics for the Behavioural Sciences. McGraw- Hill, second edition.",
952
- "links": null
953
- },
954
- "BIBREF15": {
955
- "ref_id": "b15",
956
- "title": "Combining weak knowledge sources for sense disambiguation",
957
- "authors": [
958
- {
959
- "first": "M",
960
- "middle": [],
961
- "last": "Stevenson",
962
- "suffix": ""
963
- },
964
- {
965
- "first": "Y",
966
- "middle": [],
967
- "last": "Wilks",
968
- "suffix": ""
969
- }
970
- ],
971
- "year": 1999,
972
- "venue": "Proceedings of the Sixteenth International Joint Conference on Artificial Intelligence",
973
- "volume": "",
974
- "issue": "",
975
- "pages": "884--889",
976
- "other_ids": {},
977
- "num": null,
978
- "urls": [],
979
- "raw_text": "M. Stevenson and Y. Wilks. 1999. Combining weak knowledge sources for sense disambiguation. In Proceedings of the Sixteenth International Joint Conference on Artificial Intelligence, pages 884- 889. Stockholm, Sweden.",
980
- "links": null
981
- },
982
- "BIBREF16": {
983
- "ref_id": "b16",
984
- "title": "Information Retrieval",
985
- "authors": [
986
- {
987
- "first": "C",
988
- "middle": [],
989
- "last": "Van Rijsbergen",
990
- "suffix": ""
991
- }
992
- ],
993
- "year": 1979,
994
- "venue": "",
995
- "volume": "",
996
- "issue": "",
997
- "pages": "",
998
- "other_ids": {},
999
- "num": null,
1000
- "urls": [],
1001
- "raw_text": "C. van Rijsbergen. 1979. Information Retrieval. Butterworths, London.",
1002
- "links": null
1003
- }
1004
- },
1005
- "ref_entries": {
1006
- "TABREF1": {
1007
- "text": "",
1008
- "type_str": "table",
1009
- "num": null,
1010
- "content": "<table><tr><td>: Features used in Timbl representation</td></tr><tr><td>Case information [I P I R I F</td></tr><tr><td>Applied I 78 [ 75 [ 76</td></tr><tr><td>Not applied 36 35 35</td></tr></table>",
1011
- "html": null
1012
- }
1013
- }
1014
- }
1015
- }
Full_text_JSON/prefixA/json/A00/A00-1013.json DELETED
@@ -1,894 +0,0 @@
1
- {
2
- "paper_id": "A00-1013",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:00.699382Z"
6
- },
7
- "title": "DP: A Detector for Presuppositions in survey questions",
8
- "authors": [
9
- {
10
- "first": "Katja",
11
- "middle": [],
12
- "last": "Wiemer-Hastings",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Memphis Memphis",
17
- "location": {
18
- "postCode": "38152",
19
- "region": "TN"
20
- }
21
- },
22
- "email": ""
23
- },
24
- {
25
- "first": "Peter",
26
- "middle": [],
27
- "last": "Wiemer-Hastings",
28
- "suffix": "",
29
- "affiliation": {
30
- "laboratory": "",
31
- "institution": "University of Edinburgh",
32
- "location": {
33
- "addrLine": "2 Buccleuch Place Edinburgh",
34
- "postCode": "EH8 9LW",
35
- "country": "UK"
36
- }
37
- },
38
- "email": "[email protected]"
39
- },
40
- {
41
- "first": "Sonya",
42
- "middle": [],
43
- "last": "Rajan",
44
- "suffix": "",
45
- "affiliation": {
46
- "laboratory": "",
47
- "institution": "University of Memphis",
48
- "location": {
49
- "postCode": "38152",
50
- "settlement": "Memphis",
51
- "region": "TN"
52
- }
53
- },
54
- "email": "[email protected]"
55
- },
56
- {
57
- "first": "Art",
58
- "middle": [],
59
- "last": "Graesser",
60
- "suffix": "",
61
- "affiliation": {
62
- "laboratory": "",
63
- "institution": "University of Memphis",
64
- "location": {
65
- "postCode": "38152",
66
- "settlement": "Memphis",
67
- "region": "TN"
68
- }
69
- },
70
- "email": "[email protected]"
71
- },
72
- {
73
- "first": "Roger",
74
- "middle": [],
75
- "last": "Kreuz",
76
- "suffix": "",
77
- "affiliation": {
78
- "laboratory": "",
79
- "institution": "University of Memphis",
80
- "location": {
81
- "postCode": "38152",
82
- "settlement": "Memphis",
83
- "region": "TN"
84
- }
85
- },
86
- "email": "[email protected]"
87
- },
88
- {
89
- "first": "Ashish",
90
- "middle": [],
91
- "last": "Karnavat",
92
- "suffix": "",
93
- "affiliation": {
94
- "laboratory": "",
95
- "institution": "University of Memphis",
96
- "location": {
97
- "postCode": "38152",
98
- "settlement": "Memphis",
99
- "region": "TN"
100
- }
101
- },
102
- "email": "[email protected]"
103
- }
104
- ],
105
- "year": "",
106
- "venue": null,
107
- "identifiers": {},
108
- "abstract": "This paper describes and evaluates a detector of presuppositions (DP) for survey questions. Incorrect presuppositions can make it difficult to answer a question correctly. Since they can be difficult to detect, DP is a useful tool for questionnaire designer. DP performs well using local characteristics of presuppositions. It reports the presupposition to the survey methodologist who can determine whether the presupposition is valid.",
109
- "pdf_parse": {
110
- "paper_id": "A00-1013",
111
- "_pdf_hash": "",
112
- "abstract": [
113
- {
114
- "text": "This paper describes and evaluates a detector of presuppositions (DP) for survey questions. Incorrect presuppositions can make it difficult to answer a question correctly. Since they can be difficult to detect, DP is a useful tool for questionnaire designer. DP performs well using local characteristics of presuppositions. It reports the presupposition to the survey methodologist who can determine whether the presupposition is valid.",
115
- "cite_spans": [],
116
- "ref_spans": [],
117
- "eq_spans": [],
118
- "section": "Abstract",
119
- "sec_num": null
120
- }
121
- ],
122
- "body_text": [
123
- {
124
- "text": "Presuppositions are propositions that take some information as given, or as \"the logical assumptions underlying utterances\" (Dijkstra & de Smedt, 1996, p. 255 ; for a general overview, see McCawley, 1981) . Presupposed information includes state of affairs, such as being married; events., such as a graduation; possessions, such as a house, children, knowledge about something; and others. For example, the question, \"when did you graduate from college\", presupposes the event that the respondent did in fact graduate from college. The answer options may be ranges of years, such as \"between 1970 and 1980\". Someone who has never attended college can either not respond at all, or give a random (and false) reply. Thus, incorrect presuppositions cause two problems. First, the question is difficult to answer. Second, assuming that people feel obliged to answer them anyway, their answers present false information. This biases survey statistics, or, in an extreme case, makes them useless. The detector for presuppositions (DP) is part of the computer tool QUAID (Graesser, Wiemer-Hastings, Kreuz, Wiemer-Hastings & Marquis, in press) , which helps survey methodologists design questions that are easy to process. DP detects a presupposition and reports it to the survey methodologist, who can examine if the presupposition is correct.",
125
- "cite_spans": [
126
- {
127
- "start": 124,
128
- "end": 158,
129
- "text": "(Dijkstra & de Smedt, 1996, p. 255",
130
- "ref_id": null
131
- },
132
- {
133
- "start": 189,
134
- "end": 204,
135
- "text": "McCawley, 1981)",
136
- "ref_id": "BIBREF12"
137
- },
138
- {
139
- "start": 1065,
140
- "end": 1136,
141
- "text": "(Graesser, Wiemer-Hastings, Kreuz, Wiemer-Hastings & Marquis, in press)",
142
- "ref_id": null
143
- }
144
- ],
145
- "ref_spans": [],
146
- "eq_spans": [],
147
- "section": "Introduction",
148
- "sec_num": null
149
- },
150
- {
151
- "text": "QUAID is a computerized QUEST questionnaire evaluation aid. It is based on QUEST (Graesser & Franklin, 1990 ), a computational model of the cognitive processes underlying human question answering. QUAID critiques questions with respect to unfamiliar technical terms, vague terms, working memory overload, complex syntax, incorrect presuppositions, and unclear question purpose or category. These problems are a subset of potential problems that have been identified by Graesser, Bommareddy, Swamer, and Golding (1996; see also Graesser, Kennedy, Wiemer-Hastings & Ottati, 1999) . QUAID performs reliably on the first five problem categories. In comparison to these five problems, presupposition detection is even more challenging. For unfamiliar technical terms, for example, QUAID reports words with frequencies below a certain threshold. Such an elegant solution is impossible for presuppositions. Their forms vary widely across presupposition types. Therefore, their detection requires a complex set of rules, carefully tuned to identify a variety of presupposition problems. DP prints out the presuppositions of a question, and relies on the survey methodologist to make the final decision whether the presuppositions are valid.",
152
- "cite_spans": [
153
- {
154
- "start": 81,
155
- "end": 107,
156
- "text": "(Graesser & Franklin, 1990",
157
- "ref_id": "BIBREF7"
158
- },
159
- {
160
- "start": 469,
161
- "end": 517,
162
- "text": "Graesser, Bommareddy, Swamer, and Golding (1996;",
163
- "ref_id": "BIBREF6"
164
- },
165
- {
166
- "start": 518,
167
- "end": 577,
168
- "text": "see also Graesser, Kennedy, Wiemer-Hastings & Ottati, 1999)",
169
- "ref_id": null
170
- }
171
- ],
172
- "ref_spans": [],
173
- "eq_spans": [],
174
- "section": "Introduction",
175
- "sec_num": null
176
- },
177
- {
178
- "text": "We conducted a content analysis of questions with presupposition problems to construct a list of indicators for presuppositions. 22 questions containing problematic presuppositions were selected from a corpus of 550 questions, taken from questionnaires provided by the U.S. Census Bureau. The 22 questions were identified based on ratings by three human expert raters. It may seem that this problem is infrequent, but then, these questions are part of commonly used questionnaires that have been designed and revised very thoughtfully. Additionally, we randomly selected a contrast question sample of 22 questions rated unproblematic with regard to incorrect presuppositions by all three raters. Examples 1and 2are questions rated as problematic by at least two raters; examples (3) and (4) present questions that do not contain presuppositions.",
179
- "cite_spans": [],
180
- "ref_spans": [],
181
- "eq_spans": [],
182
- "section": "How to detect presuppositions",
183
- "sec_num": "1"
184
- },
185
- {
186
- "text": "(1) Is that the same place you USUALLY go when you need routine or preventive care, such as a physical examination or check up? (2) How much do your parents or parent know about your close friends' parents?",
187
- "cite_spans": [],
188
- "ref_spans": [],
189
- "eq_spans": [],
190
- "section": "How to detect presuppositions",
191
- "sec_num": "1"
192
- },
193
- {
194
- "text": "(3) From date to December 31, did you take one or more trips or outings in the United States, of at least one mile, for the PRIMARY purpose of observing, photographing, or feeding wildlife? (4) Are you now on full-time active duty with the armed forces?",
195
- "cite_spans": [],
196
- "ref_spans": [],
197
- "eq_spans": [],
198
- "section": "How to detect presuppositions",
199
- "sec_num": "1"
200
- },
201
- {
202
- "text": "Example (1) presupposes the habit of making use of routine / preventive care; (2) presupposes that the respondent has close friends. As stated above, incorrect presuppositions are infrequent in well-designed questionnaires. For example, questions about details of somebody's marriage are usually preceded by a question establishing the person's marital status. In spite of this, providing feedback about presuppositions to the survey methodologist is useful. Importantly, QUAID is designed to aid in the design process. Consider a survey on healthrelated issues. In the context of this topic, a survey methodologist may be interested in how many days of work a person missed because of illness, but not think about whether the person actually has a job. Upon entering the question \"how many days of work did you miss last year because of illness\" into the QUAID tool, DP would report that the question presupposes employment. The survey methodologist could then insert a question about employment. Second, there are subtle presuppositions that may go undetected even by a skilled survey designer. These are presuppositions about things that are likely (but not necessarily) true. For example, a question may inquire about a person's close friends (presupposing close friends) or someone's standard place for preventive care (presupposing the habit of making use of preventive care). DP does not know which presuppositions are likely to be valid or invalid, and is therefore more likely to detect such subtle incorrect presuppositions than a human expert.",
203
- "cite_spans": [],
204
- "ref_spans": [],
205
- "eq_spans": [],
206
- "section": "How to detect presuppositions",
207
- "sec_num": "1"
208
- },
209
- {
210
- "text": "We constructed a set of presupposition detection rules based on the content analysis. The rules use a wide range of linguistic information about the input sentences, including particular words (such as \"why\"), part of speech categories (e.g., whpronoun), and complex syntactic subtrees (such as a quantification clause, followed by a noun phrase).",
211
- "cite_spans": [],
212
- "ref_spans": [],
213
- "eq_spans": [],
214
- "section": "The presupposition detector (DP)",
215
- "sec_num": "1.1"
216
- },
217
- {
218
- "text": "We used Eric Brill's rule-based word tagger (1992, 1994a, 1994b) , the de facto state of the art tagging system, to break the questions down into part-ofspeech categories. Brill's tagger produces a single lexical category for each word in a sentence by first assigning tags based on the frequency of occurrence of the word in that category, and then applying a set of context-based re-tagging rules. The tagged text was then passed on to Abney's SCOL /CASS system (1996a /CASS system ( , 1996b , an extreme bottom-up parser.",
219
- "cite_spans": [
220
- {
221
- "start": 44,
222
- "end": 64,
223
- "text": "(1992, 1994a, 1994b)",
224
- "ref_id": null
225
- },
226
- {
227
- "start": 451,
228
- "end": 470,
229
- "text": "/CASS system (1996a",
230
- "ref_id": null
231
- },
232
- {
233
- "start": 471,
234
- "end": 493,
235
- "text": "/CASS system ( , 1996b",
236
- "ref_id": null
237
- }
238
- ],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "The syntactic analysis component",
242
- "sec_num": "1.1.1"
243
- },
244
- {
245
- "text": "It is designed to avoid ambiguity problems by applying grammar rules on a level-by-level basis. Each level contains rules that will only fire if they are correct with high probability. Once the parse moves on to a higher level, it will not attempt to apply lower-level rules. In this way, the parser identifies chunks of information, which it can be reasonably certain are connected, even when it cannot create a complete parse of a sentence.",
246
- "cite_spans": [],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "The syntactic analysis component",
250
- "sec_num": "1.1.1"
251
- },
252
- {
253
- "text": "The presupposition indicators The indicators for presuppositions were tested against questions rated as \"unproblematic\" to eliminate items that failed to discriminate questions with versus without presuppositions. We constructed a second list of indicators that detect questions containing no presuppositions. All indicators are listed in Table 1 . These lists are certainly far from complete, but they present a good basis for evaluating of how well presuppositions can be detected by an NLP system.",
254
- "cite_spans": [],
255
- "ref_spans": [
256
- {
257
- "start": 339,
258
- "end": 346,
259
- "text": "Table 1",
260
- "ref_id": "TABREF0"
261
- }
262
- ],
263
- "eq_spans": [],
264
- "section": "1.1.2",
265
- "sec_num": null
266
- },
267
- {
268
- "text": "These rules were integrated into a decision tree structure, as illustrated in Figure 1 . ",
269
- "cite_spans": [],
270
- "ref_spans": [
271
- {
272
- "start": 78,
273
- "end": 86,
274
- "text": "Figure 1",
275
- "ref_id": "FIGREF0"
276
- }
277
- ],
278
- "eq_spans": [],
279
- "section": "1.1.2",
280
- "sec_num": null
281
- },
282
- {
283
- "text": "Different types of presuppositions can be distinguished based on particular indicators. Examples for presupposition types, such as events or possessions, were mentioned above. Table 2 presents an exhaustive overview of presupposition types identified in our analysis.",
284
- "cite_spans": [],
285
- "ref_spans": [
286
- {
287
- "start": 176,
288
- "end": 183,
289
- "text": "Table 2",
290
- "ref_id": null
291
- }
292
- ],
293
- "eq_spans": [],
294
- "section": "Classifying presuppositions",
295
- "sec_num": null
296
- },
297
- {
298
- "text": "Note that some indicators can point to more than one type of presupposition. \"how many\" NP \"where is\" NP Indexicals: \"this\" / \"that\" NP \"these\" / \"those\" NP \"such a(n)\" NP an entity: object, state, or person (NP) a shared referent or common ground (NP) \"how much\" NP ... \"how much does\" NP \"know\" \"how many\" NP ... Possessive pronouns Apostrophe 's': NP's a possession (NP); exception list: NP's that can be presupposed (name, age, etc.)",
299
- "cite_spans": [],
300
- "ref_spans": [],
301
- "eq_spans": [],
302
- "section": "Classifying presuppositions",
303
- "sec_num": null
304
- },
305
- {
306
- "text": "\"why\" S a state of affairs, fact, or assertion (S) VP infinitive an intention / a goal (infinitive / \"why\" VP NP NP VP) \"who\" VP \"When\" VP ...\"when\" NP VP an a~ent (A person who VP) an event (VP) DP reports when a presupposition is present, and it also indicates the type of presupposition that is made (e.g., a common ground presupposition or the presupposition of a habit) in order to point the question designer to the potential presupposition error. DP uses the expressions in the right column in Table 2 , selected in accordance with the indicators, and fills them into the brackets in its output (see Figure 1 ). For example, given the question \"How old is your child?\", DP would detect the possessive pronoun \"your\", and accordingly respond: \"It looks like you are presupposing a possession (child). Make sure that the presupposition is correct by consulting the previous questions.\"",
307
- "cite_spans": [],
308
- "ref_spans": [
309
- {
310
- "start": 501,
311
- "end": 508,
312
- "text": "Table 2",
313
- "ref_id": null
314
- },
315
- {
316
- "start": 607,
317
- "end": 615,
318
- "text": "Figure 1",
319
- "ref_id": "FIGREF0"
320
- }
321
- ],
322
- "eq_spans": [],
323
- "section": "Classifying presuppositions",
324
- "sec_num": null
325
- },
326
- {
327
- "text": "In this section, we report summary statistics for the human ratings of our test questions and the measures we computed based on these ratings to evaluate DP's performance.",
328
- "cite_spans": [],
329
- "ref_spans": [],
330
- "eq_spans": [],
331
- "section": "Evaluation",
332
- "sec_num": "2"
333
- },
334
- {
335
- "text": "We used human ratings as the standard against which to evaluate the performance of DP. Three raters rated about 90 questions from 12 questionnaires provided by the Census Bureau. DP currently does not use context. To have a fair test of its performance, the questions were presented to the human raters out of context, and they were instructed to rate them as isolated questions. Ratings were made on a four-point scale, indicating whether the question contained no presupposition (1), probably contained no presupposition (2), probably contained a presupposition (3), or definitely contained a presupposition (4). We transformed the ratings into Boolean ratings by combining ratings of 1 and 2 (\"no problem\") versus ratings of 3 and 4 (\"problem\"). We obtained very similar results for analyses of the ratings based on the four-point and the Boolean scale. For simplicity, we just report the results for the Boolean scale.",
336
- "cite_spans": [],
337
- "ref_spans": [],
338
- "eq_spans": [],
339
- "section": "Human ratings",
340
- "sec_num": "2.1"
341
- },
342
- {
343
- "text": "We evaluated the agreement among the raters with three measures: correlations, Cohen's kappa, and percent agreement. Correlations were significant only between two raters (r = 0. 41); the correlations of these two with the third rater produced non-significant correlations, indicating that the third rater may have used a different strategy.",
344
- "cite_spans": [],
345
- "ref_spans": [],
346
- "eq_spans": [],
347
- "section": "Agreement among the raters",
348
- "sec_num": "2.2"
349
- },
350
- {
351
- "text": "The kappa scores, similarly, were significant only for two raters (_k_ = 0.36). In terms of percent agreement, the raters with correlated ratings agreed in 67% of the cases. The percentages of agreement with rater 3 were 57% and 56%, respectively. DP ratings were significantly correlated with the ratings provided by the two human raters who agreed well (_r = 0.32 and 0.31), resulting in agreement of ratings in 63% and 66% of the questions. In other words, the agreement of ratings provided by the system and by two human raters is comparable to the highest agreement rate achieved between the human raters. Some of the human ratings diverged substantially.",
352
- "cite_spans": [],
353
- "ref_spans": [],
354
- "eq_spans": [],
355
- "section": "Agreement among the raters",
356
- "sec_num": "2.2"
357
- },
358
- {
359
- "text": "Therefore, we computed two restrictive measures based on the ratings to evaluate the performance of DP. Both scores are Boolean. The first score is \"lenient\"; it reports a presupposition only if at least two raters report a presupposition for the question (rating of 3 or 4). We call this measure P~j, a majority-based presupposition count. The second score is strict. It reports a presupposition only if all three raters report a presupposition. This measure is called Pcomp, a presupposition count based on complete agreement.",
360
- "cite_spans": [],
361
- "ref_spans": [],
362
- "eq_spans": [],
363
- "section": "Agreement among the raters",
364
- "sec_num": "2.2"
365
- },
366
- {
367
- "text": "It results in fewer detected presuppositions overall: Pcomp reports presuppositions for 29 of the questions (33%), whereas P~j reports 57 (64%).",
368
- "cite_spans": [],
369
- "ref_spans": [],
370
- "eq_spans": [],
371
- "section": "Agreement among the raters",
372
- "sec_num": "2.2"
373
- },
374
- {
375
- "text": "Evaluation of the DP DP ratings were significantly correlated only with Pcomp (0.35). DP and P~o~ ratings were in agreement for 67% of the questions. Table 3 lists hit and false alarm rates for DP, separately for P~j and P~omp. The hit rate indicates how many of the presuppositions identified by the human ratings were detected by DP.",
376
- "cite_spans": [],
377
- "ref_spans": [
378
- {
379
- "start": 150,
380
- "end": 157,
381
- "text": "Table 3",
382
- "ref_id": "TABREF2"
383
- }
384
- ],
385
- "eq_spans": [],
386
- "section": "2.3",
387
- "sec_num": null
388
- },
389
- {
390
- "text": "The false alarm rate indicates how often DP reported a presupposition when the human raters did not. The measures look better with respect to the complete agreement criterion, P~omp- Table 3 further lists recall and precision scores. The recall rate indicates how many presuppositions DP detects out of the presuppositions reported by the human rating criterion (computed as hits, divided by the sum of hits and misses). The precision score (computed as hits, divided by the sum of hits and false alarms) measures how many presuppositions reported by DP are actually present, as reported by the human ratings. ",
391
- "cite_spans": [],
392
- "ref_spans": [
393
- {
394
- "start": 183,
395
- "end": 190,
396
- "text": "Table 3",
397
- "ref_id": "TABREF2"
398
- }
399
- ],
400
- "eq_spans": [],
401
- "section": "2.3",
402
- "sec_num": null
403
- },
404
- {
405
- "text": "Based on the first results, we made a few modifications and then reevaluated DP. In particular, we added items to the possession exception list based on the new corpus and made some of the no-presupposition rules more specific. As a more drastic change, we updated the decision tree structure so that presupposition indicators overrule indicators against presuppositions, increasing the number of reported presuppositions for cases of conflicting indicators:",
406
- "cite_spans": [],
407
- "ref_spans": [],
408
- "eq_spans": [],
409
- "section": "An updated version of DP",
410
- "sec_num": "3"
411
- },
412
- {
413
- "text": "If there is evidence for a problem, report \"Problem\" Else if evidence against problem, report \"No problem\" else, report \"Probably not a problem\" Separate analyses show that the modification of the decision tree accounts for most of the performance improvement. Table 4 lists the performance measures for the updated DP. Hit and recall rate increased, but so did the false alarm rate, resulting in a lower precision score. The d' score of the updated system with respect to Pcomp (1.3) is substantially better. The recall rate for this setting is perfect, i.e., DP did not miss any presuppositions. Since survey methodologists will decide whether the presupposition is really a problem, a higher false alarm rate is preferable to missing out presupposition cases. Thus, the updated DP is an improvement over the first version. Conclusion DP can detect presuppositions, and can thereby reliably help a survey methodologist to eliminate incorrect presuppositions. The results for DP with respect to Pco~p are comparable to, and in some cases even better than, the results for the other five categories. This is a very good result, since most of the five problems allow for \"easy\" and \"elegant\" solutions, whereas DP needs to be adjusted to a variety of problems. It is interesting that the performance of DP looks so much better when compared to the complete agreement score, Pcomp than when compared to P~j.",
414
- "cite_spans": [],
415
- "ref_spans": [
416
- {
417
- "start": 261,
418
- "end": 268,
419
- "text": "Table 4",
420
- "ref_id": "TABREF3"
421
- }
422
- ],
423
- "eq_spans": [],
424
- "section": "An updated version of DP",
425
- "sec_num": "3"
426
- },
427
- {
428
- "text": "Recall that Pcomp only reports a presupposition if all the raters report one. The high agreement of the raters in these cases can presumably be explained by the salience of the presupposition problem. This indicates that DP makes use of reliable indicators for its performance. Good agreement with the other measure, Pmaj, would suggest that DP additionally reports presuppositions in cases where humans do not agree that a presupposition is present. The higher agreement with the stricter measure is thus a good result. DP currently works like the other modules of QUA]D: it reports potential problems, but leaves it to the survey methodologist to decide whether to act upon the feedback. As such, DP is a substantial addition to QUA]D.",
429
- "cite_spans": [],
430
- "ref_spans": [],
431
- "eq_spans": [],
432
- "section": "Results",
433
- "sec_num": "3.1"
434
- },
435
- {
436
- "text": "A future challenge is to turn DP into a DIP (detector of incorrect presuppositions), that is, to reduce the number of reported presuppositions to those likely to be incorrect. DP currently evaluates all questions independent of context, resulting in frequent detections. For example, 20 questions about \"this person\" may follow one question that establishes the referent.",
437
- "cite_spans": [],
438
- "ref_spans": [],
439
- "eq_spans": [],
440
- "section": "Results",
441
- "sec_num": "3.1"
442
- },
443
- {
444
- "text": "High-frequency repetitive presupposition reports could easily get annoying. Is a DIP system feasible? At present, it is difficult for NLP systems to use information from context in the evaluation of a statement. What is required to solve this problem is a mechanism that determines whether a presupposed entity (an object, an activity, an assertion, etc.) has been established as applicable in the previous discourse (e.g., in preceding questions). The Construction Integration (CI) model by Kintsch (1998) provides a good example for how such reference ambiguity can be resolved. CI uses a semantic network that represents an entity in the discourse focus (such as \"this person\") through higher activations of its links to other concept nodes. Perhaps models such as the CI model can be integrated into the QUAID model to perform context analyses, in combination with tools like Latent Semantic Analysis (LSA, Landauer & Dumais, 1997) , which represents text units as vectors in a high-dimensional semantic space. LSA measures the semantic similarity of text units (such as questions) by computing vector cosines. This feature may make LSA a useful tool in the detection of a previous question that establishes a presupposed entity in a later question. However, questionnaires differ from connected discourse, such as coherent stories, in aspects that make the present problem rather more difficult. Most importantly, the referent for \"this person\" may have been established in question number 1, and the current question containing the presupposition \"this person\" is question number 52. A DIP system would have to handle a flexible amount of context, because the distance between questions establishing the correctness of a presupposition and a question building up on it can vary. On the one hand, one could limit the considered context to, say, three questions and risk missing the critical question. On the other hand, it is computationally expensive to keep the complete previous context in the systems \"working memory\" to evaluate the few presuppositions which may refer back over a large number of questions. Solving this problem will likely require comparing a variety of different settings.",
445
- "cite_spans": [
446
- {
447
- "start": 492,
448
- "end": 506,
449
- "text": "Kintsch (1998)",
450
- "ref_id": "BIBREF10"
451
- },
452
- {
453
- "start": 911,
454
- "end": 935,
455
- "text": "Landauer & Dumais, 1997)",
456
- "ref_id": "BIBREF11"
457
- }
458
- ],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "Results",
462
- "sec_num": "3.1"
463
- }
464
- ],
465
- "back_matter": [
466
- {
467
- "text": "This work was partially supported by the Census Bureau (43-YA-BC-802930) and by a grant from the National Science Foundation (SBR 9720314 and SBR 9977969). We wish to acknowledge three colleagues for rating the questions in our evaluation text corpus, and our collaborator Susan Goldman as well as two anonymous reviewers for helpful comments.",
468
- "cite_spans": [],
469
- "ref_spans": [],
470
- "eq_spans": [],
471
- "section": "Acknowledgements",
472
- "sec_num": null
473
- }
474
- ],
475
- "bib_entries": {
476
- "BIBREF0": {
477
- "ref_id": "b0",
478
- "title": "Partial parsing via finite-state cascades",
479
- "authors": [
480
- {
481
- "first": "S",
482
- "middle": [],
483
- "last": "Abney",
484
- "suffix": ""
485
- }
486
- ],
487
- "year": 1996,
488
- "venue": "Proceedings of the ESSLLI '96 Robust Parsing Workshop",
489
- "volume": "",
490
- "issue": "",
491
- "pages": "",
492
- "other_ids": {},
493
- "num": null,
494
- "urls": [],
495
- "raw_text": "Abney, S. (1996a). Partial parsing via finite-state cascades. In Proceedings of the ESSLLI '96 Robust Parsing Workshop.",
496
- "links": null
497
- },
498
- "BIBREF1": {
499
- "ref_id": "b1",
500
- "title": "Methods and statistical linguistics",
501
- "authors": [
502
- {
503
- "first": "S",
504
- "middle": [],
505
- "last": "Abney",
506
- "suffix": ""
507
- }
508
- ],
509
- "year": 1996,
510
- "venue": "The Balancing Act",
511
- "volume": "",
512
- "issue": "",
513
- "pages": "",
514
- "other_ids": {},
515
- "num": null,
516
- "urls": [],
517
- "raw_text": "Abney, S. (1996b). Methods and statistical linguistics. In J. Klavans & P. Resnik (Eds.), The Balancing Act. Cambridge, MA: MIT Press",
518
- "links": null
519
- },
520
- "BIBREF2": {
521
- "ref_id": "b2",
522
- "title": "A simple rule-based part of speech tagger",
523
- "authors": [
524
- {
525
- "first": "E",
526
- "middle": [],
527
- "last": "Brill",
528
- "suffix": ""
529
- }
530
- ],
531
- "year": 1992,
532
- "venue": "Proceedings of the Third Conference on Applied Natural Language Processing. ACL",
533
- "volume": "",
534
- "issue": "",
535
- "pages": "",
536
- "other_ids": {},
537
- "num": null,
538
- "urls": [],
539
- "raw_text": "Brill, E. (1992). A simple rule-based part of speech tagger. In Proceedings of the Third Conference on Applied Natural Language Processing. ACL.",
540
- "links": null
541
- },
542
- "BIBREF3": {
543
- "ref_id": "b3",
544
- "title": "A corpus-based approach to language learning",
545
- "authors": [
546
- {
547
- "first": "E",
548
- "middle": [],
549
- "last": "Brill",
550
- "suffix": ""
551
- }
552
- ],
553
- "year": 1993,
554
- "venue": "",
555
- "volume": "",
556
- "issue": "",
557
- "pages": "",
558
- "other_ids": {},
559
- "num": null,
560
- "urls": [],
561
- "raw_text": "Brill, E. (1993). A corpus-based approach to language learning. Ph.D. thesis, University of Pennsylvania, Philadelphia, PA.",
562
- "links": null
563
- },
564
- "BIBREF4": {
565
- "ref_id": "b4",
566
- "title": "Some advances in rule-based part of speech tagging",
567
- "authors": [
568
- {
569
- "first": "E",
570
- "middle": [],
571
- "last": "Brill",
572
- "suffix": ""
573
- }
574
- ],
575
- "year": 1994,
576
- "venue": "Proceedings of the Twelfth National Conference on Articial Intelligence",
577
- "volume": "",
578
- "issue": "",
579
- "pages": "",
580
- "other_ids": {},
581
- "num": null,
582
- "urls": [],
583
- "raw_text": "Brill, E. (1994). Some advances in rule-based part of speech tagging. In Proceedings of the Twelfth National Conference on Articial Intelligence. AAAI Press.",
584
- "links": null
585
- },
586
- "BIBREF5": {
587
- "ref_id": "b5",
588
- "title": "Computational psycholinguistics. AI and connectionist models of human language processing",
589
- "authors": [
590
- {
591
- "first": "T",
592
- "middle": [],
593
- "last": "Dijkstra",
594
- "suffix": ""
595
- },
596
- {
597
- "first": "K",
598
- "middle": [],
599
- "last": "De Smedt",
600
- "suffix": ""
601
- }
602
- ],
603
- "year": 1996,
604
- "venue": "",
605
- "volume": "",
606
- "issue": "",
607
- "pages": "",
608
- "other_ids": {},
609
- "num": null,
610
- "urls": [],
611
- "raw_text": "Dijkstra, T., & de Smedt, K. (1996). Computational psycholinguistics. AI and connectionist models of human language processing. London: Taylor & Francis.",
612
- "links": null
613
- },
614
- "BIBREF6": {
615
- "ref_id": "b6",
616
- "title": "Integrating questionnaire design with a cognitive computational model of human question answering",
617
- "authors": [
618
- {
619
- "first": "A",
620
- "middle": [
621
- "C"
622
- ],
623
- "last": "Graesser",
624
- "suffix": ""
625
- },
626
- {
627
- "first": "S",
628
- "middle": [],
629
- "last": "Bommareddy",
630
- "suffix": ""
631
- },
632
- {
633
- "first": "S",
634
- "middle": [],
635
- "last": "Swamer",
636
- "suffix": ""
637
- },
638
- {
639
- "first": "J",
640
- "middle": [],
641
- "last": "Golding",
642
- "suffix": ""
643
- }
644
- ],
645
- "year": 1996,
646
- "venue": "Answering questions: Methods of determining cognitive and communicative processes in survey research",
647
- "volume": "",
648
- "issue": "",
649
- "pages": "343--175",
650
- "other_ids": {},
651
- "num": null,
652
- "urls": [],
653
- "raw_text": "Graesser, A. C., Bommareddy, S., Swamer, S., & Golding, J. (1996). Integrating questionnaire design with a cognitive computational model of human question answering. In N. Schwarz & S. Sudman (Eds.), Answering questions: Methods of determining cognitive and communicative processes in survey research (pp. 343-175). San Francisco, CA: Jossey-Bass.",
654
- "links": null
655
- },
656
- "BIBREF7": {
657
- "ref_id": "b7",
658
- "title": "QUEST: A cognitive model of question answering",
659
- "authors": [
660
- {
661
- "first": "A",
662
- "middle": [
663
- "C"
664
- ],
665
- "last": "Graesser",
666
- "suffix": ""
667
- },
668
- {
669
- "first": "S",
670
- "middle": [
671
- "P"
672
- ],
673
- "last": "Franklin",
674
- "suffix": ""
675
- }
676
- ],
677
- "year": 1990,
678
- "venue": "Discourse Processes",
679
- "volume": "13",
680
- "issue": "",
681
- "pages": "279--304",
682
- "other_ids": {},
683
- "num": null,
684
- "urls": [],
685
- "raw_text": "Graesser, A.C., & Franklin, S.P. (1990). QUEST: A cognitive model of question answering. Discourse Processes, 13, 279-304.",
686
- "links": null
687
- },
688
- "BIBREF8": {
689
- "ref_id": "b8",
690
- "title": "The use of computational cognitive models to improve questions on surveys and questionnaires",
691
- "authors": [
692
- {
693
- "first": "A",
694
- "middle": [
695
- "C"
696
- ],
697
- "last": "Graesser",
698
- "suffix": ""
699
- },
700
- {
701
- "first": "T",
702
- "middle": [],
703
- "last": "Kennedy",
704
- "suffix": ""
705
- },
706
- {
707
- "first": "P",
708
- "middle": [],
709
- "last": "Wiemer-Hastings",
710
- "suffix": ""
711
- },
712
- {
713
- "first": "V",
714
- "middle": [],
715
- "last": "Ottati",
716
- "suffix": ""
717
- }
718
- ],
719
- "year": 1999,
720
- "venue": "Cognition and Survey Research",
721
- "volume": "",
722
- "issue": "",
723
- "pages": "199--216",
724
- "other_ids": {},
725
- "num": null,
726
- "urls": [],
727
- "raw_text": "Graesser, A.C., Kennedy, T., Wiemer-Hastings, P., & Ottati, V. (1999). The use of computational cognitive models to improve questions on surveys and questionnaires. In M. Sirken, D. Herrrnann, S. Schechter, N. Schwarz, J. Tanur, & R. Tourangeau (Eds.), Cognition and Survey Research (pp. 199- 216). New York: John Wiley & Sons.",
728
- "links": null
729
- },
730
- "BIBREF9": {
731
- "ref_id": "b9",
732
- "title": "QUAID: A questionnaire evaluation aid for survey methodologists",
733
- "authors": [
734
- {
735
- "first": "A",
736
- "middle": [
737
- "C"
738
- ],
739
- "last": "Graesser",
740
- "suffix": ""
741
- },
742
- {
743
- "first": "K",
744
- "middle": [],
745
- "last": "Wiemer-Hastings",
746
- "suffix": ""
747
- },
748
- {
749
- "first": "R",
750
- "middle": [],
751
- "last": "Kreuz",
752
- "suffix": ""
753
- },
754
- {
755
- "first": "P",
756
- "middle": [],
757
- "last": "Wiemer-Hastings",
758
- "suffix": ""
759
- },
760
- {
761
- "first": "K",
762
- "middle": [],
763
- "last": "Marquis",
764
- "suffix": ""
765
- }
766
- ],
767
- "year": null,
768
- "venue": "Behavior Research Methods, Instruments, & Computers",
769
- "volume": "",
770
- "issue": "",
771
- "pages": "",
772
- "other_ids": {},
773
- "num": null,
774
- "urls": [],
775
- "raw_text": "Graesser, A.C., Wiemer-Hastings, K., Kreuz, R., Wiemer-Hastings, P., & Marquis, K. (in press). QUAID: A questionnaire evaluation aid for survey methodologists. Behavior Research Methods, Instruments, & Computers.",
776
- "links": null
777
- },
778
- "BIBREF10": {
779
- "ref_id": "b10",
780
- "title": "Comprehension. A paradigm for cognition",
781
- "authors": [
782
- {
783
- "first": "W",
784
- "middle": [],
785
- "last": "Kintsch",
786
- "suffix": ""
787
- }
788
- ],
789
- "year": 1998,
790
- "venue": "",
791
- "volume": "",
792
- "issue": "",
793
- "pages": "",
794
- "other_ids": {},
795
- "num": null,
796
- "urls": [],
797
- "raw_text": "Kintsch, W. (1998). Comprehension. A paradigm for cognition. Cambridge, UK: Cambridge University Press.",
798
- "links": null
799
- },
800
- "BIBREF11": {
801
- "ref_id": "b11",
802
- "title": "A solution to Plato's problem: The latent semantic analysis theory of acquisition, induction, and representation of knowledge",
803
- "authors": [
804
- {
805
- "first": "T",
806
- "middle": [
807
- "K"
808
- ],
809
- "last": "Landauer",
810
- "suffix": ""
811
- },
812
- {
813
- "first": "S",
814
- "middle": [
815
- "T"
816
- ],
817
- "last": "Dumais",
818
- "suffix": ""
819
- }
820
- ],
821
- "year": 1997,
822
- "venue": "Psychological Review",
823
- "volume": "104",
824
- "issue": "",
825
- "pages": "211--240",
826
- "other_ids": {},
827
- "num": null,
828
- "urls": [],
829
- "raw_text": "Landauer, T.K., & Dumais, S.T. (1997). A solution to Plato's problem: The latent semantic analysis theory of acquisition, induction, and representation of knowledge. Psychological Review, 104, 211-240.",
830
- "links": null
831
- },
832
- "BIBREF12": {
833
- "ref_id": "b12",
834
- "title": "Everything that linguists have always wanted to know about logic",
835
- "authors": [
836
- {
837
- "first": "J",
838
- "middle": [
839
- "D"
840
- ],
841
- "last": "Mccawley",
842
- "suffix": ""
843
- }
844
- ],
845
- "year": 1981,
846
- "venue": "",
847
- "volume": "",
848
- "issue": "",
849
- "pages": "",
850
- "other_ids": {},
851
- "num": null,
852
- "urls": [],
853
- "raw_text": "McCawley, J.D. (1981). Everything that linguists have always wanted to know about logic. Chicago: University of Chicago Press.",
854
- "links": null
855
- }
856
- },
857
- "ref_entries": {
858
- "FIGREF0": {
859
- "uris": null,
860
- "num": null,
861
- "type_str": "figure",
862
- "text": "The DP decision structure tree 1.2"
863
- },
864
- "TABREF0": {
865
- "num": null,
866
- "content": "<table><tr><td/><td/><td>of</td></tr><tr><td/><td colspan=\"2\">Presupposition No presupposition</td></tr><tr><td>First word(s)</td><td>When VP</td><td>Initial or following</td></tr><tr><td/><td>What time</td><td>comma:</td></tr><tr><td/><td>Who VP</td><td>-is there</td></tr><tr><td/><td>Why</td><td>-are there</td></tr><tr><td/><td>How much</td><td/></tr><tr><td/><td>How many</td><td>Does / do NP have ...</td></tr><tr><td/><td colspan=\"2\">How often etc. Will NP have ...</td></tr><tr><td/><td>How VP</td><td>Has / Have NP ...</td></tr><tr><td/><td>Where V NP</td><td>Is / are NP ...</td></tr><tr><td>Keywords</td><td>usually</td><td>ever</td></tr><tr><td/><td>Possessives:</td><td>any</td></tr><tr><td/><td colspan=\"2\">mine, yours, anybody</td></tr><tr><td/><td>NP's</td><td>anything</td></tr><tr><td/><td>while</td><td>whether</td></tr><tr><td/><td>Indexicals:</td><td>if</td></tr><tr><td/><td colspan=\"2\">this, these, such could, would</td></tr><tr><td>Specific</td><td>V infinitive</td><td/></tr><tr><td>constructions</td><td>when NP</td><td/></tr></table>",
867
- "html": null,
868
- "type_str": "table",
869
- "text": ""
870
- },
871
- "TABREF1": {
872
- "num": null,
873
- "content": "<table/>",
874
- "html": null,
875
- "type_str": "table",
876
- "text": ""
877
- },
878
- "TABREF2": {
879
- "num": null,
880
- "content": "<table><tr><td/><td>Hit rate</td><td>False alarm rate</td><td>Recall</td><td>Precision</td><td>d'</td></tr><tr><td>P~j</td><td>0.54</td><td>0.34</td><td>0.66</td><td>0,74</td><td>0.50</td></tr><tr><td>Pcomo</td><td>0.72</td><td>0.35</td><td>0.72</td><td>0,50</td><td>0.95</td></tr><tr><td colspan=\"3\">All measures, except for precision, look</td><td/><td/><td/></tr><tr><td colspan=\"3\">comparable or better in relation to Pco~,,</td><td/><td/><td/></tr><tr><td colspan=\"3\">including d', which measures the actual power of</td><td/><td/><td/></tr><tr><td colspan=\"3\">DP to discriminate questions with and without</td><td/><td/><td/></tr><tr><td colspan=\"3\">presuppositions. Of course, picking a criterion</td><td/><td/><td/></tr><tr><td colspan=\"3\">with better matches does not improve the</td><td/><td/><td/></tr><tr><td colspan=\"2\">system's performance in itself.</td><td/><td/><td/><td/></tr></table>",
881
- "html": null,
882
- "type_str": "table",
883
- "text": ""
884
- },
885
- "TABREF3": {
886
- "num": null,
887
- "content": "<table><tr><td/><td>Hit rate</td><td>False alarm rate</td><td>Recall</td><td>Precision</td><td>d'</td></tr><tr><td>Pmai</td><td>0.75</td><td>0.44</td><td>0.84</td><td>0.75</td><td>0.8</td></tr><tr><td>P~o,~p</td><td>0.90</td><td>0.52</td><td>1.00</td><td>0.46</td><td>1.3</td></tr></table>",
888
- "html": null,
889
- "type_str": "table",
890
- "text": ""
891
- }
892
- }
893
- }
894
- }
Full_text_JSON/prefixA/json/A00/A00-1014.json DELETED
@@ -1,1542 +0,0 @@
1
- {
2
- "paper_id": "A00-1014",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:11:51.622924Z"
6
- },
7
- "title": "MIMIC: An Adaptive Mixed Initiative Spoken Dialogue System for Information Queries",
8
- "authors": [
9
- {
10
- "first": "Jennifer",
11
- "middle": [],
12
- "last": "Chu-Carroll",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Lucent Technologies Bell Laboratories",
17
- "location": {
18
- "addrLine": "600 Mountain Avenue Murray Hill",
19
- "postCode": "07974",
20
- "region": "NJ",
21
- "country": "U.S.A"
22
- }
23
- },
24
- "email": "[email protected]"
25
- }
26
- ],
27
- "year": "",
28
- "venue": null,
29
- "identifiers": {},
30
- "abstract": "This paper describes MIMIC, an adaptive mixed initiative spoken dialogue system that provides movie showtime information. MIMIC improves upon previous dialogue systems in two respects. First, it employs initiative-oriented strategy adaptation to automatically adapt response generation strategies based on the cumulative effect of information dynamically extracted from user utterances during the dialogue. Second, MIMIC's dialogue management architecture decouples its initiative module from the goal and response strategy selection processes, providing a general framework for developing spoken dialogue systems with different adaptation behavior.",
31
- "pdf_parse": {
32
- "paper_id": "A00-1014",
33
- "_pdf_hash": "",
34
- "abstract": [
35
- {
36
- "text": "This paper describes MIMIC, an adaptive mixed initiative spoken dialogue system that provides movie showtime information. MIMIC improves upon previous dialogue systems in two respects. First, it employs initiative-oriented strategy adaptation to automatically adapt response generation strategies based on the cumulative effect of information dynamically extracted from user utterances during the dialogue. Second, MIMIC's dialogue management architecture decouples its initiative module from the goal and response strategy selection processes, providing a general framework for developing spoken dialogue systems with different adaptation behavior.",
37
- "cite_spans": [],
38
- "ref_spans": [],
39
- "eq_spans": [],
40
- "section": "Abstract",
41
- "sec_num": null
42
- }
43
- ],
44
- "body_text": [
45
- {
46
- "text": "In recent years, speech and natural language technologies have matured enough to enable the development of spoken dialogue systems in limited domains. Most existing systems employ dialogue strategies pre-specified during the design phase of the dialogue manager without taking into account characteristics of actual dialogue interactions. More specifically, mixed initiative systems typically employ rules that specify conditions (generally based on local dialogue context) under which initiative may shift from one agent to the other. Previous research, on the other hand, has shown that changes in initiative strategies in human-human dialogues can be dynamically modeled in terms of characteristics of the user and of the on-going dialogue (Chu-Carroll and Brown, 1998) and that adaptability of initiative strategies in dialogue systems leads to better system performance (Litman and Pan, 1999) . However, no previous dialogue system takes into account these dialogue characteristics or allows for initiative-oriented adaptation of dialogue strategies.",
47
- "cite_spans": [
48
- {
49
- "start": 743,
50
- "end": 772,
51
- "text": "(Chu-Carroll and Brown, 1998)",
52
- "ref_id": "BIBREF1"
53
- },
54
- {
55
- "start": 875,
56
- "end": 897,
57
- "text": "(Litman and Pan, 1999)",
58
- "ref_id": "BIBREF6"
59
- }
60
- ],
61
- "ref_spans": [],
62
- "eq_spans": [],
63
- "section": "Introduction",
64
- "sec_num": "1"
65
- },
66
- {
67
- "text": "In this paper, we describe MIMIC, a voice-enabled telephone-based dialogue system that provides movie showtime information, emphasizing its dialogue management aspects. MIMIC improves upon previous systems along two dimensions. First, MIMIC automatically adapts dialogue strategies based on participant roles, characteristics of the current utterance, and dialogue history. This automatic adaptation allows appropriate dialogue strategies to be employed based on both local dialogue context and dialogue history, and has been shown to result in significantly better performance than non-adaptive systems. Second, MIMIC employs an initiative module that is decoupled from the goal selection process in the dialogue manager, while allowing the outcome of both components to jointly determine the strategies chosen for response generation. As a result, MIMIC can exhibit drastically different dialogue behavior with very minor adjustments to parameters in the initiative module, allowing for rapid development and comparison of experimental prototypes and resulting in general and portable dialogue systems.",
68
- "cite_spans": [],
69
- "ref_spans": [],
70
- "eq_spans": [],
71
- "section": "Introduction",
72
- "sec_num": "1"
73
- },
74
- {
75
- "text": "In naturally occurring human-human dialogues, speakers often adopt different dialogue strategies based on hearer characteristics, dialogue history, etc. For instance, the speaker may provide more guidance if the hearer is having difficulty making progress toward task completion, while taking a more passive approach when the hearer is an expert in the domain. Our main goal is to enable a spoken dialogue system to simulate such human behavior by dynamically adapting dialogue strategies during an interaction based on information that can be automatically detected from the dialogue. Figure 1 shows an excerpt from a dialogue between MIMIC and an actual user where the user is attempting to find the times at which the movie Analyze This playing at theaters in Montclair. S and U indicate system and user utterances, respectively, and the italicized utterances are the output of our automatic speech recognizer. In addition, each system turn is annotated with its task and dialogue initiative holders, where task initiative tracks the lead in the process toward achieving the dialogue participants' domain goal, while dialogue initiative models the lead in determining the current discourse focus (Chu-Carroll and Brown, 1998) . In our information query application domain, the system has task (and thus dialogue) initiative if its utterances provide helpful guidance toward achieving the user's domain goal, as in utterances (6) and 7where MIMIC provided valid response choices to its query intending to solicit a theater name, while the system has dialogue but not task initiative if its utterances only specify the current discourse goal, as in utterance (4). i This dialogue illustrates several features of our adaptive mixed initiative dialogue manager. First, MIMIC automatically adapted the initiative distribution based on information extracted from user utterances and dialogue history. More specifically, MIMIC took over task initiative in utterance (6), after failing to obtain a valid answer to its query soliciting a missing theater name in (4). It retained task initiative until utterance (12), after the user implicitly indicated her intention to take over task initiative by providing a fully-specified query (utterance (11)) to a limited prompt (utterance (10)). Second, initiative distribution may affect the strategies MIMIC selects to achieve its goals. For instance, in the context of soliciting missing information, when MIMIC did not have task initiative, a simple information-seeking query was generated (utterance (4)). On the other hand, when MIMIC had task initiative, additional guidance was provided (in the form of valid response choices in utterance (6)), which helped the user successfully respond to the system's query. In the context of prompting the user for a new query, when MIMIC had task initiative, a limited prompt was selected to better constrain the user's response (utterance 10), while an open-ended prompt was generated to allow the user to take control of the problem-solving process otherwise (utterances (1) and (13)).",
76
- "cite_spans": [
77
- {
78
- "start": 1199,
79
- "end": 1228,
80
- "text": "(Chu-Carroll and Brown, 1998)",
81
- "ref_id": "BIBREF1"
82
- }
83
- ],
84
- "ref_spans": [
85
- {
86
- "start": 586,
87
- "end": 594,
88
- "text": "Figure 1",
89
- "ref_id": null
90
- }
91
- ],
92
- "eq_spans": [],
93
- "section": "Motivation",
94
- "sec_num": "2.1"
95
- },
96
- {
97
- "text": "In the next section, we briefly review a framework for dynamic initiative modeling. In Section 3, we discuss how this framework was incorporated into the dialogue management component of a spoken dialogue system to allow for automatic adaptation of dialogue strategies. Finally, we outline experiments evaluating the resulting system and show that MIMIC's automatic adaptation capabilities resulted in better system performance.",
98
- "cite_spans": [],
99
- "ref_spans": [],
100
- "eq_spans": [],
101
- "section": "Motivation",
102
- "sec_num": "2.1"
103
- },
104
- {
105
- "text": "In previous work, we proposed a framework for modeling initiative during dialogue interaction (Chu-Carroll and Brown, 1998 ). This framework predicts task and dialogue initiative holders on a turn-by-turn basis in humanhuman dialogues based on participant roles (such as each dialogue agent's level of expertise and the role that she plays in the application domain), cues observed in the current dialogue turn, and dialogue history. More specifically, we utilize the Dempster-Shafer theory (Shafer, 1976; Gordon and Shortliffe, 1984) , and represent the current initiative distribution as two basic probability assignments (bpas) which indicate the amount of support for each dialogue participant having the task and dialogue initiatives. For instance, the bpa mt-cur({S}) =",
106
- "cite_spans": [
107
- {
108
- "start": 94,
109
- "end": 122,
110
- "text": "(Chu-Carroll and Brown, 1998",
111
- "ref_id": "BIBREF1"
112
- },
113
- {
114
- "start": 491,
115
- "end": 505,
116
- "text": "(Shafer, 1976;",
117
- "ref_id": "BIBREF16"
118
- },
119
- {
120
- "start": 506,
121
- "end": 534,
122
- "text": "Gordon and Shortliffe, 1984)",
123
- "ref_id": "BIBREF4"
124
- }
125
- ],
126
- "ref_spans": [],
127
- "eq_spans": [],
128
- "section": "An Evidential Framework for Modeling Initiative",
129
- "sec_num": "2.2"
130
- },
131
- {
132
- "text": "l Although the dialogues we collected in our experiments (Section 5) include cases in which MIMIC has neither initiative, such cases are rare in this application domain, and will not be discussed further in this paper. 0.3, mt-c~,r({U}) = 0.7 indicates that, with all evidence taken into account, there is more support (to the degree 0.7) for the user having task initiative in the current turn than for the system. At the end of each turn, the bpas are updated based on the effects that cues observed during that turn have on changing them, and the new bpas are used to predict the next task and dialogue initiative holders.",
133
- "cite_spans": [],
134
- "ref_spans": [],
135
- "eq_spans": [],
136
- "section": "An Evidential Framework for Modeling Initiative",
137
- "sec_num": "2.2"
138
- },
139
- {
140
- "text": "In this framework, cues that affect initiative distribution include NoNewlnfo, triggered when the speaker simply repeats or rephrases an earlier utterance, implicitly suggesting that the speaker may want to give up initiative, AmbiguousActions, triggered when the speaker proposes an action that is ambiguous in the application domain, potentially prompting the hearer to take over initiative to resolve the detected ambiguity, etc. The effects that each cue has on changing the current bpas are also represented as bpas, which were determined by an iterative training procedure using a corpus of transcribed dialogues where each turn was annotated with the task/dialogue initiative holders and the observed cues. The bpas for the next turn are computed by combining the bpas representing the current initiative distribution and the bpas representing the effects of cues observed during the current turn, using Dempster's combination rule (Gordon and Shortliffe, 1984) . The task and dialogue initiative holders are then predicted based on the new bpas. This framework was evaluated using annotated dialogues from four task-oriented domains, achieving, on average, a correct prediction rate of 97% and 88% for task and dialogue initiative holders, respectively. In Section 3.2, we discuss how this predictive model is converted into a generative model by enabling the system to automatically detect cues that were previously labelled manually. We further discuss how the model is used by the dialogue manager for dynamic dialogue strategy adaptation.",
141
- "cite_spans": [
142
- {
143
- "start": 939,
144
- "end": 968,
145
- "text": "(Gordon and Shortliffe, 1984)",
146
- "ref_id": "BIBREF4"
147
- }
148
- ],
149
- "ref_spans": [],
150
- "eq_spans": [],
151
- "section": "An Evidential Framework for Modeling Initiative",
152
- "sec_num": "2.2"
153
- },
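The turn-by-turn update described in the paragraph above is Dempster's rule of combination applied to bpas over the two-element frame {system, user}. The following is a minimal illustrative sketch (not code from the paper), assuming bpas are represented as dicts mapping frozensets of participants to mass:

```python
from itertools import product

S, U = "system", "user"
THETA = frozenset({S, U})  # the full frame of discernment

def combine(m1, m2):
    """Dempster's rule of combination for two bpas: multiply masses,
    intersect focal elements, and renormalize away the conflict."""
    raw = {}
    for (a, wa), (b, wb) in product(m1.items(), m2.items()):
        raw[a & b] = raw.get(a & b, 0.0) + wa * wb
    conflict = raw.pop(frozenset(), 0.0)  # mass that landed on the empty set
    if conflict >= 1.0:
        raise ValueError("totally conflicting bpas")
    return {focal: w / (1.0 - conflict) for focal, w in raw.items()}

# The paper's initial task bpa (0.3 system / 0.7 user) combined with a cue
# contributing 0.35 to {system} and 0.65 to the whole frame:
prior = {frozenset({S}): 0.3, frozenset({U}): 0.7}
cue = {frozenset({S}): 0.35, THETA: 0.65}
print(combine(prior, cue))  # mass ~0.40 on {system}, ~0.60 on {user}
```

These two output values match the m_t-(4) figures quoted in the worked example of Section 4.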
154
- {
155
- "text": "MIMIC is a telephone-based dialogue system that provides movie showtime information. It consists of the following main components, implemented on a distributed, client-server architecture (Zhou et al., 1997 ):",
156
- "cite_spans": [
157
- {
158
- "start": 188,
159
- "end": 206,
160
- "text": "(Zhou et al., 1997",
161
- "ref_id": "BIBREF23"
162
- }
163
- ],
164
- "ref_spans": [],
165
- "eq_spans": [],
166
- "section": "MIMIC: Mixed Initiative Movie Information Consultant",
167
- "sec_num": "3"
168
- },
169
- {
186
- "text": "and hang-ups, and enables streaming of audio data on channels of a telephony board.",
187
- "cite_spans": [],
188
- "ref_spans": [],
189
- "eq_spans": [],
190
- "section": "Telephony server: this component detects rings",
191
- "sec_num": null
192
- },
193
- {
194
- "text": "Speech recognizer: the recognizer receives audio data from the telephony server and generates the word string hypothesis that best matches the audio input. We used the Lucent Automatic Speech Recognizer (Reichl and Chou, 1998; Ortmanns et al., 1999) , configured to use class-based probabilistic ngram language models to allow for rapid updates of movie/theater/town names.",
195
- "cite_spans": [
196
- {
197
- "start": 203,
198
- "end": 226,
199
- "text": "(Reichl and Chou, 1998;",
200
- "ref_id": "BIBREF13"
201
- },
202
- {
203
- "start": 227,
204
- "end": 249,
205
- "text": "Ortmanns et al., 1999)",
206
- "ref_id": "BIBREF10"
207
- }
208
- ],
209
- "ref_spans": [],
210
- "eq_spans": [],
211
- "section": "Telephony server: this component detects rings",
212
- "sec_num": null
213
- },
214
- {
215
- "text": "(1) S: Hello, this is MIMIC, the movie information system. 2How can I help you? 3 are carried out by this component: 1) semantic interpretation, which constructs frame-based semantic representations from user utterances, 2) dialogue management, where response strategies are selected based on the semantic representation of the user's utterance, system's domain knowledge, and initiative distribution, and 3) utterance generation, where utterance templates are chosen and instantiated to realize the selected response strategies. These three tasks will be discussed in further detail in the rest of this section.",
216
- "cite_spans": [],
217
- "ref_spans": [],
218
- "eq_spans": [],
219
- "section": "_ql:l Utterance",
220
- "sec_num": null
221
- },
222
- {
223
- "text": "4. Text-to-speech engine: the TTS system receives the word string comprising the system's response from the dialogue component and converts the text into speech for output over the telephone. We used the Bell Labs TTS system (Sproat, 1998) , which in addition to converting plain text into speech, accepts text strings annotated to override default pitch height, accent placement, speaking rate, etc. 2",
224
- "cite_spans": [
225
- {
226
- "start": 225,
227
- "end": 239,
228
- "text": "(Sproat, 1998)",
229
- "ref_id": null
230
- }
231
- ],
232
- "ref_spans": [],
233
- "eq_spans": [],
234
- "section": "_ql:l Utterance",
235
- "sec_num": null
236
- },
237
- {
238
- "text": "MIMIC utilizes a non-recursive frame-based semantic representation commonly used in spoken dialogue systems (e.g. (Seneff et al., 1991; Lamel, 1998) ), which represents an utterance as a set of attribute-value pairs. MIMIC's semantic representation is constructed by first extracting, for each attribute, a set of keywords from the user utterance. Using a vector-based topic identification process (Salton, 1971; Chu-Carroll and Carpenter, 1999) , these keywords are used to determine a set of likely values (including null) for that attribute. Next, the utterance is interpreted with respect to the dialogue history and the system's domain knowledge. This allows MIMIC to handle elliptical sentences and anaphoric references, as well as automatically infer missing values and detect inconsistencies in the current representation.",
239
- "cite_spans": [
240
- {
241
- "start": 114,
242
- "end": 135,
243
- "text": "(Seneff et al., 1991;",
244
- "ref_id": "BIBREF15"
245
- },
246
- {
247
- "start": 136,
248
- "end": 148,
249
- "text": "Lamel, 1998)",
250
- "ref_id": "BIBREF5"
251
- },
252
- {
253
- "start": 398,
254
- "end": 412,
255
- "text": "(Salton, 1971;",
256
- "ref_id": "BIBREF14"
257
- },
258
- {
259
- "start": 413,
260
- "end": 445,
261
- "text": "Chu-Carroll and Carpenter, 1999)",
262
- "ref_id": "BIBREF2"
263
- }
264
- ],
265
- "ref_spans": [],
266
- "eq_spans": [],
267
- "section": "Semantic Interpretation",
268
- "sec_num": "3.1"
269
- },
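As a rough illustration of the attribute-value frames described above (the attribute names and lexicon here are hypothetical stand-ins; the actual system scores keyword vectors per attribute, Salton-style, rather than doing exact string lookup):

```python
HYPOTHETICAL_LEXICON = {
    "movie":   {"analyze this": "Analyze This"},
    "theater": {"wellmont": "Wellmont Theater"},
    "town":    {"montclair": "Montclair"},
}

def interpret(utterance, history=None):
    """Build an attribute-value frame from keywords; fall back to the
    dialogue history for attributes the utterance leaves unspecified
    (a crude stand-in for ellipsis and anaphora resolution)."""
    text = utterance.lower()
    frame = {attr: next((v for kw, v in lex.items() if kw in text), None)
             for attr, lex in HYPOTHETICAL_LEXICON.items()}
    for attr, value in (history or {}).items():
        if frame.get(attr) is None:
            frame[attr] = value
    return frame

print(interpret("What time is Analyze This playing in Montclair?"))
# {'movie': 'Analyze This', 'theater': None, 'town': 'Montclair'}
```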
270
- {
271
- "text": "This semantic representation allows for decoupling of domain-dependent task specifications and domain-independent dialogue management strategies. Each query type is specified by a template indicating, for each attribute, whether a value must, must not, or can optionally be provided in order for a query to be considered well-formed. Figure 2(b) shows that to solicit movie showtime information (question type when), a movie name and a theater name must be provided, whereas a town may optionally be provided. These specifications are determined based on domain semantics, and must be reconstructed when porting the system to a new domain.",
272
- "cite_spans": [],
273
- "ref_spans": [
274
- {
275
- "start": 334,
276
- "end": 345,
277
- "text": "Figure 2(b)",
278
- "ref_id": "FIGREF0"
279
- }
280
- ],
281
- "eq_spans": [],
282
- "section": "Semantic Interpretation",
283
- "sec_num": "3.1"
284
- },
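The must/must-not/optional specification lends itself to a small per-query-type table; the "when" entry below follows Figure 2(b), while the rule names and function shape are assumptions:

```python
# must / must_not / may, per attribute and question type (Figure 2(b)).
QUERY_SPECS = {
    "when": {"movie": "must", "theater": "must", "town": "may"},
}

def ill_formed(question_type, frame):
    """Return the attributes that violate the query-type specification."""
    violations = []
    for attr, rule in QUERY_SPECS[question_type].items():
        present = frame.get(attr) is not None
        if rule == "must" and not present:
            violations.append(("missing", attr))
        elif rule == "must_not" and present:
            violations.append(("disallowed", attr))
    return violations

print(ill_formed("when", {"movie": "Analyze This", "town": "Montclair"}))
# [('missing', 'theater')]  -> the kind of anomaly that triggers AmbiguousAction
```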
285
- {
286
- "text": "Given a semantic representation, the dialogue history and the system's domain knowledge, the dialogue manager selects a set of strategies that guides MIMIC's response generation process. This task is carried out by three subprocesses: 1) initiative modeling, which determines the initiative distribution for the system's dialogue turn, 2) goal selection, which identifies a goal that MIMIC's response attempts to achieve, and 3) strategy selection, which chooses, based on the initiative distribution, a set of dialogue acts that MIMIC will adopt in its attempt to realize the selected goal.",
287
- "cite_spans": [],
288
- "ref_spans": [],
289
- "eq_spans": [],
290
- "section": "Dialogue Management",
291
- "sec_num": "3.2"
292
- },
293
- {
294
- "text": "MIMIC's initiative module determines the task and dialogue initiative holders for each system turn in order to enable dynamic strategy adaptation. It automatically detects cues triggered during the current user turn, and combines the effects of these cues with the current initiative distribution to determine the initiative holders for the system's turn.",
295
- "cite_spans": [],
296
- "ref_spans": [],
297
- "eq_spans": [],
298
- "section": "Initiative Modeling",
299
- "sec_num": "3.2.1"
300
- },
301
- {
302
- "text": "The cues and the bpas representing their effects are largely based on a subset of those described in (Chu-Carroll and Brown, 1998) , 3 as shown in Figures 3(a) and 3(b). Figure 3(a) shows that observation of TakeOverTask supports a task initiative shift to the speaker to the degree .35. The remaining support is assigned to O, the set of all possible conclusions (i.e., {speaker,hearer}), indicating that to the degree .65, observation of this cue does not commit to identifying which dialogue participant should have task initiative in the next dialogue turn.",
303
- "cite_spans": [
304
- {
305
- "start": 101,
306
- "end": 130,
307
- "text": "(Chu-Carroll and Brown, 1998)",
308
- "ref_id": "BIBREF1"
309
- }
310
- ],
311
- "ref_spans": [
312
- {
313
- "start": 147,
314
- "end": 159,
315
- "text": "Figures 3(a)",
316
- "ref_id": null
317
- },
318
- {
319
- "start": 170,
320
- "end": 181,
321
- "text": "Figure 3(a)",
322
- "ref_id": null
323
- }
324
- ],
325
- "eq_spans": [],
326
- "section": "Cue Detection",
327
- "sec_num": null
328
- },
329
- {
330
- "text": "The cues used in MIMIC are classified into two categories, discourse cues and analytical cues, based on the types of knowledge needed to detect them: I. Discourse cues, which can be detected by considering the semantic representation of the current utterance and dialogue history:",
331
- "cite_spans": [],
332
- "ref_spans": [],
333
- "eq_spans": [],
334
- "section": "Cue Detection",
335
- "sec_num": null
336
- },
337
- {
338
- "text": "\u2022 TakeOverTask, an implicit indication that the user wants to take control of the problemsolving process, triggered when the user provides more information than the discourse expectation.",
339
- "cite_spans": [],
340
- "ref_spans": [],
341
- "eq_spans": [],
342
- "section": "Cue Detection",
343
- "sec_num": null
344
- },
345
- {
346
- "text": "3We selected only those cues that can be automatically detected in a spoken dialogue system with speech recognition errors and limited semantic interpretation capabilities.",
347
- "cite_spans": [],
348
- "ref_spans": [],
349
- "eq_spans": [],
350
- "section": "Cue Detection",
351
- "sec_num": null
352
- },
353
- {
354
- "text": "\u2022 NoNewlnfo, an indication that the user is unable to make progress toward task completion, triggered when the semantic representations of two consecutive user turns are identical (a result of the user not knowing what to say or the speech recognizer failing to recognize the user utterances).",
355
- "cite_spans": [],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "Cue Detection",
359
- "sec_num": null
360
- },
361
- {
362
- "text": "2. Analytical cues, which can only be detected by taking into account MIMIC's domain knowledge:",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Cue Detection",
367
- "sec_num": null
368
- },
369
- {
370
- "text": "\u2022 lnvalidAction, an indication that the user made an invalid assumption about the domain, triggered when the system database lookup based on the user's query returns null.",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Cue Detection",
375
- "sec_num": null
376
- },
377
- {
378
- "text": "\u2022 lnvalidActionResolved, triggered when the previous invalid assumption is corrected.",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Cue Detection",
383
- "sec_num": null
384
- },
385
- {
386
- "text": "\u2022 AmbiguousAction, an indication that the user query is not well-formed, triggered when a mandatory attribute is unspecified or when more than one value is specified for an attribute.",
387
- "cite_spans": [],
388
- "ref_spans": [],
389
- "eq_spans": [],
390
- "section": "Cue Detection",
391
- "sec_num": null
392
- },
393
- {
394
- "text": "\u2022 AmbiguousActionResolved, triggered when the attribute in question is uniquely instantiated.",
395
- "cite_spans": [],
396
- "ref_spans": [],
397
- "eq_spans": [],
398
- "section": "Cue Detection",
399
- "sec_num": null
400
- },
401
- {
402
- "text": "To determine the initiative distribution, the bpas representing the effects of cues detected in the current user utterance are instantiated (i.e., speaker~hearer in Figure 3 are instantiated as system~user accordingly). These effects are then interpreted with respect to the current initiative distribution by applying Dempster's combination rule (Gordon and Shortliffe, 1984) to the bpas representing the current initiative distribution and the instantiated bpas. This results in two new bpas representing the task and dialogue initiative distributions for the system's turn. The dialogue participant with the greater degree of support for having the task/dialogue initiative in these bpas is the task/dialogue initiative holder for the system's turn 4 (see Section 4 for an example).",
403
- "cite_spans": [
404
- {
405
- "start": 347,
406
- "end": 376,
407
- "text": "(Gordon and Shortliffe, 1984)",
408
- "ref_id": "BIBREF4"
409
- }
410
- ],
411
- "ref_spans": [
412
- {
413
- "start": 165,
414
- "end": 173,
415
- "text": "Figure 3",
416
- "ref_id": null
417
- }
418
- ],
419
- "eq_spans": [],
420
- "section": "Computing Initiative Distribution",
421
- "sec_num": null
422
- },
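Putting the pieces together, a sketch of the per-turn update (the combine() function from the earlier sketch is repeated so this snippet runs on its own; the cue table gives a few task-initiative effects per Figure 3(a), and the dialogue-initiative update would be analogous):

```python
from functools import reduce
from itertools import product

S, U = "system", "user"
THETA = frozenset({S, U})

# Task-initiative effects of a few cues, per Figure 3(a): each cue puts
# 0.35 of mass on a role (speaker or hearer) and the rest on the frame.
TASK_EFFECTS = {
    "TakeOverTask":    ("speaker", 0.35),
    "NoNewInfo":       ("hearer", 0.35),
    "AmbiguousAction": ("hearer", 0.35),
}

def instantiate(cue, speaker):
    """Map a cue's speaker/hearer roles onto system/user for this turn."""
    role, mass = TASK_EFFECTS[cue]
    holder = speaker if role == "speaker" else (U if speaker == S else S)
    return {frozenset({holder}): mass, THETA: 1.0 - mass}

def combine(m1, m2):
    raw = {}
    for (a, wa), (b, wb) in product(m1.items(), m2.items()):
        raw[a & b] = raw.get(a & b, 0.0) + wa * wb
    k = raw.pop(frozenset(), 0.0)
    return {f: w / (1.0 - k) for f, w in raw.items()}

def update(current, cues, speaker=U):
    """Fold this turn's cue effects into the current task bpa and predict
    the next task-initiative holder (the singleton with the most mass)."""
    new = reduce(combine, (instantiate(c, speaker) for c in cues), current)
    return max((f for f in new if len(f) == 1), key=new.get), new

# After utterance (3): AmbiguousAction alone leaves the user in the lead.
print(update({frozenset({S}): 0.3, frozenset({U}): 0.7}, ["AmbiguousAction"]))
```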
423
- {
424
- "text": "The goal selection module selects a goal that MIMIC attempts to achieve in its response by utilizing information from analytical cue detection as shown in Figure 4 . MIMIC's goals focus on two aspects of cooperative dialogue interaction: 1) initiating subdialogues to resolve anomalies that occur during the dialogue by attempting to instantiate an unspecified attribute, constraining an attribute for which multiple values have been specified, or correcting an invalid assumption in the case of invalid van Beeket al., 1993; Raskutti and Zukerman, 1993; Qu and Beale, 1999) , and 2) providing answers to well-formed queries (steps 9-11).",
425
- "cite_spans": [
426
- {
427
- "start": 504,
428
- "end": 525,
429
- "text": "van Beeket al., 1993;",
430
- "ref_id": null
431
- },
432
- {
433
- "start": 526,
434
- "end": 554,
435
- "text": "Raskutti and Zukerman, 1993;",
436
- "ref_id": "BIBREF12"
437
- },
438
- {
439
- "start": 555,
440
- "end": 574,
441
- "text": "Qu and Beale, 1999)",
442
- "ref_id": "BIBREF11"
443
- }
444
- ],
445
- "ref_spans": [
446
- {
447
- "start": 155,
448
- "end": 163,
449
- "text": "Figure 4",
450
- "ref_id": null
451
- }
452
- ],
453
- "eq_spans": [],
454
- "section": "Goal Selection",
455
- "sec_num": "3.2.2"
456
- },
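Figure 4 itself is not reproduced in this extraction, so the cascade below is a hypothetical paraphrase of the goal-selection logic described above, keyed on the analytical cues (the goal and cue labels are assumptions):

```python
def select_goal(cues):
    """Hypothetical paraphrase of the Figure 4 goal-selection cascade."""
    if "InvalidAction" in cues:
        return "CorrectInvalidAssumption"  # database lookup returned null
    if "AmbiguousAction:unspecified" in cues:
        return "Instantiate"               # solicit the missing attribute
    if "AmbiguousAction:multiple" in cues:
        return "Constrain"                 # narrow a multiply-valued attribute
    return "Answer"                        # well-formed query: just answer it
```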
457
- {
458
- "text": "Previous work has argued that initiative affects the degree of control an agent has in the dialogue interaction (Whittaker and Stenton, 1988; Walker and Whittaker, 1990; Chu-Carroll and Brown, 1998) . Thus, a cooperative system may adopt different strategies to achieve the same goal depending on the initiative distribution. Since task initiative models contribution to domain/problemsolving goals, while dialogue initiative affects the cur-5An alternative strategy to step (4) is to perform a database lookup based on the ambiguous query and summarize the results (Litman et al., 1998 ), which we leave for future work. rent discourse goal, we developed alternative strategies for achieving the goals in Figure 4 based on initiative distribution, as shown in Table 1 .",
459
- "cite_spans": [
460
- {
461
- "start": 112,
462
- "end": 141,
463
- "text": "(Whittaker and Stenton, 1988;",
464
- "ref_id": "BIBREF22"
465
- },
466
- {
467
- "start": 142,
468
- "end": 169,
469
- "text": "Walker and Whittaker, 1990;",
470
- "ref_id": "BIBREF20"
471
- },
472
- {
473
- "start": 170,
474
- "end": 198,
475
- "text": "Chu-Carroll and Brown, 1998)",
476
- "ref_id": "BIBREF1"
477
- },
478
- {
479
- "start": 566,
480
- "end": 586,
481
- "text": "(Litman et al., 1998",
482
- "ref_id": "BIBREF7"
483
- }
484
- ],
485
- "ref_spans": [
486
- {
487
- "start": 706,
488
- "end": 714,
489
- "text": "Figure 4",
490
- "ref_id": null
491
- },
492
- {
493
- "start": 761,
494
- "end": 768,
495
- "text": "Table 1",
496
- "ref_id": null
497
- }
498
- ],
499
- "eq_spans": [],
500
- "section": "Strategy Selection",
501
- "sec_num": "3.2.3"
502
- },
503
- {
504
- "text": "The strategies employed when MIMIC has only dialogue initiative are similar to the mixed initiative dialogue strategies employed by many existing spoken dialogue systems (e.g., (Bennacef et al., 1996; Stent et al., 1999) ). To instantiate an attribute, MIMIC adopts the lnfoSeek dialogue act to solicit the missing information. In contrast, when MIMIC has both initiatives, it plays a more active role by presenting the user with additional information comprising valid instantiations of the attribute (GiveOptions). Given an invalid query, MIMIC notifies the user of the failed query and provides an openended prompt when it only has dialogue initiative. When MIMIC has both initiatives, however, in addition to No-tifyFailure, it suggests an alternative close to the user's original query and provides a limited prompt. Finally, when MIMIC has neither initiative, it simply adopts No-tifyFailure, allowing the user to determine the next discourse goal.",
505
- "cite_spans": [
506
- {
507
- "start": 177,
508
- "end": 200,
509
- "text": "(Bennacef et al., 1996;",
510
- "ref_id": "BIBREF0"
511
- },
512
- {
513
- "start": 201,
514
- "end": 220,
515
- "text": "Stent et al., 1999)",
516
- "ref_id": "BIBREF18"
517
- }
518
- ],
519
- "ref_spans": [],
520
- "eq_spans": [],
521
- "section": "Strategy Selection",
522
- "sec_num": "3.2.3"
523
- },
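Because the chosen strategies depend only on the selected goal and the initiative distribution, Table 1 can be implemented as a plain lookup table. The entries below reconstruct the cases discussed in this section; the key layout and any entries not explicitly described are assumptions:

```python
# (goal, system_has_task_init, system_has_dialogue_init) -> dialogue acts
STRATEGIES = {
    ("Instantiate", False, True): ["InfoSeek"],
    ("Instantiate", True,  True): ["GiveOptions", "InfoSeek"],
    ("Answer",      False, True): ["Answer", "OpenPrompt"],
    ("Answer",      True,  True): ["Answer", "LimitedPrompt"],
    ("CorrectInvalidAssumption", False, True):
        ["NotifyFailure", "OpenPrompt"],
    ("CorrectInvalidAssumption", True,  True):
        ["NotifyFailure", "SuggestAlternative", "LimitedPrompt"],
    ("CorrectInvalidAssumption", False, False):
        ["NotifyFailure"],
}

def select_strategies(goal, task_init, dialogue_init):
    """Look up the dialogue acts for the goal under the current initiative."""
    return STRATEGIES[(goal, task_init, dialogue_init)]
```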
524
- {
525
- "text": "MIMIC employs a simple template-driven utterance generation approach. Templates are associated with dialogue acts as shown in Table 2 .6 The generation component receives from the dialogue manager the selected dialogue acts and the parameters needed to instantiate the templates. It then generates the system response, which is sent to the TTS module for spoken output synthesis. 6In most cases, there is a one-to-one-mapping between dialogue acts and templates. The exceptions are Answer, NotifyFailure, and SuggestAlternative, whose templates vary based on the question type. Template \"Did you say < valuel > .... or < valuen >.9\" \"Uh-huh.\" \"Choices for < attribute > are < valuex > ... < value, >7 \"What < attribute > would you like?\" E.g., \"< movie > is playing at < theater > at < time1 > ... < time,, >\" \"Can I help you with anything elseT' \"Please say the name of the movie or theater or town you would like information about.\" E.g., \"< movie > is not playing at < theater >. E.g., \"< movie > is playing at < alternativetheater > at < timex > ... < timen >\" ",
526
- "cite_spans": [],
527
- "ref_spans": [
528
- {
529
- "start": 126,
530
- "end": 133,
531
- "text": "Table 2",
532
- "ref_id": "TABREF2"
533
- }
534
- ],
535
- "eq_spans": [],
536
- "section": "Utterance Generation",
537
- "sec_num": "3.3"
538
- },
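A sketch of the template-driven generator, using a few of the Table 2 templates (the $-placeholder syntax and the example theater names are illustrative, not the paper's):

```python
import string

TEMPLATES = {
    "InfoSeek":    string.Template("What $attribute would you like?"),
    "GiveOptions": string.Template("Choices for $attribute are $options."),
    "Answer":      string.Template("$movie is playing at $theater at $times."),
    "OpenPrompt":  string.Template("Can I help you with anything else?"),
}

def generate(acts, params):
    """Instantiate one template per selected dialogue act, in order."""
    return " ".join(TEMPLATES[act].substitute(params) for act in acts)

print(generate(["GiveOptions", "InfoSeek"],
               {"attribute": "theater",
                "options": "the Wellmont and the Clairidge"}))
# Choices for theater are the Wellmont and the Clairidge. What theater would you like?
```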
539
- {
540
- "text": "To illustrate MIMIC's adaptation capabilities, we return to the dialogue in Figure 1 , which is repeated in Figure 5 and annotated with the cues detected in each user turn (in boldfaced italics) and the dialogue acts employed for response generation in each system turn (in boldface).",
541
- "cite_spans": [],
542
- "ref_spans": [
543
- {
544
- "start": 76,
545
- "end": 84,
546
- "text": "Figure 1",
547
- "ref_id": null
548
- },
549
- {
550
- "start": 108,
551
- "end": 116,
552
- "text": "Figure 5",
553
- "ref_id": "FIGREF4"
554
- }
555
- ],
556
- "eq_spans": [],
557
- "section": "Examples",
558
- "sec_num": "4"
559
- },
560
- {
561
- "text": "The bpas representing the initiative distribution for utterance 3 The cue AmbiguousAction is detected in utterance (3) because the mandatory attribute theater was not specified and cannot be inferred (since the town of Montclair has multiple theaters). The bpas representing its effect are instantiated as follows (Figure 3 The updated bpas indicate that MIMIC should have dialogue but not task initiative when attempting to resolve the detected ambiguity in utterance (4).",
562
- "cite_spans": [],
563
- "ref_spans": [
564
- {
565
- "start": 314,
566
- "end": 323,
567
- "text": "(Figure 3",
568
- "ref_id": null
569
- }
570
- ],
571
- "eq_spans": [],
572
- "section": "Examples",
573
- "sec_num": "4"
574
- },
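These numbers can be checked mechanically with the combine() sketch given earlier (Section 2.2), repeated here so the snippet is self-contained:

```python
from itertools import product

S, U, THETA = frozenset("S"), frozenset("U"), frozenset("SU")

def combine(m1, m2):
    raw = {}
    for (a, wa), (b, wb) in product(m1.items(), m2.items()):
        raw[a & b] = raw.get(a & b, 0.0) + wa * wb
    k = raw.pop(frozenset(), 0.0)  # conflict mass on the empty set
    return {f: w / (1.0 - k) for f, w in raw.items()}

m_task = combine({S: 0.3, U: 0.7}, {S: 0.35, THETA: 0.65})
m_dlg  = combine({S: 0.6, U: 0.4}, {S: 0.7, THETA: 0.3})
print(round(m_task[S], 2), round(m_task[U], 2))  # 0.4 0.6
print(round(m_dlg[S], 2), round(m_dlg[U], 2))    # 0.83 0.17
```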
575
- {
576
- "text": "MIMIC selects Instantiate as its goal to be achieved (Figure 4) , which, based on the initiative distribution, leads it to select the InfoSeek action (Table I) and generate the query \"What theater would you like?\"",
577
- "cite_spans": [],
578
- "ref_spans": [
579
- {
580
- "start": 53,
581
- "end": 63,
582
- "text": "(Figure 4)",
583
- "ref_id": null
584
- },
585
- {
586
- "start": 150,
587
- "end": 159,
588
- "text": "(Table I)",
589
- "ref_id": null
590
- }
591
- ],
592
- "eq_spans": [],
593
- "section": "Examples",
594
- "sec_num": "4"
595
- },
596
- {
597
- "text": "The user's response in (5) again triggers Ambiguous-Action, as well as NoNewlnfo since the semantic representations of (3) and (5) are identical, given the dialogue context. When the effects of these cues are taken into account, we have the following initiative distribution for utterance (6): mt-(6)({S}) = 0.62, mt_(6)({U}) = 0.38; md-(6)({S}) = 0.96, rnd_(6)({V}) = 0.04.",
598
- "cite_spans": [],
599
- "ref_spans": [],
600
- "eq_spans": [],
601
- "section": "Examples",
602
- "sec_num": "4"
603
- },
604
- {
605
- "text": "Although Instaatiate is again selected as the goal, MIMIC now has both task and dialogue initiatives; thus it selects both GiveOptions and lnfoSeek to achieve this goal and generates utterances (6) and (7). The additional information, in the form of valid theater choices, helps the user provide the missing value in (8), allowing MIMIC to answer the query in (9) and prompt for the next query. However, despite the limited prompt, the user provides a well-formed query in (11), triggering TakeOverTask. Thus, MIMIC answers the query and switches to an open-ended prompt in (13), relinquishing task initiative to the user.",
606
- "cite_spans": [],
607
- "ref_spans": [],
608
- "eq_spans": [],
609
- "section": "Examples",
610
- "sec_num": "4"
611
- },
612
- {
613
- "text": "In addition to its automatic adaptation capabilities, another advantage of MIMIC is the ease of modifying its adaptation behavior, enabled by the decoupling of the initiative module from the goal and strategy selection processes. For instance, a system-initiative version of MIMIC can be achieved by setting the initial bpas as follows: mt-initial({S}) = 1; md--initial({S}) -~ 1.",
614
- "cite_spans": [],
615
- "ref_spans": [],
616
- "eq_spans": [],
617
- "section": "Examples",
618
- "sec_num": "4"
619
- },
620
- {
621
- "text": "(1) S: Hello, this is MIMIC, the movie information system. [Answer]",
622
- "cite_spans": [],
623
- "ref_spans": [],
624
- "eq_spans": [],
625
- "section": "Examples",
626
- "sec_num": "4"
627
- },
628
- {
653
- "text": "[OpenPrompt] This is because in the Dempster-Shafer theory, if the initial bpas or the bpas for a cue provide definite evidence for drawing a certain conclusion, then no subsequent cue has any effect on changing that conclusion. Thus, MIMIC will retain both initiatives throughout the dialogue. Alternatively, versions of MIMIC with different adaptation behavior can be achieved by tailoring the initial bpas and/or the bpas for each cue based on the application. For instance, for an electronic sales agent, the effect oflnvalidAction can be increased so that when the user orders an out-of-stock item, the system will always take over task initiative and suggest an alternative item.",
654
- "cite_spans": [],
655
- "ref_spans": [],
656
- "eq_spans": [],
657
- "section": "Examples",
658
- "sec_num": "4"
659
- },
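A quick worked check of this absorbing behavior: with m_initial({S}) = 1, combining with any cue bpa m_c leaves all mass on {S}, since every nonempty intersection of focal elements is {S}. For example, with m_c({U}) = 0.35 and m_c(Θ) = 0.65, the unnormalized mass on {S} is 1 * 0.65 = 0.65, the conflict is 1 * 0.35 = 0.35, and renormalizing gives 0.65 / (1 - 0.35) = 1.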
660
- {
661
- "text": "We conducted two experiments to evaluate MIMIC's automatic adaptation capabilities. We compared MIMIC with two control systems: MIMIC-SI, a system-initiative version of MIMIC in which the system retains both initiatives throughout the dialogue, and MIMIC-MI, a nonadaptive mixed-initiative version of MIMIC that resembles the behavior of many existing dialogue systems. In this section we summarize these experiments and their results. A companion paper describes the evaluation process and results in further detail (Chu-Carroll and Nickerson, 2000) . Each experiment involved eight users interacting with MIMIC and MIMIC-SI or MIMIC-MI to perform a set of tasks, each requiring the user to obtain specific movie information. User satisfaction was assessed by asking the subjects to fill out a questionnaire after interacting with each version of the system. Furthermore, a number of performance features, largely based on the PARADISE dialogue evaluation scheme (Walker et al., 1997) , were automatically logged, derived, or manually annotated. In addition, we logged the cues automatically detected in each user utterance, as well as the initiative distribution for each turn and the dialogue acts selected to generate each system response.",
662
- "cite_spans": [
663
- {
664
- "start": 517,
665
- "end": 550,
666
- "text": "(Chu-Carroll and Nickerson, 2000)",
667
- "ref_id": "BIBREF3"
668
- },
669
- {
670
- "start": 964,
671
- "end": 985,
672
- "text": "(Walker et al., 1997)",
673
- "ref_id": "BIBREF21"
674
- }
675
- ],
676
- "ref_spans": [],
677
- "eq_spans": [],
678
- "section": "System Evaluation",
679
- "sec_num": "5"
680
- },
681
- {
682
- "text": "The features gathered from the dialogue interactions were analyzed along three dimensions: system performance, discourse features (in terms of characteristics of the resulting dialogues, such as the cues detected in user utterances), and initiative distribution. Our results show that MIMIC's adaptation capabilities 1) led to better system performance in terms of user satisfaction, dialogue efficiency (shorter dialogues), and dialogue quality (fewer ASR timeouts), and 2) better matched user expectations (by giving up task initiative when the user intends to have control of the dialogue interaction) and more efficiently resolved dialogue anomalies (by taking over task initiative to provide guidance when no progress is made in the dialogue, or to constrain user utterances when ASR performance is poor).",
683
- "cite_spans": [],
684
- "ref_spans": [],
685
- "eq_spans": [],
686
- "section": "System Evaluation",
687
- "sec_num": "5"
688
- },
689
- {
690
- "text": "In this paper, we discussed MIMIC, an adaptive mixedinitiative spoken dialogue system. MIMIC's automatic adaptation capabilities allow it to employ appropriate strategies based on the cumulative effect of information dynamically extracted from user utterances during dialogue interactions, enabling MIMIC to provide more cooperative and satisfactory responses than existing nonadaptive systems. Furthermore, MIMIC was implemented as a general framework for information query systems by decoupling its initiative module from the goal selection process, while allowing the outcome of both processes to jointly determine the response strategies employed. This feature enables easy modification to MIMIC's adaptation behavior, thus allowing the framework to be used for rapid development and comparisons of experimental prototypes of spoken dialogue systems.",
691
- "cite_spans": [],
692
- "ref_spans": [],
693
- "eq_spans": [],
694
- "section": "Conclusions",
695
- "sec_num": "6"
696
- },
697
- {
698
- "text": "See(Nakatani and Chu-Carroll, 2000) for how MIMIC's dialoguelevel knowledge is used to override default prosodic assignments for concept-to-speech generation.",
699
- "cite_spans": [],
700
- "ref_spans": [],
701
- "eq_spans": [],
702
- "section": "",
703
- "sec_num": null
704
- }
705
- ],
706
- "back_matter": [
707
- {
708
- "text": "The author would like to thank Egbert Ammicht, Antoine Saad, Qiru Zhou, Wolfgang Reichl, and Stefan Ortmanns for their help on system integration and on ASR/telephony server development, Jill Nickerson for conducting the evaluation experiments, and Bob Carpenter, Diane Litman, Christine Nakatani, and Jill Nickerson for their comments on an earlier draft of this paper.",
709
- "cite_spans": [],
710
- "ref_spans": [],
711
- "eq_spans": [],
712
- "section": "Acknowledgments",
713
- "sec_num": null
714
- }
715
- ],
716
- "bib_entries": {
717
- "BIBREF0": {
718
- "ref_id": "b0",
719
- "title": "Dialog in the RAILTEL telephone-based system",
720
- "authors": [
721
- {
722
- "first": "S",
723
- "middle": [],
724
- "last": "Bennacef",
725
- "suffix": ""
726
- },
727
- {
728
- "first": "L",
729
- "middle": [],
730
- "last": "Devillers",
731
- "suffix": ""
732
- },
733
- {
734
- "first": "S",
735
- "middle": [],
736
- "last": "Rosset",
737
- "suffix": ""
738
- },
739
- {
740
- "first": "L",
741
- "middle": [],
742
- "last": "Lamel",
743
- "suffix": ""
744
- }
745
- ],
746
- "year": 1996,
747
- "venue": "Proceedings of the 4th International Conference on Spoken Language Processing",
748
- "volume": "",
749
- "issue": "",
750
- "pages": "",
751
- "other_ids": {},
752
- "num": null,
753
- "urls": [],
754
- "raw_text": "S. Bennacef, L. Devillers, S. Rosset, and L. Lamel. 1996. Dialog in the RAILTEL telephone-based sys- tem. In Proceedings of the 4th International Confer- ence on Spoken Language Processing.",
755
- "links": null
756
- },
757
- "BIBREF1": {
758
- "ref_id": "b1",
759
- "title": "An evidential model for tracking initiative in collaborative dialogue interactions",
760
- "authors": [
761
- {
762
- "first": "Jennifer",
763
- "middle": [],
764
- "last": "Chu",
765
- "suffix": ""
766
- },
767
- {
768
- "first": "-",
769
- "middle": [],
770
- "last": "Carroll",
771
- "suffix": ""
772
- },
773
- {
774
- "first": "Michael",
775
- "middle": [
776
- "K"
777
- ],
778
- "last": "Brown",
779
- "suffix": ""
780
- }
781
- ],
782
- "year": 1998,
783
- "venue": "User Modeling and User-Adapted Interaction",
784
- "volume": "8",
785
- "issue": "3-4",
786
- "pages": "215--253",
787
- "other_ids": {},
788
- "num": null,
789
- "urls": [],
790
- "raw_text": "Jennifer Chu-Carroll and Michael K. Brown. 1998. An evidential model for tracking initiative in collabora- tive dialogue interactions. User Modeling and User- Adapted Interaction, 8(3-4):215-253.",
791
- "links": null
792
- },
793
- "BIBREF2": {
794
- "ref_id": "b2",
795
- "title": "Vectorbased natural language call routing",
796
- "authors": [
797
- {
798
- "first": "Jennifer",
799
- "middle": [],
800
- "last": "Chu",
801
- "suffix": ""
802
- },
803
- {
804
- "first": "-",
805
- "middle": [],
806
- "last": "Carroll",
807
- "suffix": ""
808
- },
809
- {
810
- "first": "Bob",
811
- "middle": [],
812
- "last": "Carpenter",
813
- "suffix": ""
814
- }
815
- ],
816
- "year": 1999,
817
- "venue": "Computational Linguistics",
818
- "volume": "25",
819
- "issue": "3",
820
- "pages": "361--388",
821
- "other_ids": {},
822
- "num": null,
823
- "urls": [],
824
- "raw_text": "Jennifer Chu-Carroll and Bob Carpenter. 1999. Vector- based natural language call routing. Computational Linguistics, 25(3):361-388.",
825
- "links": null
826
- },
827
- "BIBREF3": {
828
- "ref_id": "b3",
829
- "title": "Evaluating automatic dialogue strategy adaptation for a spoken dialogue system",
830
- "authors": [
831
- {
832
- "first": "Jennifer",
833
- "middle": [],
834
- "last": "Chu",
835
- "suffix": ""
836
- },
837
- {
838
- "first": "-",
839
- "middle": [],
840
- "last": "Carroll",
841
- "suffix": ""
842
- },
843
- {
844
- "first": "Jill",
845
- "middle": [
846
- "S"
847
- ],
848
- "last": "Nickerson",
849
- "suffix": ""
850
- }
851
- ],
852
- "year": 2000,
853
- "venue": "Proceedings of the 1st Conference of the North American Chapter of the Association for Computational Linguistics",
854
- "volume": "",
855
- "issue": "",
856
- "pages": "",
857
- "other_ids": {},
858
- "num": null,
859
- "urls": [],
860
- "raw_text": "Jennifer Chu-Carroll and Jill S. Nickerson. 2000. Evalu- ating automatic dialogue strategy adaptation for a spo- ken dialogue system. In Proceedings of the 1st Con- ference of the North American Chapter of the Associ- ation for Computational Linguistics. To appear.",
861
- "links": null
862
- },
863
- "BIBREF4": {
864
- "ref_id": "b4",
865
- "title": "The Dempster-Shafer theory of evidence",
866
- "authors": [
867
- {
868
- "first": "Jean",
869
- "middle": [],
870
- "last": "Gordon",
871
- "suffix": ""
872
- },
873
- {
874
- "first": "Edward",
875
- "middle": [
876
- "H"
877
- ],
878
- "last": "Shortliffe",
879
- "suffix": ""
880
- }
881
- ],
882
- "year": 1984,
883
- "venue": "Rule-Based Expert Systems: The MYCIN Experiments of the Stanford Heuristic Programming Project",
884
- "volume": "13",
885
- "issue": "",
886
- "pages": "272--292",
887
- "other_ids": {},
888
- "num": null,
889
- "urls": [],
890
- "raw_text": "Jean Gordon and Edward H. Shortliffe. 1984. The Dempster-Shafer theory of evidence. In Bruce Buchanan and Edward Shortliffe, editors, Rule-Based Expert Systems: The MYCIN Experiments of the Stanford Heuristic Programming Project, chapter 13, pages 272-292. Addison-Wesley.",
891
- "links": null
892
- },
893
- "BIBREF5": {
894
- "ref_id": "b5",
895
- "title": "Spoken language dialog system development and evaluation at LIMSI",
896
- "authors": [
897
- {
898
- "first": "Lori",
899
- "middle": [],
900
- "last": "Lamel",
901
- "suffix": ""
902
- }
903
- ],
904
- "year": 1998,
905
- "venue": "Proceedings of the International Symposium on Spoken Dialogue",
906
- "volume": "",
907
- "issue": "",
908
- "pages": "9--17",
909
- "other_ids": {},
910
- "num": null,
911
- "urls": [],
912
- "raw_text": "Lori Lamel. 1998. Spoken language dialog system de- velopment and evaluation at LIMSI. In Proceedings of the International Symposium on Spoken Dialogue, pages 9-17.",
913
- "links": null
914
- },
915
- "BIBREF6": {
916
- "ref_id": "b6",
917
- "title": "Empirically evaluating an adaptable spoken dialogue system",
918
- "authors": [
919
- {
920
- "first": "Diane",
921
- "middle": [
922
- "J"
923
- ],
924
- "last": "Litman",
925
- "suffix": ""
926
- },
927
- {
928
- "first": "Shimei",
929
- "middle": [],
930
- "last": "Pan",
931
- "suffix": ""
932
- }
933
- ],
934
- "year": 1999,
935
- "venue": "Proceedings of the 7th International Conference on User Modeling",
936
- "volume": "",
937
- "issue": "",
938
- "pages": "55--64",
939
- "other_ids": {},
940
- "num": null,
941
- "urls": [],
942
- "raw_text": "Diane J. Litman and Shimei Pan. 1999. Empirically evaluating an adaptable spoken dialogue system. In Proceedings of the 7th International Conference on User Modeling, pages 55-64.",
943
- "links": null
944
- },
945
- "BIBREF7": {
946
- "ref_id": "b7",
947
- "title": "Evaluating response strategies in a web-based spoken dialogue agent",
948
- "authors": [
949
- {
950
- "first": "Diane",
951
- "middle": [
952
- "J"
953
- ],
954
- "last": "Litman",
955
- "suffix": ""
956
- },
957
- {
958
- "first": "Shimei",
959
- "middle": [],
960
- "last": "Pan",
961
- "suffix": ""
962
- },
963
- {
964
- "first": "Marilyn",
965
- "middle": [
966
- "A"
967
- ],
968
- "last": "Walker",
969
- "suffix": ""
970
- }
971
- ],
972
- "year": 1998,
973
- "venue": "Proceedings of the 36th",
974
- "volume": "",
975
- "issue": "",
976
- "pages": "",
977
- "other_ids": {},
978
- "num": null,
979
- "urls": [],
980
- "raw_text": "Diane J. Litman, Shimei Pan, and Marilyn A. Walker. 1998. Evaluating response strategies in a web-based spoken dialogue agent. In Proceedings of the 36th",
981
- "links": null
982
- },
983
- "BIBREF8": {
984
- "ref_id": "b8",
985
- "title": "Annual Meeting of the Association for Computational Linguistics",
986
- "authors": [],
987
- "year": null,
988
- "venue": "",
989
- "volume": "",
990
- "issue": "",
991
- "pages": "780--786",
992
- "other_ids": {},
993
- "num": null,
994
- "urls": [],
995
- "raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 780-786.",
996
- "links": null
997
- },
998
- "BIBREF9": {
999
- "ref_id": "b9",
1000
- "title": "Using dialogue representations for concept-to-speech generation",
1001
- "authors": [
1002
- {
1003
- "first": "Christine",
1004
- "middle": [
1005
- "H"
1006
- ],
1007
- "last": "Nakatani",
1008
- "suffix": ""
1009
- },
1010
- {
1011
- "first": "Jennifer",
1012
- "middle": [],
1013
- "last": "Chu-Carroll",
1014
- "suffix": ""
1015
- }
1016
- ],
1017
- "year": 2000,
1018
- "venue": "Proceedings of the ANLP-NAACL Workshop on Conversational Systems",
1019
- "volume": "",
1020
- "issue": "",
1021
- "pages": "",
1022
- "other_ids": {},
1023
- "num": null,
1024
- "urls": [],
1025
- "raw_text": "Christine H. Nakatani and Jennifer Chu-Carroll. 2000. Using dialogue representations for concept-to-speech generation. In Proceedings of the ANLP-NAACL Workshop on Conversational Systems.",
1026
- "links": null
1027
- },
1028
- "BIBREF10": {
1029
- "ref_id": "b10",
1030
- "title": "An efficient decoding method for real time speech recognition",
1031
- "authors": [
1032
- {
1033
- "first": "Stefan",
1034
- "middle": [],
1035
- "last": "Ortmanns",
1036
- "suffix": ""
1037
- },
1038
- {
1039
- "first": "Wolfgang",
1040
- "middle": [],
1041
- "last": "Reichl",
1042
- "suffix": ""
1043
- },
1044
- {
1045
- "first": "Wu",
1046
- "middle": [],
1047
- "last": "Chou",
1048
- "suffix": ""
1049
- }
1050
- ],
1051
- "year": 1999,
1052
- "venue": "Proceedings of the 5th European Conference on Speech Communication and Technology",
1053
- "volume": "",
1054
- "issue": "",
1055
- "pages": "",
1056
- "other_ids": {},
1057
- "num": null,
1058
- "urls": [],
1059
- "raw_text": "Stefan Ortmanns, Wolfgang Reichl, and Wu Chou. 1999. An efficient decoding method for real time speech recognition. In Proceedings of the 5th European Con- ference on Speech Communication and Technology.",
1060
- "links": null
1061
- },
1062
- "BIBREF11": {
1063
- "ref_id": "b11",
1064
- "title": "A constraint-based model for cooperative response generation in information dialogues",
1065
- "authors": [
1066
- {
1067
- "first": "Yan",
1068
- "middle": [],
1069
- "last": "Qu",
1070
- "suffix": ""
1071
- },
1072
- {
1073
- "first": "Steve",
1074
- "middle": [],
1075
- "last": "Beale",
1076
- "suffix": ""
1077
- }
1078
- ],
1079
- "year": 1999,
1080
- "venue": "Proceedings of the Sixteenth National Conference on Artificial Intelligence",
1081
- "volume": "",
1082
- "issue": "",
1083
- "pages": "",
1084
- "other_ids": {},
1085
- "num": null,
1086
- "urls": [],
1087
- "raw_text": "Yan Qu and Steve Beale. 1999. A constraint-based model for cooperative response generation in informa- tion dialogues. In Proceedings of the Sixteenth Na- tional Conference on Artificial Intelligence.",
1088
- "links": null
1089
- },
1090
- "BIBREF12": {
1091
- "ref_id": "b12",
1092
- "title": "Eliciting additional information during cooperative consultations",
1093
- "authors": [
1094
- {
1095
- "first": "Bhavani",
1096
- "middle": [],
1097
- "last": "Raskutti",
1098
- "suffix": ""
1099
- },
1100
- {
1101
- "first": "Ingrid",
1102
- "middle": [],
1103
- "last": "Zukerman",
1104
- "suffix": ""
1105
- }
1106
- ],
1107
- "year": 1993,
1108
- "venue": "Proceedings of the 15th Annual Meeting of the Cognitive Science Society",
1109
- "volume": "",
1110
- "issue": "",
1111
- "pages": "",
1112
- "other_ids": {},
1113
- "num": null,
1114
- "urls": [],
1115
- "raw_text": "Bhavani Raskutti and Ingrid Zukerman. 1993. Elicit- ing additional information during cooperative consul- tations. In Proceedings of the 15th Annual Meeting of the Cognitive Science Society.",
1116
- "links": null
1117
- },
1118
- "BIBREF13": {
1119
- "ref_id": "b13",
1120
- "title": "Decision tree state tying based on segmental clustering for acoustic modeling",
1121
- "authors": [
1122
- {
1123
- "first": "Wolfgang",
1124
- "middle": [],
1125
- "last": "Reichl",
1126
- "suffix": ""
1127
- },
1128
- {
1129
- "first": "",
1130
- "middle": [],
1131
- "last": "Wu' Chou",
1132
- "suffix": ""
1133
- }
1134
- ],
1135
- "year": 1998,
1136
- "venue": "Proceedings of the International Conference on Acoustics, Speech, and Signal Processing",
1137
- "volume": "",
1138
- "issue": "",
1139
- "pages": "",
1140
- "other_ids": {},
1141
- "num": null,
1142
- "urls": [],
1143
- "raw_text": "Wolfgang Reichl and Wu' Chou. 1998. Decision tree state tying based on segmental clustering for acoustic modeling. In Proceedings of the International Confer- ence on Acoustics, Speech, and Signal Processing.",
1144
- "links": null
1145
- },
1146
- "BIBREF14": {
1147
- "ref_id": "b14",
1148
- "title": "The SMART Retrieval System",
1149
- "authors": [
1150
- {
1151
- "first": "Gerald",
1152
- "middle": [],
1153
- "last": "Salton",
1154
- "suffix": ""
1155
- }
1156
- ],
1157
- "year": 1971,
1158
- "venue": "",
1159
- "volume": "",
1160
- "issue": "",
1161
- "pages": "",
1162
- "other_ids": {},
1163
- "num": null,
1164
- "urls": [],
1165
- "raw_text": "Gerald Salton. 1971. The SMART Retrieval System. Prentice Hall, Inc.",
1166
- "links": null
1167
- },
1168
- "BIBREF15": {
1169
- "ref_id": "b15",
1170
- "title": "Development and preliminary evaluation of the MIT ATIS system",
1171
- "authors": [
1172
- {
1173
- "first": "Stephanie",
1174
- "middle": [],
1175
- "last": "Seneff",
1176
- "suffix": ""
1177
- },
1178
- {
1179
- "first": "James",
1180
- "middle": [],
1181
- "last": "Glass",
1182
- "suffix": ""
1183
- },
1184
- {
1185
- "first": "David",
1186
- "middle": [],
1187
- "last": "Goddeau",
1188
- "suffix": ""
1189
- },
1190
- {
1191
- "first": "David",
1192
- "middle": [],
1193
- "last": "Goodine",
1194
- "suffix": ""
1195
- },
1196
- {
1197
- "first": "Lynette",
1198
- "middle": [],
1199
- "last": "Hirschman",
1200
- "suffix": ""
1201
- },
1202
- {
1203
- "first": "Hong",
1204
- "middle": [],
1205
- "last": "Leung",
1206
- "suffix": ""
1207
- },
1208
- {
1209
- "first": "Michael",
1210
- "middle": [],
1211
- "last": "Phillips",
1212
- "suffix": ""
1213
- },
1214
- {
1215
- "first": "Joseph",
1216
- "middle": [],
1217
- "last": "Polifroni",
1218
- "suffix": ""
1219
- },
1220
- {
1221
- "first": "Victor",
1222
- "middle": [],
1223
- "last": "Zue",
1224
- "suffix": ""
1225
- }
1226
- ],
1227
- "year": 1991,
1228
- "venue": "Proceedings of the DARPA Speech and Natural Language Workshop",
1229
- "volume": "",
1230
- "issue": "",
1231
- "pages": "88--93",
1232
- "other_ids": {},
1233
- "num": null,
1234
- "urls": [],
1235
- "raw_text": "Stephanie Seneff, James Glass, David Goddeau, David Goodine, Lynette Hirschman, Hong Leung, Michael Phillips, Joseph Polifroni, and Victor Zue. 1991. De- velopment and preliminary evaluation of the MIT ATIS system. In Proceedings of the DARPA Speech and Natural Language Workshop, pages 88-93.",
1236
- "links": null
1237
- },
1238
- "BIBREF16": {
1239
- "ref_id": "b16",
1240
- "title": "A Mathematical Theory of Evidence",
1241
- "authors": [
1242
- {
1243
- "first": "Glenn",
1244
- "middle": [],
1245
- "last": "Shafer",
1246
- "suffix": ""
1247
- }
1248
- ],
1249
- "year": 1976,
1250
- "venue": "",
1251
- "volume": "",
1252
- "issue": "",
1253
- "pages": "",
1254
- "other_ids": {},
1255
- "num": null,
1256
- "urls": [],
1257
- "raw_text": "Glenn Shafer. 1976. A Mathematical Theory of Evi- dence. Princeton University Press.",
1258
- "links": null
1259
- },
1260
- "BIBREF17": {
1261
- "ref_id": "b17",
1262
- "title": "Multilingual Text-to-Speech Synthesis: The Bell Labs Approach",
1263
- "authors": [],
1264
- "year": 1998,
1265
- "venue": "",
1266
- "volume": "",
1267
- "issue": "",
1268
- "pages": "",
1269
- "other_ids": {},
1270
- "num": null,
1271
- "urls": [],
1272
- "raw_text": "Richard Sproat, editor. 1998. Multilingual Text-to- Speech Synthesis: The Bell Labs Approach. Kluwer, Boston, MA.",
1273
- "links": null
1274
- },
1275
- "BIBREF18": {
1276
- "ref_id": "b18",
1277
- "title": "The CommandTalk spoken dialogue system",
1278
- "authors": [
1279
- {
1280
- "first": "Amanda",
1281
- "middle": [],
1282
- "last": "Stent",
1283
- "suffix": ""
1284
- },
1285
- {
1286
- "first": "John",
1287
- "middle": [],
1288
- "last": "Dowding",
1289
- "suffix": ""
1290
- },
1291
- {
1292
- "first": "Jean",
1293
- "middle": [
1294
- "Mark"
1295
- ],
1296
- "last": "Gawron",
1297
- "suffix": ""
1298
- },
1299
- {
1300
- "first": "Elizabeth",
1301
- "middle": [
1302
- "Owen"
1303
- ],
1304
- "last": "Bratt",
1305
- "suffix": ""
1306
- },
1307
- {
1308
- "first": "Robert",
1309
- "middle": [],
1310
- "last": "Moore",
1311
- "suffix": ""
1312
- }
1313
- ],
1314
- "year": 1999,
1315
- "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics",
1316
- "volume": "",
1317
- "issue": "",
1318
- "pages": "183--190",
1319
- "other_ids": {},
1320
- "num": null,
1321
- "urls": [],
1322
- "raw_text": "Amanda Stent, John Dowding, Jean Mark Gawron, Eliz- abeth Owen Bratt, and Robert Moore. 1999. The CommandTalk spoken dialogue system. In Proceed- ings of the 37th Annual Meeting of the Association for Computational Linguistics, pages 183-190.",
1323
- "links": null
1324
- },
1325
- "BIBREF19": {
1326
- "ref_id": "b19",
1327
- "title": "From plan critiquing to clarification dialogue for cooperative response generation",
1328
- "authors": [
1329
- {
1330
- "first": "Robin",
1331
- "middle": [],
1332
- "last": "Peter Van Beek",
1333
- "suffix": ""
1334
- },
1335
- {
1336
- "first": "Ken",
1337
- "middle": [],
1338
- "last": "Cohen",
1339
- "suffix": ""
1340
- },
1341
- {
1342
- "first": "",
1343
- "middle": [],
1344
- "last": "Schmidt",
1345
- "suffix": ""
1346
- }
1347
- ],
1348
- "year": 1993,
1349
- "venue": "Computational Intelligence",
1350
- "volume": "9",
1351
- "issue": "2",
1352
- "pages": "132--154",
1353
- "other_ids": {},
1354
- "num": null,
1355
- "urls": [],
1356
- "raw_text": "Peter van Beek, Robin Cohen, and Ken Schmidt. 1993. From plan critiquing to clarification dialogue for co- operative response generation. Computational Intelli- gence, 9(2):132-154.",
1357
- "links": null
1358
- },
1359
- "BIBREF20": {
1360
- "ref_id": "b20",
1361
- "title": "Mixed initiative in dialogue: An investigation into discourse segmentation",
1362
- "authors": [
1363
- {
1364
- "first": "Marilyn",
1365
- "middle": [],
1366
- "last": "Walker",
1367
- "suffix": ""
1368
- },
1369
- {
1370
- "first": "Steve",
1371
- "middle": [],
1372
- "last": "Whittaker",
1373
- "suffix": ""
1374
- }
1375
- ],
1376
- "year": 1990,
1377
- "venue": "Proceedings of the 28th Annual Meeting of the Association for Computational Linguistics",
1378
- "volume": "",
1379
- "issue": "",
1380
- "pages": "70--78",
1381
- "other_ids": {},
1382
- "num": null,
1383
- "urls": [],
1384
- "raw_text": "Marilyn Walker and Steve Whittaker. 1990. Mixed ini- tiative in dialogue: An investigation into discourse segmentation. In Proceedings of the 28th Annual Meeting of the Association for Computational Lin- guistics, pages 70-78.",
1385
- "links": null
1386
- },
1387
- "BIBREF21": {
1388
- "ref_id": "b21",
1389
- "title": "PARADISE: A framework for evaluating spoken dialogue agents",
1390
- "authors": [
1391
- {
1392
- "first": "Marilyn",
1393
- "middle": [
1394
- "A"
1395
- ],
1396
- "last": "Walker",
1397
- "suffix": ""
1398
- },
1399
- {
1400
- "first": "Diane",
1401
- "middle": [
1402
- "J"
1403
- ],
1404
- "last": "Litman",
1405
- "suffix": ""
1406
- },
1407
- {
1408
- "first": "Candance",
1409
- "middle": [
1410
- "A"
1411
- ],
1412
- "last": "Kamm",
1413
- "suffix": ""
1414
- },
1415
- {
1416
- "first": "Alicia",
1417
- "middle": [],
1418
- "last": "Abella",
1419
- "suffix": ""
1420
- }
1421
- ],
1422
- "year": 1997,
1423
- "venue": "Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics",
1424
- "volume": "",
1425
- "issue": "",
1426
- "pages": "271--280",
1427
- "other_ids": {},
1428
- "num": null,
1429
- "urls": [],
1430
- "raw_text": "Marilyn A. Walker, Diane J. Litman, Candance A. Kamm, and Alicia Abella. 1997. PARADISE: A framework for evaluating spoken dialogue agents. In Proceedings of the 35th Annual Meeting of the Associ- ation for Computational Linguistics, pages 271-280.",
1431
- "links": null
1432
- },
1433
- "BIBREF22": {
1434
- "ref_id": "b22",
1435
- "title": "Cues and control in expert-client dialogues",
1436
- "authors": [
1437
- {
1438
- "first": "Steve",
1439
- "middle": [],
1440
- "last": "Whittaker",
1441
- "suffix": ""
1442
- },
1443
- {
1444
- "first": "Phil",
1445
- "middle": [],
1446
- "last": "Stenton",
1447
- "suffix": ""
1448
- }
1449
- ],
1450
- "year": 1988,
1451
- "venue": "Proceedings of the 26th Annual Meeting of the Association for Computational Linguistics",
1452
- "volume": "",
1453
- "issue": "",
1454
- "pages": "123--130",
1455
- "other_ids": {},
1456
- "num": null,
1457
- "urls": [],
1458
- "raw_text": "Steve Whittaker and Phil Stenton. 1988. Cues and con- trol in expert-client dialogues. In Proceedings of the 26th Annual Meeting of the Association for Computa- tional Linguistics, pages 123-130.",
1459
- "links": null
1460
- },
1461
- "BIBREF23": {
1462
- "ref_id": "b23",
1463
- "title": "Speech technology integration and research platform: A system study",
1464
- "authors": [
1465
- {
1466
- "first": "Qiru",
1467
- "middle": [],
1468
- "last": "Zhou",
1469
- "suffix": ""
1470
- },
1471
- {
1472
- "first": "Chin-Hui",
1473
- "middle": [],
1474
- "last": "Lee",
1475
- "suffix": ""
1476
- },
1477
- {
1478
- "first": "Wu",
1479
- "middle": [],
1480
- "last": "Chou",
1481
- "suffix": ""
1482
- },
1483
- {
1484
- "first": "Andrew",
1485
- "middle": [],
1486
- "last": "Pargellis",
1487
- "suffix": ""
1488
- }
1489
- ],
1490
- "year": 1997,
1491
- "venue": "Proceedings of the 5th European Conference on Speech Communication and Technology",
1492
- "volume": "",
1493
- "issue": "",
1494
- "pages": "",
1495
- "other_ids": {},
1496
- "num": null,
1497
- "urls": [],
1498
- "raw_text": "Qiru Zhou, Chin-Hui Lee, Wu Chou, and Andrew Pargel- lis. 1997. Speech technology integration and research platform: A system study. In Proceedings of the 5th European Conference on Speech Communication and Technology.",
1499
- "links": null
1500
- }
1501
- },
1502
- "ref_entries": {
1503
- "FIGREF0": {
1504
- "uris": null,
1505
- "type_str": "figure",
1506
- "num": null,
1507
- "text": "Figure 2(a) shows the frame-based semantic representation for the utterance \"What time is Analyze This playing Semantic Representation and Task Specification in Montclair?\""
1508
- },
1509
- "FIGREF1": {
1510
- "uris": null,
1511
- "type_str": "figure",
1512
- "num": null,
1513
- "text": "41n practice, this is the preferred initiative holder since practical reasons may prevent the dialogue participant from actually holding the initiative. For instance, if having task initiative dictates inclusion of additional helpful information, this can only be realized if M1M1C's knowledge base provides such information. ({speaker}) = 0.35; mr-tot(O) = 0.65 mt-,~ni({hearer}) = 0.35; mt-nn~(O) = 0.65 mt-i~({hearer}) = 0.35; mt-ia(O) = 0.65 mt-iar({hearer}) = 0.35; mt-iar(O) = 0.65 mt-aa({hearer}) = 0.35; mt-a~(O) = 0.65 mt .... ({speaker}) = 0.35; mt .... ({speaker}) = 0.35; ma-tot(O) = 0.65 md-nni({hearer}) = 0.35; md-nni(O) -~-0.65 md-ia ({hearer}) = 0.7; md-ia (O) = 0.3 ma-iar({hearer}) = 0.7; ma-iar(O) = 0.3 ma-aa({hearer}) = 0.7; md_a~(O) = 0.3 ma .... ({speaker}) = 0.7; md .... (O) = 0"
1514
- },
1515
- "FIGREF2": {
1516
- "uris": null,
1517
- "type_str": "figure",
1518
- "num": null,
1519
- "text": "are the initial bpas, which, based on MIMIC's role as an information provider, are mt-(3)({S}) = 0.3, mt-(3)({U}) = 0.7; = 0.6, md-(3)({V}) = 0.4."
1520
- },
1521
- "FIGREF3": {
1522
- "uris": null,
1523
- "type_str": "figure",
1524
- "num": null,
1525
- "text": "): mt-,,({S}) = 0.35, mt_,,(O) = 0.65; md-aa({S}) = 0.7, md-aa(O) = 0.3. Combining the current bpas with the effects of the observed cue, we obtain the following new bpas: mt-(4)({S}) = 0.4, mt_(a)({U}) = 0.6; md_(4)({S}) = 0.83, md_(4)({U}) = 0.17."
1526
- },
1527
- "FIGREF4": {
1528
- "uris": null,
1529
- "type_str": "figure",
1530
- "num": null,
1531
- "text": "Annotated Dialogue Shown inFigure 1"
1532
- },
1533
- "TABREF2": {
1534
- "type_str": "table",
1535
- "text": "Mappings Between Dialogue Acts and Utterance Templates",
1536
- "content": "<table/>",
1537
- "html": null,
1538
- "num": null
1539
- }
1540
- }
1541
- }
1542
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Full_text_JSON/prefixA/json/A00/A00-1015.json DELETED
@@ -1,844 +0,0 @@
1
- {
2
- "paper_id": "A00-1015",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:24.026319Z"
6
- },
7
- "title": "J avox: A Toolkit for Building Speech-Enabled Applications",
8
- "authors": [
9
- {
10
- "first": "Michael",
11
- "middle": [
12
- "S"
13
- ],
14
- "last": "Fulkerson",
15
- "suffix": "",
16
- "affiliation": {
17
- "laboratory": "",
18
- "institution": "Duke University Durham",
19
- "location": {
20
- "postCode": "27708",
21
- "region": "North Carolina",
22
- "country": "USA"
23
- }
24
- },
25
- "email": ""
26
- },
27
- {
28
- "first": "Alan",
29
- "middle": [
30
- "W"
31
- ],
32
- "last": "Biermann",
33
- "suffix": "",
34
- "affiliation": {
35
- "laboratory": "",
36
- "institution": "Duke University Durham",
37
- "location": {
38
- "postCode": "27708",
39
- "region": "North Carolina",
40
- "country": "USA"
41
- }
42
- },
43
- "email": ""
44
- }
45
- ],
46
- "year": "",
47
- "venue": null,
48
- "identifiers": {},
49
- "abstract": "JAVOX provides a mechanism for the development of spoken-language systems from existing desktop applications. We present an architecture that allows existing Java 1 programs to be speech-enabled with no source-code modification, through the use of reflection and automatic modification to the application's compiled code. The grammars used in JAvox are based on the Java Speech Grammar Format (JSGF); JAVOX grammars have an additional semantic component based on our JAVOX Scripting Language (JSL). JAVOX has been successfully demonstrated on real-world applications.",
50
- "pdf_parse": {
51
- "paper_id": "A00-1015",
52
- "_pdf_hash": "",
53
- "abstract": [
54
- {
55
- "text": "JAVOX provides a mechanism for the development of spoken-language systems from existing desktop applications. We present an architecture that allows existing Java 1 programs to be speech-enabled with no source-code modification, through the use of reflection and automatic modification to the application's compiled code. The grammars used in JAvox are based on the Java Speech Grammar Format (JSGF); JAVOX grammars have an additional semantic component based on our JAVOX Scripting Language (JSL). JAVOX has been successfully demonstrated on real-world applications.",
56
- "cite_spans": [],
57
- "ref_spans": [],
58
- "eq_spans": [],
59
- "section": "Abstract",
60
- "sec_num": null
61
- }
62
- ],
63
- "body_text": [
64
- {
65
- "text": "1 Overview JAVOX is an implemented set of tools that allows software developers to speech-enable existing applications. The process requires no changes to the program's source code: Speech capacity is pluggedin to the existing code by modifying the compiled program as it loads. JAVOX is intended to provide similar functionality to that usually associated with menus and mouse actions in graphical user interfaces (GUIs). It is completely programmable -developers can provide a speech interface to whatever functionality they desire. Jivox has been successfully demonstrated with several GUI-based applications.",
66
- "cite_spans": [],
67
- "ref_spans": [],
68
- "eq_spans": [],
69
- "section": "",
70
- "sec_num": null
71
- },
72
- {
73
- "text": "Previous systems to assist in the development of spoken-langnage systems (SLSs) have focused on building stand-alone, customized applications, such as (Sutton et al., 1996) and (Pargellis et al., 1999) . The goal of the JAVOX toolkit is to speech-enable traditional desktop applications -this is similar to the goals of the MELISSA project (Schmidt et al., 1998) . It is intended to both speed the development of SLSs and to localize the speech-specific code within the application. JAVOX allows developers to add speech interfaces to applications at the end of the development process; SLSs no longer need to be built from the ground up.",
74
- "cite_spans": [
75
- {
76
- "start": 151,
77
- "end": 172,
78
- "text": "(Sutton et al., 1996)",
79
- "ref_id": "BIBREF5"
80
- },
81
- {
82
- "start": 177,
83
- "end": 201,
84
- "text": "(Pargellis et al., 1999)",
85
- "ref_id": "BIBREF2"
86
- },
87
- {
88
- "start": 340,
89
- "end": 362,
90
- "text": "(Schmidt et al., 1998)",
91
- "ref_id": "BIBREF3"
92
- }
93
- ],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "",
97
- "sec_num": null
98
- },
99
- {
100
- "text": "We will briefly present an overview of how JAVOX works, including its major modules. First, we 1Java and Java Speech are registered trademarks of Sun Microsystems, Inc.",
101
- "cite_spans": [],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "",
105
- "sec_num": null
106
- },
107
- {
108
- "text": "will examine TRANSLATOR, the implemented JAVOX natural language processing (NLP) component; its role is to translate from natural language utterances to the JhVOX Scripting Language (JSL). Next, we will discuss JSL in conjunction with a discussion of EXECUTER, the interface between JAVOX and the application. We will explain the JhvOX infrastructure and its current implementation in Java. In conclusion, we will discuss the current state of the project and where it is going.",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "",
113
- "sec_num": null
114
- },
115
- {
116
- "text": "Basic Operation Jivox can be used as the sole location of NLP for an application; the application is written as a nonspeech-enabled program and JhvOX adds the speech capability. The current implementation is written in Java and works with Java programs. The linkage between the application program and JhvOX is created by modifying -at load time -all constructors in the application to register new objects with JAVOX. For this reason, the application's source code does not need any modification to enable JAVOX. A thorough discussion of this technique is presented in Section 4. The schematic in Figure 1 shows a high-level overview of the JAVOX architecture.",
117
- "cite_spans": [],
118
- "ref_spans": [
119
- {
120
- "start": 598,
121
- "end": 606,
122
- "text": "Figure 1",
123
- "ref_id": "FIGREF1"
124
- }
125
- ],
126
- "eq_spans": [],
127
- "section": "2",
128
- "sec_num": null
129
- },
130
- {
131
- "text": "Issuing a voice command begins with a user utterance, which the speech recognizer processes and passes to the NLP component, TRANSLATOR. We are using the IBM implementation of Sun's Java Speech application program interface (API) (Sun Microsystems, Inc., 1998) in conjunction with IBM's VIAVOICE. The job of TRANSLATORor a different module conforming to its API -is to translate the utterance into a form that represents the corresponding program actions. The current implementation of TRANSLATOR uses a context-free grammar, with each rule carrying an optional JSL fragment. A typical bottom-up parser processes utterances and a complete JSL program results. The resulting JSL is forwarded to EXECUTER, where the JSL code is executed. For example, in a hypothetical banking application, the utterance add $100 to the account might be translated into the JSL command: myBalance = myBalance + i00; The job of EXECUTER -or a different module that conforms to EXECUTER'S API -is to execute and monitor upcalls into the running application. The upcalls are the actual functions that would be made by the appropriate mouse clicks or menu selections had the user not used speech. For this reason, we are currently concentrating our efforts on event-driven programs, the class of most GUI applications. Their structure is usually amenable to this approach. Our implementation of EXECUTER performs the upcalls by interpreting and executing JSL, though the technology could be used with systems other than JSL. In the banking example, EXECUTER would identify the myBalemce variable and increment it by $100.",
132
- "cite_spans": [
133
- {
134
- "start": 230,
135
- "end": 260,
136
- "text": "(Sun Microsystems, Inc., 1998)",
137
- "ref_id": "BIBREF4"
138
- }
139
- ],
140
- "ref_spans": [],
141
- "eq_spans": [],
142
- "section": "2",
143
- "sec_num": null
144
- },
145
- {
146
- "text": "The main JAVOX components, TRANSLATOR and EXECUTER, are written to flexible APIs. Developers may choose to use their own custom components instead of these two. Those who want a different NLP scheme can implement a different version of TRANSLATOR and -as long as it outputs JSLstill use EXECUTER. Conversely, those who want a different scripting system can replace JSL and still use TRANSLATOR and even EXECUTER's low-level infrastructure.",
147
- "cite_spans": [],
148
- "ref_spans": [],
149
- "eq_spans": [],
150
- "section": "2",
151
- "sec_num": null
152
- },
153
- {
154
- "text": "The JAVOX infrastructure is not tied to any particular NLP method; in fact, the JAVOX grammar system is the second NLP implementation we have used. It is presented here because it is straightforward, easy to implement, and surprisingly powerful. JAVOX grammars axe based on Sun's Java Speech Grammar Format (JSGF) (Sun Microsystems, Inc., 1998) . JSGF is a rule-based, speech-recognition grammar, designed to specify acceptable input to a recognizer. In JAVOX grammars, each J S G F rule may be augmented with a fragment of JAVOX Scripting Language code -we refer to JAVOX grammars as scriptable grammars. The result of parsing an utterance with a JAVOX grammar is a complete piece of JSL code, which is then interpreted to perform the action specified by the user. The process of speech-enabling an application in JAVOX consists of writing a grammar that contains the language to be used and the corresponding actions to be performed. Building on top of 3SGF means -in many cases -only one file is needed to contain all application-specific information. JSL-specific code is automatically stripped from the grammar at runtime, leaving an ordinary JSGF grammar. This JSGF grammar is sent to a Java-Speech-compliant recognizer as its input grammar. In the current Java implementation, each Java source file (Foo. java) can have an associated JAVOX grammar file (Foo. gram) that contains all the information needed to speak to the application. Encapsulating all natural language information in one file also means that porting the application to different languages is far easier than in most SLSs.",
155
- "cite_spans": [
156
- {
157
- "start": 314,
158
- "end": 344,
159
- "text": "(Sun Microsystems, Inc., 1998)",
160
- "ref_id": "BIBREF4"
161
- }
162
- ],
163
- "ref_spans": [],
164
- "eq_spans": [],
165
- "section": "J a v o x G r a m m a r s",
166
- "sec_num": "3"
167
- },
168
- {
169
- "text": "Since JSGF grammars are primarily speechrecognition grammars, they lack the ability to encode semantic information. They only possess a limited tag mechanism. Tags allow the recognizer to output a canonical representation of the utterance instead of the recognition verbatim. For example, public <ACTION> = move [the] <PART> <DIR>; public <PART> = eyes; public <PART> = ( cap I hat ); public <DIR> = up; public <DIR> = down;",
170
- "cite_spans": [],
171
- "ref_spans": [],
172
- "eq_spans": [],
173
- "section": "S e r i p t a b l e G r a m m a r s",
174
- "sec_num": "3.1"
175
- },
176
- {
177
- "text": "Grammar 1: A JSGF fragment from the Mr. Potato Head domain. the tag rm may be the output from both delete the file and remove it.",
178
- "cite_spans": [],
179
- "ref_spans": [],
180
- "eq_spans": [],
181
- "section": "S e r i p t a b l e G r a m m a r s",
182
- "sec_num": "3.1"
183
- },
184
- {
185
- "text": "Tags are not implemented in JAVOX grammars; instead, we augment the rules of JSGF with fragments of a scripting language, which contains much richer semantic information than is possible with tags. TRANSLATOR receives the raw utterance from the recognizer and translates it into the appropriate semantic representation. JAvox grammars do not mandate the syntax of the additional semantic portion. Though JSL is presented here, TRANSLATOR has been used to form Prolog predicates and Visual Basic fragments.",
186
- "cite_spans": [],
187
- "ref_spans": [],
188
- "eq_spans": [],
189
- "section": "S e r i p t a b l e G r a m m a r s",
190
- "sec_num": "3.1"
191
- },
192
- {
193
- "text": "JSGF rules can be explicitly made public or are implicitly private. Public rules can be imported by other grammars and can serve as the result of a recognition; a private rule can be used in a recognition, but cannot be the sole result. The five rules in Grammar 1 are from a JSGF-only grammar fragment from the Mr. Potato Head 2 domain (discussed later). Grammar 1 allows eight sentences, such as move the eyes up, move the eyes down, move the cap up, move the cap down, and move cap up. Rule names are valid Java identifiers enclosed within angle brackets; the left-hand side (LHS) is everything to the left of the equality sign and the right-hand side (RHS) is everything to the right. JAVOX grammars include the standard constructs available in JSGF, these include: Imports Any grammar file can be imported into other grammar files, though only public rules are exported. This allows for the creation of grammar libraries. When using JSL, Java classes can also be imported.",
194
- "cite_spans": [],
195
- "ref_spans": [],
196
- "eq_spans": [],
197
- "section": "S e r i p t a b l e G r a m m a r s",
198
- "sec_num": "3.1"
199
- },
200
- {
201
- "text": "Comments Grammars can be documented using Java comments: single-line comments (//) and delimited ones (/* until */).",
202
- "cite_spans": [],
203
- "ref_spans": [],
204
- "eq_spans": [],
205
- "section": "S e r i p t a b l e G r a m m a r s",
206
- "sec_num": "3.1"
207
- },
208
- {
209
- "text": "Alternatives A vertical bar ( I ) can be used to separate alternative elements, as in the <PART> rule of Grammar 1.",
210
- "cite_spans": [],
211
- "ref_spans": [],
212
- "eq_spans": [],
213
- "section": "Parenthesis Precedence can be modified with parentheses.",
214
- "sec_num": null
215
- },
216
- {
217
- "text": "Optionals Optional elements are enclosed within brackets ([ and ] ), such as the in Grammar l's <ACTION> rule.",
218
- "cite_spans": [
219
- {
220
- "start": 57,
221
- "end": 65,
222
- "text": "([ and ]",
223
- "ref_id": null
224
- }
225
- ],
226
- "ref_spans": [],
227
- "eq_spans": [],
228
- "section": "Parenthesis Precedence can be modified with parentheses.",
229
- "sec_num": null
230
- },
231
- {
232
- "text": "2Mr. Potato Head is a registered trademark of Hasbro, Inc.",
233
- "cite_spans": [],
234
- "ref_spans": [],
235
- "eq_spans": [],
236
- "section": "Parenthesis Precedence can be modified with parentheses.",
237
- "sec_num": null
238
- },
239
- {
240
- "text": "Kleene Star Operator A postfix Kleene star (*) operator can be used to indicate that the preceding element may occur zero or more times.",
241
- "cite_spans": [],
242
- "ref_spans": [],
243
- "eq_spans": [],
244
- "section": "Parenthesis Precedence can be modified with parentheses.",
245
- "sec_num": null
246
- },
247
- {
248
- "text": "Plus Operator A similar operator to indicate that an element may appear one or more times.",
249
- "cite_spans": [],
250
- "ref_spans": [],
251
- "eq_spans": [],
252
- "section": "Parenthesis Precedence can be modified with parentheses.",
253
- "sec_num": null
254
- },
255
- {
256
- "text": "A grammar's rules may be organized however the developer wishes. Some may choose to have one rule per utterance, while others may divide rules to the parts-of-speech level or group them by semantic value. In practice, we tend to write rules grouped by semantic value for nouns and verbs and at the partsof-speech level for function words. Grammar 2 shows the Mr. Potato Head grammar augmented with JSL fragments.",
257
- "cite_spans": [],
258
- "ref_spans": [],
259
- "eq_spans": [],
260
- "section": "Parenthesis Precedence can be modified with parentheses.",
261
- "sec_num": null
262
- },
263
- {
264
- "text": "The semantic component of each rule is separated from the RHS by a colon and delimited with a brace and colon ({: until :}). Using Grammar 2, the parse and translation for Move the cap up is shown in Figure 2 .",
265
- "cite_spans": [],
266
- "ref_spans": [
267
- {
268
- "start": 200,
269
- "end": 208,
270
- "text": "Figure 2",
271
- "ref_id": "FIGREF3"
272
- }
273
- ],
274
- "eq_spans": [],
275
- "section": "Parenthesis Precedence can be modified with parentheses.",
276
- "sec_num": null
277
- },
278
- {
279
- "text": "Each rule may have either one semantic fragment or any number of named fields. A single fragment is sufficient when there is a one-to-one correlation between a lexical item and its representation in the program. Occasionally, a single lexical item may require several components to adequately express its meaning within a program. In Grammar 2, there is a one-to-one correlation between the direction of movement and the slideUp and slideDown functions in the <DIR> rules. These functions can also written as a single slide function, with the direction of the movement given by two parametric variables (cos and sin). In this situation, the direction rule (<DIR.}/F>) needs to be expressed with two values, each known as a named field. The word up may be represented by the named fields cos and sin, with the values 0 and 1 respectively.",
280
- "cite_spans": [],
281
- "ref_spans": [],
282
- "eq_spans": [],
283
- "section": "Parenthesis Precedence can be modified with parentheses.",
284
- "sec_num": null
285
- },
286
- {
287
- "text": "Another issue in JSL -which does not arise in the syntax-only JSGF -is the need to uniquely identify multiple sub-rules of the same type, when they occur in the same rule. For example, in a geometry grammar, two <POINT>s may be needed in a rule to declare a <LINE>, as in:",
288
- "cite_spans": [],
289
- "ref_spans": [],
290
- "eq_spans": [],
291
- "section": "Parenthesis Precedence can be modified with parentheses.",
292
- "sec_num": null
293
- },
294
- {
295
- "text": "public <LINE> = make a line from <POINT> to <POINT> : ...",
296
- "cite_spans": [],
297
- "ref_spans": [],
298
- "eq_spans": [],
299
- "section": "Parenthesis Precedence can be modified with parentheses.",
300
- "sec_num": null
301
- },
302
- {
303
- "text": "Uniquely numbering the sub-rules eliminates the ambiguity as to which <POINT> is which. Numbering can be used in both the RttS and the semantic portion of a rule; numbering is not allowed in the LHS of a rule. Syntactically, sub-rules are numbered with a series of single quotes3:",
304
- "cite_spans": [],
305
- "ref_spans": [],
306
- "eq_spans": [],
307
- "section": "Parenthesis Precedence can be modified with parentheses.",
308
- "sec_num": null
309
- },
310
- {
311
- "text": "public <LINE> = make a line from <POINT'> to <POINT''> : ...",
312
- "cite_spans": [],
313
- "ref_spans": [],
314
- "eq_spans": [],
315
- "section": "Parenthesis Precedence can be modified with parentheses.",
316
- "sec_num": null
317
- },
318
- {
319
- "text": "The JAVOX Scripting Language (JSL) is a standalone programming language, developed for use with the JAVOX infrastructure. JSL can be used to manipulate a running Java program and can be thought of as an application-independent macro language. The EXECUTER module interprets JSL and performs the specified actions. The specifics of JSL are not important to understanding JAVOX; for this reason, only a brief summary is presented here. JSL can read of modify the contents of an object's fields (data members) and can execute methods (member functions) on objects. Unlike Java, JSL is loosely-typed: Type checking is not done until a given method is executed. JSL has its own variables, which can hold objects from the host application; a JSL variable can store an object of any type and no casting is required. JSL supports Java's primitive types, Java's reference types (objects), and Lisp-like lists. Though JSL does support 3This representation is motivated by the grammars of (Hipp, 1992 ). Java's primitive types, they are converted into their reference-type equivalent. For example, an integer is stored as a java. lang. Integer and is converted back to an integer when needed.",
320
- "cite_spans": [
321
- {
322
- "start": 978,
323
- "end": 989,
324
- "text": "(Hipp, 1992",
325
- "ref_id": "BIBREF1"
326
- }
327
- ],
328
- "ref_spans": [],
329
- "eq_spans": [],
330
- "section": "J a v o x S c r i p t i n g L a n g u a g e ( J S L )",
331
- "sec_num": "3.2"
332
- },
333
- {
334
- "text": "JSL has the standard control flow mechanisms found in most conventional programming languages, including if-else, for and while loops. With the exception of the evaluation of their boolean expressions, these constructs follow the syntax and behavior of their Java counterparts. Java requires that if-else conditions and loop termination criteria be a boolean value. JSL conditionals are more flexible; in addition to booleans, it evaluates non-empty strings as true, empty strings as false, non-zero values as true, zero as false, non-null objects as true, and n u l l as false.",
335
- "cite_spans": [],
336
- "ref_spans": [],
337
- "eq_spans": [],
338
- "section": "J a v o x S c r i p t i n g L a n g u a g e ( J S L )",
339
- "sec_num": "3.2"
340
- },
341
- {
342
- "text": "In addition to Java's control flow mechanisms, JSL also supports f o r e a c h loops, similar to those found in Perl. These loops iterate over both JSL lists and members of java.util.List, executing the associated code block on each item. JSL lists are often constructed by recursive rules in order to handle conjunctions, as seen in Section 5.",
343
- "cite_spans": [],
344
- "ref_spans": [],
345
- "eq_spans": [],
346
- "section": "J a v o x S c r i p t i n g L a n g u a g e ( J S L )",
347
- "sec_num": "3.2"
348
- },
349
- {
350
- "text": "The JAVOX infrastructure has been designed to completely separate NLP code from the application's code. The application still can be run without JAVOX, as a typical, non-speech-enabled program -it is only speech-enabled when run with JAVOX.",
351
- "cite_spans": [],
352
- "ref_spans": [],
353
- "eq_spans": [],
354
- "section": "Infrastructure",
355
- "sec_num": "4"
356
- },
357
- {
358
- "text": "From the application's perspective, JAVOX operates at the systems-level and sits between the application and the operating system (virtual machine), as shown in Figure 1 . TRANSLATOR interfaces with the speech recognizer and performs all necessary NLP. EXECUTER interfaces directly with the application and performs upcalls into the running program. Java has two key features that make it an ideal test platform for our experimental implementation: reflection and a redefineable loading scheme. Reflection provides a running program the ability to inspect itself, sometimes called introspection. Objects can determine their parent classes; every class is itself an object in Java (an instance of j ava.lang.Class). Methods, fields, constructors, and all class attributes can be obtained from a Class object. So, given an object, reflection can determine its class; given a class, reflection can find its methods and fields. JAVOX uses reflection to (1) map from the JSL-textual representation of an object to the actual instance in the running program; (2) find the appropriate j ava.lang.reflect.Methods for an object/method-name combination; and (3) actually invoke the method, once all of its arguments are known.",
359
- "cite_spans": [],
360
- "ref_spans": [
361
- {
362
- "start": 161,
363
- "end": 169,
364
- "text": "Figure 1",
365
- "ref_id": "FIGREF1"
366
- }
367
- ],
368
- "eq_spans": [],
369
- "section": "Infrastructure",
370
- "sec_num": "4"
371
- },
372
- {
373
- "text": "Reflection is very helpful in examining the application program's structure; however, prior to using reflection, EXECUTER needs access to the objects in the running program. To obtain pointers to the objects, JAVOX uses JOIE, a load-time transformation tool (Cohen et al., 1998) . JOIE allows us to modify each application class as it is loaded into the virtual machine. The JAVOX transform adds code to every constructor in the application that registers the new object with Executer. Conceptually, the following line is added to every constructor:",
374
- "cite_spans": [
375
- {
376
- "start": 258,
377
- "end": 278,
378
- "text": "(Cohen et al., 1998)",
379
- "ref_id": "BIBREF0"
380
- }
381
- ],
382
- "ref_spans": [],
383
- "eq_spans": [],
384
- "section": "Infrastructure",
385
- "sec_num": "4"
386
- },
387
- {
388
- "text": "Executer. register (this).",
389
- "cite_spans": [],
390
- "ref_spans": [],
391
- "eq_spans": [],
392
- "section": "Infrastructure",
393
- "sec_num": "4"
394
- },
395
- {
396
- "text": "This modification is done as the class is loaded, the compiled copy -on disk -is not changed. This allows the program to still be run without JhVOX, as a non-speech application. EXECUTER can -once it has the registered objects -use reflection to obtain everything else it needs to perform the actions specified by the JSL.",
397
- "cite_spans": [],
398
- "ref_spans": [],
399
- "eq_spans": [],
400
- "section": "Infrastructure",
401
- "sec_num": "4"
402
- },
403
- {
404
- "text": "Our longest running test application has been a Mr. Potato Head program; that allows users to manipulates a graphical representation of the classic children's toy. Its operations include those typically found in drawing programs, to include moving, recoloring and hiding various pieces of Mr. Potato Head. Grammar 3 shows a portion of application's grammar needed to process the utterance Move the eyes and glasses up. The result of parsing this utterance is shown in Figure 3 .",
405
- "cite_spans": [],
406
- "ref_spans": [
407
- {
408
- "start": 468,
409
- "end": 476,
410
- "text": "Figure 3",
411
- "ref_id": "FIGREF4"
412
- }
413
- ],
414
- "eq_spans": [],
415
- "section": "Example",
416
- "sec_num": "5"
417
- },
418
- {
419
- "text": "Once TRANSLATOR has processed an utterance, it forwards the resulting JSL fragment to EXECUTER. Figure 4 provides a reduced class diagram for the Mr. Potato Head application; the arrows correspond to the first iteration in the following trace. The following steps are performed as the JSL fragment from Figure 3 is interpreted:",
420
- "cite_spans": [],
421
- "ref_spans": [
422
- {
423
- "start": 96,
424
- "end": 104,
425
- "text": "Figure 4",
426
- "ref_id": null
427
- },
428
- {
429
- "start": 303,
430
- "end": 311,
431
- "text": "Figure 3",
432
- "ref_id": "FIGREF4"
433
- }
434
- ],
435
- "eq_spans": [],
436
- "section": "Example",
437
- "sec_num": "5"
438
- },
439
- {
440
- "text": "1. A new variable -local to EXECUTER -named $iter is created. Any previously-declared variable with the same name is destroyed.",
441
- "cite_spans": [],
442
- "ref_spans": [],
443
- "eq_spans": [],
444
- "section": "Example",
445
- "sec_num": "5"
446
- },
447
- {
448
- "text": "2. The foreach loop starts by initializing the loop variable to the first item in the list: Canvas.eyes0bj. This object's name consists of two parts; the steps to locate the actual instance in the application are:",
449
- "cite_spans": [],
450
- "ref_spans": [],
451
- "eq_spans": [],
452
- "section": "Example",
453
- "sec_num": "5"
454
- },
455
- {
456
- "text": "(a) The first part of the name, Canvas, is mapped to the only instance of the Canvas class in the context of this application. JAVOX has a reference to the instance because it registered with EXECUTER when it was created, thanks to a JOIE transformation.",
457
- "cite_spans": [],
458
- "ref_spans": [],
459
- "eq_spans": [],
460
- "section": "Example",
461
- "sec_num": "5"
462
- },
463
- {
464
- "text": "(b) The second part of the name, eyes0bj, is found through reflection. Every instance of Canvas has a field named eyes0bj of type BodyPaxt. This field is the eyes0bj for which we are looking.",
465
- "cite_spans": [],
466
- "ref_spans": [],
467
- "eq_spans": [],
468
- "section": "Example",
469
- "sec_num": "5"
470
- },
471
- {
472
- "text": "3. Once eyes0bj is located, the appropriate method must be found. We determinethrough reflection -that there are two methods in the BodyPart class with the name move, as seen in Figure 4 . 4. We next examine the two arguments and determine them to be both integers. Had the arguments been objects, fields, or other method calls, this entire procedure would be done recursively on each.",
473
- "cite_spans": [],
474
- "ref_spans": [
475
- {
476
- "start": 178,
477
- "end": 186,
478
- "text": "Figure 4",
479
- "ref_id": null
480
- }
481
- ],
482
- "eq_spans": [],
483
- "section": "Example",
484
- "sec_num": "5"
485
- },
486
- {
487
- "text": "5. We examine each possible method and determine that we need the one with two integer arguments, not the one taking a single Point argument.",
488
- "cite_spans": [],
489
- "ref_spans": [],
490
- "eq_spans": [],
491
- "section": "Example",
492
- "sec_num": "5"
493
- },
494
- {
495
- "text": "6. Now that we have the object, the method, and the arguments, the upcall is made and the method is executed in the application. The result is that Mr. Potato Head's eyes move up on the screen.",
496
- "cite_spans": [],
497
- "ref_spans": [],
498
- "eq_spans": [],
499
- "section": "Example",
500
- "sec_num": "5"
501
- },
502
- {
503
- "text": "7. This process is repeated for glass0bj and the loop terminates.",
504
- "cite_spans": [],
505
- "ref_spans": [],
506
- "eq_spans": [],
507
- "section": "Example",
508
- "sec_num": "5"
509
- },
510
- {
511
- "text": "After this process, both the eyes and glasses have moved up 20 units and Executer waits for additional input. The application continues to accept mouse and keyboard commands, just as it would without speech. ",
512
- "cite_spans": [],
513
- "ref_spans": [],
514
- "eq_spans": [],
515
- "section": "Example",
516
- "sec_num": "5"
517
- },
518
- {
519
- "text": "In practice, building a JAvox-based, speech interface -for limited-functionality applications -is straightforward and reasonably quick. To date, we have used three diverse applications as our test platforms. Speech-enabling the last of these, an image manipulation program, took little more than one person-day. Though these applications have been small; we are beginning to explore JAvOX's scalability to larger applications. We are also developing a library of JAVOX grammars for use with the standard Java classes. This resource will shorten development times even more; especially compared to building a SLS from the ground up. One of the existing challenges is to work with applications consisting entirely of dynamic objects, those that cannot be identified at load time. Some typical dynamic-object applications are drawing programs or presentation software; in both cases, the user creates the interesting objects during runtime. We have implemented a system in JSL which allows objects to be filtered based on an attribute, such as color in the utterance: Move the blue square.",
520
- "cite_spans": [],
521
- "ref_spans": [],
522
- "eq_spans": [],
523
- "section": "D i s c u s s i o n and Future Work",
524
- "sec_num": "6"
525
- },
526
- {
527
- "text": "In situations where there is a one-to-one correlation between a lexical item in the grammar and an object in the program, it is often the case that the lexical item is very similar to the element's identifier. It is quite often the same word or a direct synonym. Since JAVOX is primarily performing upcalls based on existing functions within the program, it also can be predicted what type of objects will cooccur in utterances. In the Mr. cation, we can assume that objects representing a Point or integers will occur when the user speaks of moving a BodyPart. We are developing a system to exploit these characteristics to automatically generate JAVOX grammars from an application's compiled code. The automatically-generated grammars are intended to serve as a starting point for developers -though they may certainly require some hand crafting. Our current, grammar-generation tool assumes the program is written with Java's standard naming conventions. It is imaginable that additional data sources -such as a sample corpus -will allow us to more accurately generate grammars for an application. Though in its infancy, we believe this approach holds vast potential for SLS development.",
528
- "cite_spans": [],
529
- "ref_spans": [],
530
- "eq_spans": [],
531
- "section": "D i s c u s s i o n and Future Work",
532
- "sec_num": "6"
533
- },
534
- {
535
- "text": "JAVOX provides a fast and flexible method to add a speech-interface to existing Java applications. The application program requires no source-code modification: The JAVOX infrastructure provides all NLP capabilities. We have implemented a grammar and scripting system that is straightforward enough that inexperienced developers and those unfamiliar with NLP can learn it quickly. We have demonstrated the technology on several programs and are commencing work on more ambitious applications. The current implementation of JAVOX is available for download at:",
536
- "cite_spans": [],
537
- "ref_spans": [],
538
- "eq_spans": [],
539
- "section": "Conclusion",
540
- "sec_num": "7"
541
- }
542
- ],
543
- "back_matter": [
544
- {
545
- "text": "This work has been partially supported by the Defense Advanced Research Projects Agency under contract F30602-99-C-0060.",
546
- "cite_spans": [],
547
- "ref_spans": [],
548
- "eq_spans": [],
549
- "section": "Acknowledgments",
550
- "sec_num": null
551
- }
552
- ],
553
- "bib_entries": {
554
- "BIBREF0": {
555
- "ref_id": "b0",
556
- "title": "Automatic program transformation with JOIE",
557
- "authors": [
558
- {
559
- "first": "Geoff",
560
- "middle": [
561
- "A"
562
- ],
563
- "last": "Cohen",
564
- "suffix": ""
565
- },
566
- {
567
- "first": "Jeffrey",
568
- "middle": [
569
- "S"
570
- ],
571
- "last": "Chase",
572
- "suffix": ""
573
- },
574
- {
575
- "first": "David",
576
- "middle": [
577
- "L"
578
- ],
579
- "last": "Kaminsky",
580
- "suffix": ""
581
- }
582
- ],
583
- "year": 1998,
584
- "venue": "USENIX Annual Technical Conference (N098)",
585
- "volume": "",
586
- "issue": "",
587
- "pages": "",
588
- "other_ids": {},
589
- "num": null,
590
- "urls": [],
591
- "raw_text": "Geoff A. Cohen, Jeffrey S. Chase, and David L. Kaminsky. 1998. Automatic program transforma- tion with JOIE. In USENIX Annual Technical Conference (N098), New Orleans, LA.",
592
- "links": null
593
- },
594
- "BIBREF1": {
595
- "ref_id": "b1",
596
- "title": "A New Technique for Parsing Ill-formed Spoken Natural-language Dialog",
597
- "authors": [
598
- {
599
- "first": "D",
600
- "middle": [],
601
- "last": "",
602
- "suffix": ""
603
- },
604
- {
605
- "first": "Richard",
606
- "middle": [],
607
- "last": "Hipp",
608
- "suffix": ""
609
- }
610
- ],
611
- "year": 1992,
612
- "venue": "",
613
- "volume": "",
614
- "issue": "",
615
- "pages": "",
616
- "other_ids": {},
617
- "num": null,
618
- "urls": [],
619
- "raw_text": "D. Richard Hipp. 1992. A New Technique for Pars- ing Ill-formed Spoken Natural-language Dialog. Ph.D. thesis, Duke University.",
620
- "links": null
621
- },
622
- "BIBREF2": {
623
- "ref_id": "b2",
624
- "title": "Automatic dialogue generator creates user defined applications",
625
- "authors": [
626
- {
627
- "first": "Andrew",
628
- "middle": [],
629
- "last": "Pargellis",
630
- "suffix": ""
631
- },
632
- {
633
- "first": "Jeffkuo",
634
- "middle": [],
635
- "last": "",
636
- "suffix": ""
637
- },
638
- {
639
- "first": "Chin-Hui",
640
- "middle": [],
641
- "last": "Lee",
642
- "suffix": ""
643
- }
644
- ],
645
- "year": 1999,
646
- "venue": "6th European Conference on Speech Communication and Technology",
647
- "volume": "3",
648
- "issue": "",
649
- "pages": "1175--1178",
650
- "other_ids": {},
651
- "num": null,
652
- "urls": [],
653
- "raw_text": "Andrew Pargellis, JeffKuo, and Chin-Hui Lee. 1999. Automatic dialogue generator creates user de- fined applications. In 6th European Conference on Speech Communication and Technology, volume 3, pages 1175--1178, Budapest, Hungary.",
654
- "links": null
655
- },
656
- "BIBREF3": {
657
- "ref_id": "b3",
658
- "title": "Natural language access to software applications",
659
- "authors": [
660
- {
661
- "first": "Paul",
662
- "middle": [],
663
- "last": "Schmidt",
664
- "suffix": ""
665
- },
666
- {
667
- "first": "Sibylle",
668
- "middle": [],
669
- "last": "Rieder",
670
- "suffix": ""
671
- },
672
- {
673
- "first": "Axel",
674
- "middle": [],
675
- "last": "Theofilidis",
676
- "suffix": ""
677
- },
678
- {
679
- "first": "Marius",
680
- "middle": [],
681
- "last": "Groenendijk",
682
- "suffix": ""
683
- },
684
- {
685
- "first": "Peter",
686
- "middle": [],
687
- "last": "Phelan",
688
- "suffix": ""
689
- },
690
- {
691
- "first": "Henrik",
692
- "middle": [],
693
- "last": "Schulz",
694
- "suffix": ""
695
- },
696
- {
697
- "first": "Thierry",
698
- "middle": [],
699
- "last": "Declerck",
700
- "suffix": ""
701
- },
702
- {
703
- "first": "Andrew",
704
- "middle": [],
705
- "last": "Brenenkamp",
706
- "suffix": ""
707
- }
708
- ],
709
- "year": 1998,
710
- "venue": "Proceedings of COLING-ACL-98",
711
- "volume": "",
712
- "issue": "",
713
- "pages": "1193--1197",
714
- "other_ids": {},
715
- "num": null,
716
- "urls": [],
717
- "raw_text": "Paul Schmidt, Sibylle Rieder, Axel Theofilidis, Mar- ius Groenendijk, Peter Phelan, Henrik Schulz, Thierry Declerck, and Andrew Brenenkamp. 1998. Natural language access to software applica- tions. In Proceedings of COLING-ACL-98, pages 1193-1197, Montreal, Quebec.",
718
- "links": null
719
- },
720
- "BIBREF4": {
721
- "ref_id": "b4",
722
- "title": "Java speech API specification 1.0",
723
- "authors": [
724
- {
725
- "first": "",
726
- "middle": [],
727
- "last": "Sun Microsystems",
728
- "suffix": ""
729
- },
730
- {
731
- "first": "",
732
- "middle": [],
733
- "last": "Inc",
734
- "suffix": ""
735
- }
736
- ],
737
- "year": 1998,
738
- "venue": "",
739
- "volume": "",
740
- "issue": "",
741
- "pages": "",
742
- "other_ids": {},
743
- "num": null,
744
- "urls": [],
745
- "raw_text": "Sun Microsystems, Inc. 1998. Java speech API spec- ification 1.0.",
746
- "links": null
747
- },
748
- "BIBREF5": {
749
- "ref_id": "b5",
750
- "title": "Building 10,000 spoken-dialogue systems",
751
- "authors": [
752
- {
753
- "first": "Stephen",
754
- "middle": [],
755
- "last": "Sutton",
756
- "suffix": ""
757
- },
758
- {
759
- "first": "David",
760
- "middle": [
761
- "G"
762
- ],
763
- "last": "Novick",
764
- "suffix": ""
765
- },
766
- {
767
- "first": "Ronald",
768
- "middle": [
769
- "A"
770
- ],
771
- "last": "Cole",
772
- "suffix": ""
773
- },
774
- {
775
- "first": "Pieter",
776
- "middle": [],
777
- "last": "Vermeulen",
778
- "suffix": ""
779
- },
780
- {
781
- "first": "Jacques",
782
- "middle": [],
783
- "last": "De Villiers",
784
- "suffix": ""
785
- },
786
- {
787
- "first": "Johan",
788
- "middle": [],
789
- "last": "Schalkwyk",
790
- "suffix": ""
791
- },
792
- {
793
- "first": "Mark",
794
- "middle": [],
795
- "last": "Fanty",
796
- "suffix": ""
797
- }
798
- ],
799
- "year": 1996,
800
- "venue": "Proceedings of the International Conference on Spoken Language Processing (ICSLP)",
801
- "volume": "",
802
- "issue": "",
803
- "pages": "709--712",
804
- "other_ids": {},
805
- "num": null,
806
- "urls": [],
807
- "raw_text": "Stephen Sutton, David G. Novick, Ronald A. Cole, Pieter Vermeulen, Jacques de Villiers, Johan Schalkwyk, and Mark Fanty. 1996. Building 10,000 spoken-dialogue systems. In Proceedings of the International Conference on Spoken Language Processing (ICSLP), pages 709--712, Philadel- phia, PA.",
808
- "links": null
809
- }
810
- },
811
- "ref_entries": {
812
- "FIGREF1": {
813
- "uris": null,
814
- "text": "Schematic of the JAVOX architecture.",
815
- "num": null,
816
- "type_str": "figure"
817
- },
818
- "FIGREF2": {
819
- "uris": null,
820
- "text": "A JAVOX grammar fragment for the Mr. Potato Head domain.",
821
- "num": null,
822
- "type_str": "figure"
823
- },
824
- "FIGREF3": {
825
- "uris": null,
826
- "text": "The JAVOX translation process -NL to JSL -for Move the cap up.",
827
- "num": null,
828
- "type_str": "figure"
829
- },
830
- "FIGREF4": {
831
- "uris": null,
832
- "text": "public <modPOS> = move <PARTS> <DIR> : {: dim Slier; foreach $iter (<PARTS>) $iter.move(<DIR:X>,<DIR:Y>); :}; public <PARTS> = [<ART>] <PART> : {: [<PART>] :}; public <PARTS> = <PARTS> [<CONJ>] [<ART>] <PART> : {:public <DIR> = up : X {: 0 :} : Y {: -20 :}; The translation process for the utterance Move the eyes and g/asses up.",
833
- "num": null,
834
- "type_str": "figure"
835
- },
836
- "FIGREF5": {
837
- "uris": null,
838
- "text": "Potato Head applio f-/,pp f~A-d o-, ......................................................... A simplified class diagram for the Mr. Potato Head application.",
839
- "num": null,
840
- "type_str": "figure"
841
- }
842
- }
843
- }
844
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Full_text_JSON/prefixA/json/A00/A00-1016.json DELETED
@@ -1,1336 +0,0 @@
1
- {
2
- "paper_id": "A00-1016",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:48.192924Z"
6
- },
7
- "title": "A Compact Architecture for Dialogue Management Based on Scripts ond Meta-Outputs",
8
- "authors": [
9
- {
10
- "first": "Manny",
11
- "middle": [],
12
- "last": "Rayner",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": ""
16
- },
17
- {
18
- "first": "Beth",
19
- "middle": [
20
- "Ann"
21
- ],
22
- "last": "Hockey",
23
- "suffix": "",
24
- "affiliation": {},
25
- "email": ""
26
- },
27
- {
28
- "first": "Frankie",
29
- "middle": [],
30
- "last": "J~mes",
31
- "suffix": "",
32
- "affiliation": {},
33
- "email": ""
34
- }
35
- ],
36
- "year": "",
37
- "venue": null,
38
- "identifiers": {},
39
- "abstract": "We describe an architecture for spoken dialogue interfaces to semi-autonomous systems that transforms speech signals through successive representations of linguistic, dialogue, and domain knowledge. Each step produces an output, and a meta-output describing the transformation, with an executable program in a simple scripting language as the final result. The output/meta-output distinction permits perspicuous treatment of diverse tasks such as resolving pronouns, correcting user misconceptions, and optimizing scripts.",
40
- "pdf_parse": {
41
- "paper_id": "A00-1016",
42
- "_pdf_hash": "",
43
- "abstract": [
44
- {
45
- "text": "We describe an architecture for spoken dialogue interfaces to semi-autonomous systems that transforms speech signals through successive representations of linguistic, dialogue, and domain knowledge. Each step produces an output, and a meta-output describing the transformation, with an executable program in a simple scripting language as the final result. The output/meta-output distinction permits perspicuous treatment of diverse tasks such as resolving pronouns, correcting user misconceptions, and optimizing scripts.",
46
- "cite_spans": [],
47
- "ref_spans": [],
48
- "eq_spans": [],
49
- "section": "Abstract",
50
- "sec_num": null
51
- }
52
- ],
53
- "body_text": [
54
- {
55
- "text": "The basic task we consider in this paper is that of using spoken language to give commands to a semiautonomous robot or other similar system. As evidence of the importance of this task in the NLP community note that the early, influential system SHRDLU (Winograd, 1973) was intended to address just this type of problem. More recent work on spoken language interfaces to semi-antonomous robots include SRrs Flakey robot (Konolige et al., 1993) and NCARArs InterBOT project (Perzanowski et al., 1998; Perzanowski et al., 1999) . A number of other systems have addressed part of the task. Com-mandTalk (Moore et al., 1997) , Circuit Fix-It Shop (Smith, 1997) and (Traum and Allen, 1994; Tranm and Andersen, 1999) are spoken language systems but they interface to simulation or help facilities rather than semi-autonomous agents. Jack's MOOse Lodge (Badler et al., 1999 ) takes text rather than speech as natural language input and the avatars being controlled are not semi-autonomous. Other researchers have considered particular aspects of the problem such as accounting for various aspects of actions (Webber, 1995; Pyre et al., 1995) . In most of this and other related work the treatment is some variant of the following. If there is a speech interface, the input speech signal is converted into text. Text either from the recognizer or directly input by the user is then converted into some kind of logical formula, which abstractly represents the user's intended command; this formula is then fed into a command interpreter, which executes the command.",
56
- "cite_spans": [
57
- {
58
- "start": 253,
59
- "end": 269,
60
- "text": "(Winograd, 1973)",
61
- "ref_id": "BIBREF16"
62
- },
63
- {
64
- "start": 420,
65
- "end": 443,
66
- "text": "(Konolige et al., 1993)",
67
- "ref_id": "BIBREF2"
68
- },
69
- {
70
- "start": 473,
71
- "end": 499,
72
- "text": "(Perzanowski et al., 1998;",
73
- "ref_id": "BIBREF6"
74
- },
75
- {
76
- "start": 500,
77
- "end": 525,
78
- "text": "Perzanowski et al., 1999)",
79
- "ref_id": "BIBREF7"
80
- },
81
- {
82
- "start": 600,
83
- "end": 620,
84
- "text": "(Moore et al., 1997)",
85
- "ref_id": "BIBREF4"
86
- },
87
- {
88
- "start": 643,
89
- "end": 656,
90
- "text": "(Smith, 1997)",
91
- "ref_id": "BIBREF10"
92
- },
93
- {
94
- "start": 661,
95
- "end": 684,
96
- "text": "(Traum and Allen, 1994;",
97
- "ref_id": null
98
- },
99
- {
100
- "start": 685,
101
- "end": 710,
102
- "text": "Tranm and Andersen, 1999)",
103
- "ref_id": null
104
- },
105
- {
106
- "start": 846,
107
- "end": 866,
108
- "text": "(Badler et al., 1999",
109
- "ref_id": "BIBREF0"
110
- },
111
- {
112
- "start": 1101,
113
- "end": 1115,
114
- "text": "(Webber, 1995;",
115
- "ref_id": "BIBREF15"
116
- },
117
- {
118
- "start": 1116,
119
- "end": 1134,
120
- "text": "Pyre et al., 1995)",
121
- "ref_id": "BIBREF9"
122
- }
123
- ],
124
- "ref_spans": [],
125
- "eq_spans": [],
126
- "section": "Introduction",
127
- "sec_num": "1"
128
- },
129
- {
130
- "text": "We do not think the standard treatment outlined above is in essence incorrect, but we do believe that, as it stands, it is in need of some modification. This paper will in particular make three points. First, we suggest that the output representation should not be regarded as a logical expression, but rather as a program in some kind of scripting language. Second, we argue that it is not merely the case that the process of converting the input signal to the final representation can sometimes go wrong; rather, this is the normal course of events, and the interpretation process should be organized with that assumption in mind. Third, we claim, perhaps surprisingly, that the first and second points are related. These claims are elaborated in Section 2.",
131
- "cite_spans": [],
132
- "ref_spans": [],
133
- "eq_spans": [],
134
- "section": "Introduction",
135
- "sec_num": "1"
136
- },
137
- {
138
- "text": "The remainder of the paper describes an architecture which addresses the issues outlined above, and which has been used to implement a prototype speech interface to a simulated semi-autonomous robot intended for deployment on the International Space Station. Sections 3 and 4 present an overview of the implemented interface, focussing on representational issues relevant to dialogue management. Illustrative examples of interactions with the system are provided in Section 5. Section 6 concludes.",
139
- "cite_spans": [],
140
- "ref_spans": [],
141
- "eq_spans": [],
142
- "section": "Introduction",
143
- "sec_num": "1"
144
- },
145
- {
146
- "text": "Theoretical Ideas",
147
- "cite_spans": [],
148
- "ref_spans": [],
149
- "eq_spans": [],
150
- "section": "2",
151
- "sec_num": null
152
- },
153
- {
154
- "text": "Let's first look in a little more detail at the question of what the output representation of a spoken language interface to a semi-autonomous robot/agent should be. In practice, there seem to be two main choices: atheoreticai representations, or some kind of logic. Logic is indeed an excellent way to think about representing static relationships like database queries, but it is much less clear that it is a good way to represent commands. In real life, when people wish to give a command to a computer, they usually do so via its operating system; a complex command is an expression in a scripting language like CSHELL, Perl, or VBScript. These languages are related to logical formalisms, but cannot be mapped onto them in a simple way. Here are some of the obvious differences:",
155
- "cite_spans": [],
156
- "ref_spans": [],
157
- "eq_spans": [],
158
- "section": "Scripts vs Logical Forms",
159
- "sec_num": "2.1"
160
- },
161
- {
162
- "text": "\u2022 A scripting language is essentially imperative, rather than relational.",
163
- "cite_spans": [],
164
- "ref_spans": [],
165
- "eq_spans": [],
166
- "section": "Scripts vs Logical Forms",
167
- "sec_num": "2.1"
168
- },
169
- {
170
- "text": "\u2022 The notion of temporal sequence is fundamental to the language. \"Do P and then Q\" is not the same as \"Make the goals P and Q true\"; it is explicitly stated that P is to be done first. Similarly, \"For each X in the list (A B C), do P(X)\" is not the same as \"For all X, make P(X) true\"; once again, the scripting language defines an or: der, but not the logical language 1.",
171
- "cite_spans": [],
172
- "ref_spans": [],
173
- "eq_spans": [],
174
- "section": "Scripts vs Logical Forms",
175
- "sec_num": "2.1"
176
- },
177
- {
178
- "text": "\u2022 Scripting languages assume that commands do not always succeed. For example, UNIX-based scripting languages like CSHELL provide each script with the three predefined streams stdin, stdout and sl;derr. Input is read from stdin and written to sCdout; error messages, warnings and other comments are sent to stderr.",
179
- "cite_spans": [],
180
- "ref_spans": [],
181
- "eq_spans": [],
182
- "section": "Scripts vs Logical Forms",
183
- "sec_num": "2.1"
184
- },
185
- {
186
- "text": "We do not think that these properties of scripting language are accidental. They have evolved as the result of strong selectional pressure from real users with real-world tasks that need to be carried out, and represent a competitive way to meet said users' needs. We consequently think it is worth taking seriously the idea that a target representation produced by a spoken language interface should share many of these properties.",
187
- "cite_spans": [],
188
- "ref_spans": [],
189
- "eq_spans": [],
190
- "section": "Scripts vs Logical Forms",
191
- "sec_num": "2.1"
192
- },
193
- {
194
- "text": "We now move on to the question of modelling the interpretation process, that is to say the process that converts the input (speech) signal to the output (executable) representation. As already indicated, we think it is important to realize that interpretation is a process which, like any other process, may succeed more or less well in achieving its intended goals. Users may express themselves unclearly or incompletely, or the system may more or less seriously fail to understand exactly what they mean. A good interpretation architecture will keep these considerations in mind.",
195
- "cite_spans": [],
196
- "ref_spans": [],
197
- "eq_spans": [],
198
- "section": "Meta-outputs",
199
- "sec_num": null
200
- },
201
- {
202
- "text": "Taking our lead from the description of scripting languages sketched above, we adapt the notion of the \"error stream\" to the interpretation process. In the course of interpreting an utterance, the system translates it into successively \"deeper\" levels of representation. Each translation step has not only an input (the representation consumed) and an output 1In cases like these, the theorem prover or logic programruing interpreter used to evaluate the logical formula typically assigns a conventional order to the conjuncts; note however that this is part of the procedural semantics of the theorem prover/interpreter, and does not follow from the declarative semantics of the logical formalism.",
203
- "cite_spans": [],
204
- "ref_spans": [],
205
- "eq_spans": [],
206
- "section": "Meta-outputs",
207
- "sec_num": null
208
- },
209
- {
210
- "text": "(the representation produced), but also something we will refer to as a \"meta-output\": this provides information about how the translation was performed.",
211
- "cite_spans": [],
212
- "ref_spans": [],
213
- "eq_spans": [],
214
- "section": "Meta-outputs",
215
- "sec_num": null
216
- },
217
- {
218
- "text": "At a high level of abstraction, our architecture will be as follows. Interpretation proceeds as a series of non-deterministic translation steps, each producing a set of possible outputs and associated metaoutputs. The final translation step produces an executable script. The interface attempts to simulate execution of each possible script produced, in order to determine what would happen if that script were selected; simulated execution can itself produce further meta-outputs. Finally, the system uses the meta-output information to decide what to do with the various possible interpretations it has produced. Possible actions include selection and execution of an output script, paraphrasing meta-output information back to the user, or some combination of the two.",
219
- "cite_spans": [],
220
- "ref_spans": [],
221
- "eq_spans": [],
222
- "section": "Meta-outputs",
223
- "sec_num": null
224
- },
225
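As one way of picturing this architecture, the following hedged Python sketch treats each translation step as a function from a representation to alternative (output, meta-outputs) pairs; the step name and term shapes are assumptions made for illustration, not the system's actual API.

def resolve(rep):
    # Stand-in non-deterministic step: each alternative comes back with
    # meta-outputs recording how it was obtained.
    return [(rep + "[it=crew_hatch]", [("resolution", "pronoun")]),
            (rep + "[it=?]", [("presupposition_failure", "unresolved_pronoun")])]

def interpret(utterance, steps):
    candidates = [(utterance, [])]
    for step in steps:
        candidates = [(output, meta + new_meta)
                      for rep, meta in candidates
                      for output, new_meta in step(rep)]
    return candidates   # each final script carries its meta-output bag

for script, meta in interpret("open it", [resolve]):
    print(script, meta)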
- {
226
- "text": "In the following section, we present a more detailed description showing how the output/metaoutput distinction works in a practical system.",
227
- "cite_spans": [],
228
- "ref_spans": [],
229
- "eq_spans": [],
230
- "section": "Meta-outputs",
231
- "sec_num": null
232
- },
233
- {
234
- "text": "A Prototype Implementation",
235
- "cite_spans": [],
236
- "ref_spans": [],
237
- "eq_spans": [],
238
- "section": "3",
239
- "sec_num": null
240
- },
241
- {
242
- "text": "The ideas sketched out above have been realized as a prototype spoken language dialogue interface to a simulated version of the Personal Satellite Assistant (PSA; (PSA, 2000) ). This section gives an overview of the implementation; in the following section, we focus on the specific aspects of dialogue management which are facilitated by the output/meta-output architecture.",
243
- "cite_spans": [
244
- {
245
- "start": 163,
246
- "end": 174,
247
- "text": "(PSA, 2000)",
248
- "ref_id": null
249
- }
250
- ],
251
- "ref_spans": [],
252
- "eq_spans": [],
253
- "section": "3",
254
- "sec_num": null
255
- },
256
- {
257
- "text": "The real PSA is a miniature robot currently being developed at NASA Ames Research Center, which is intended for deployment on the Space Shuttle and/or International Space Station. It will be capable of free navigation in an indoor micro-gravity environment, and will provide mobile sensory capacity as a backup to a network of fixed sensors. The PSA will primarily be controlled by voice commands through a hand-held or head-mounted microphone, with speech and language processing being handled by an offboard processor. Since the speech processing units are not in fact physically connected to the PSA we envisage that they could also be used to control or monitor other environmental functions. In particular, our simulation allows voice access to the current and past values of the fixed sensor readings. The initial PSA speech interface demo consists of a simple simulation of the Shuttle. State parameters include the PSA's current position, some environmental variables such as local temperature, pressure and carbon dioxide levels, and the status of the Shuttle's doors (open/closed). A visual display gives direct feedback on some of these parameters.",
258
- "cite_spans": [],
259
- "ref_spans": [],
260
- "eq_spans": [],
261
- "section": "Levels of Representation",
262
- "sec_num": "3.1"
263
- },
264
- {
265
- "text": "The speech and language processing architecture is based on that of the SRI CommandTalk system (Moore et al., 1997; Stent et al., 1999) . The system comprises a suite of about 20 agents, connected together using the SPd Open Agent Architecture (OAA; (Martin et al., 1998) ). Speech recognition is performed using a version of the Nuance recognizer (Nuance, 2000) . Initial language processing is carried out using the SRI Gemini system (Dowding et al., 1993) , using a domain~independent unification grammar and a domain-specific lexicon. The language processing grammar is compiled into a recognition grarnm~kr using the methods of (Moore et al., 1997) ; the net result is that only grammatically wellformed utterances can be recognized. Output from the initial language-processing step is represented in a version of Quasi Logical Form (van Eijck and Moore, 1992) , and passed in that form to the dialogue manager. We refer to these as linguistic level representations.",
266
- "cite_spans": [
267
- {
268
- "start": 95,
269
- "end": 115,
270
- "text": "(Moore et al., 1997;",
271
- "ref_id": "BIBREF4"
272
- },
273
- {
274
- "start": 116,
275
- "end": 135,
276
- "text": "Stent et al., 1999)",
277
- "ref_id": "BIBREF11"
278
- },
279
- {
280
- "start": 250,
281
- "end": 271,
282
- "text": "(Martin et al., 1998)",
283
- "ref_id": "BIBREF3"
284
- },
285
- {
286
- "start": 348,
287
- "end": 362,
288
- "text": "(Nuance, 2000)",
289
- "ref_id": "BIBREF5"
290
- },
291
- {
292
- "start": 436,
293
- "end": 458,
294
- "text": "(Dowding et al., 1993)",
295
- "ref_id": "BIBREF1"
296
- },
297
- {
298
- "start": 633,
299
- "end": 653,
300
- "text": "(Moore et al., 1997)",
301
- "ref_id": "BIBREF4"
302
- },
303
- {
304
- "start": 853,
305
- "end": 865,
306
- "text": "Moore, 1992)",
307
- "ref_id": "BIBREF14"
308
- }
309
- ],
310
- "ref_spans": [],
311
- "eq_spans": [],
312
- "section": "Levels of Representation",
313
- "sec_num": "3.1"
314
- },
315
- {
316
- "text": "The aspects of the system which are of primary interest here concern the dialogue manager (DM) and related modules. Once a linguistic level representation has been produced, the following processing steps occur:",
317
- "cite_spans": [],
318
- "ref_spans": [],
319
- "eq_spans": [],
320
- "section": "Levels of Representation",
321
- "sec_num": "3.1"
322
- },
323
- {
324
- "text": "\u2022 The linguistic level representation is converted into a discourse level representation. This primarily involves regularizing differences in surface form: so, for example, \"measure the pressure\" and '~hat is the pressure?\" have different representations at the linguistic level, but the same representation at the discourse level.",
325
- "cite_spans": [],
326
- "ref_spans": [],
327
- "eq_spans": [],
328
- "section": "Levels of Representation",
329
- "sec_num": "3.1"
330
- },
331
- {
332
- "text": "\u2022 If necessary, the system attempts to resolve instances of ellipsis and anaph*oric reference. For example, if the previous command was \"measure temperature at flight deck\", then the new command \"lower deck\" will be resolved to an expression meaning \"measure temperature at lower deck\". Similarly, if the previous command was \"move to the crew hatch\", then the command \"open it\" will be resolved to \"open the crew hatch\". We call the output of this step a resolved discourse level representation.",
333
- "cite_spans": [],
334
- "ref_spans": [],
335
- "eq_spans": [],
336
- "section": "Levels of Representation",
337
- "sec_num": "3.1"
338
- },
339
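A hedged Python sketch of this resolution step (the slot names and the context structure are assumptions made for the example, not the system's representation):

def resolve(command, context):
    # Ellipsis: a bare location such as "lower deck" inherits the
    # action of the previous command.
    if "action" not in command:
        command = {"action": context["last_action"], **command}
    # Anaphora: "it" is resolved to the most recent suitable referent.
    if command.get("object") == "it":
        command["object"] = context["last_object"]
    return command

context = {"last_action": "measure_temperature", "last_object": "crew_hatch"}
print(resolve({"location": "lower_deck"}, context))
print(resolve({"action": "open", "object": "it"}, context))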
- {
340
- "text": "\u2022 The resolved discourse level representation is converted into an executable script in a language essentially equivalent to a subset of CSHELL. This involves two sub-steps. First, quantified variables are given scope: for example, \"go to the flight deck and lower deck and measure pressure\" becomes something approximately equivalent to the script foreach x (flight_deck lower_deck) go_to $x measure pressure end The point to note here is that the foreach has scope over both the go_to and the meeusmre actions; an alternate (incorrect) scoping would be foreachx (flight_deck lower_deck) go_to $x end measure pressure",
341
- "cite_spans": [],
342
- "ref_spans": [],
343
- "eq_spans": [],
344
- "section": "Levels of Representation",
345
- "sec_num": "3.1"
346
- },
347
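The two inline scripts in the bullet above can be rendered as loops to make the scoping visible. The Python below is purely illustrative (the original scripts are CSHELL-style; go_to and measure are stubbed names):

def go_to(place):                 # stubbed effector calls; the names
    print("moving to", place)     # mirror the script above

def measure(quantity):
    print("measuring", quantity)

# Correct scoping: the loop covers both actions.
for x in ["flight_deck", "lower_deck"]:
    go_to(x)
    measure("pressure")

# Incorrect scoping: only the movement is iterated.
for x in ["flight_deck", "lower_deck"]:
    go_to(x)
measure("pressure")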
- {
348
- "text": "The second sub-step is to attempt to optimize the plan. In the current example, this can be done by reordering the list (flight.deck louer_deck). For instance, if the PSA is already at the lower deck, reversing the list will mean that the robot only makes one trip, instead of two.",
349
- "cite_spans": [],
350
- "ref_spans": [],
351
- "eq_spans": [],
352
- "section": "Levels of Representation",
353
- "sec_num": "3.1"
354
- },
355
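A minimal sketch of this optimization, assuming a toy one-dimensional deck layout for the distance measure (the real system's cost model is not specified here, so distance() and the threshold-free search below are invented stand-ins):

from itertools import permutations

DECKS = ["lower_deck", "mid_deck", "flight_deck"]   # toy 1-D layout

def distance(a, b):
    return abs(DECKS.index(a) - DECKS.index(b))

def best_order(current, places):
    def cost(route):
        stops = [current, *route]
        return sum(distance(a, b) for a, b in zip(stops, stops[1:]))
    return min(permutations(places), key=cost)

# Starting at the lower deck, visiting it first saves a trip:
print(best_order("lower_deck", ["flight_deck", "lower_deck"]))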
- {
356
- "text": "The final step in the interpretation process is plan evaluation: the system tries to work out what will happen if it actually executes the plan. (The relationship between plan evaluation and plan execution is described in more detail in Section 4.1). Among other things, this gives the dialogue manager the possibility of comparing different interpretations of the original command, and picking the one which is most efficient.",
357
- "cite_spans": [],
358
- "ref_spans": [],
359
- "eq_spans": [],
360
- "section": "Levels of Representation",
361
- "sec_num": "3.1"
362
- },
363
- {
364
- "text": "Tr---qlation , The above sketch shows how context-dependent interpretation is arranged as a series of nondeterministic translation steps; in each case, we have described the input and the output for the step in question. We now go back to the concerns of Section 2. First, note that each translation step is in general fallible. We give several examples:",
365
- "cite_spans": [],
366
- "ref_spans": [],
367
- "eq_spans": [],
368
- "section": "How Meta-outputs Participate in the",
369
- "sec_num": "3.2"
370
- },
371
- {
372
- "text": "One of the most obvious cases arises when the user simply issues an invalid command, such as requesting the PSA to open a door D which is already open. Here, one of the meta-outputs issued by the plan evaluation step will be the term presupposition_failure(already_open(D)); the DM can decide to paraphrase this back to the user as a surface string of the form \"D is already open\". Note that plan evaluation does not involve actually executing the final script, which can be important. For instance, if the command is \"go to the crew hatch and open it\" and the crew hatch is already open, the interface has the option of informing the user that there is a problem without first carrying out the \"go to\" action.",
373
- "cite_spans": [],
374
- "ref_spans": [],
375
- "eq_spans": [],
376
- "section": "How Meta-outputs Participate in the",
377
- "sec_num": "3.2"
378
- },
379
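The same point can be sketched as evaluation over a simulated state: meta-outputs such as the presupposition failure above are collected before any effector is touched. The term shapes and the state table below are invented stand-ins, not the system's structures.

def evaluate(script, state):
    # Walk the script against a simulated state, collecting meta-outputs;
    # no effector is invoked, so nothing actually moves.
    meta = []
    for action, arg in script:
        if action == "open" and state.get(arg) == "open":
            meta.append(("presupposition_failure", ("already_open", arg)))
    return meta

state = {"crew_hatch": "open"}
script = [("go_to", "crew_hatch"), ("open", "crew_hatch")]
print(evaluate(script, state))   # failure reported before any "go to"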
- {
380
- "text": "The resolution step can give rise to similar kinds of metaooutput. For example, a command may include a referring expression that has no denotation, or an ambiguous denotation; for example, the user might say \"both decks\", presumably being unaware that there are in fact three of them. This time, the meta-output produced is presupposition_failure ( incorrect_size_of_set (2,3) )",
381
- "cite_spans": [],
382
- "ref_spans": [
383
- {
384
- "start": 348,
385
- "end": 378,
386
- "text": "( incorrect_size_of_set (2,3)",
387
- "ref_id": null
388
- }
389
- ],
390
- "eq_spans": [],
391
- "section": "How Meta-outputs Participate in the",
392
- "sec_num": "3.2"
393
- },
394
- {
395
- "text": "representing the user's incorrect belief about the number of decks. The DM then has the possibility of informingthe user of this misconcelfi tion by realizing the meta-output term as the surface string \"in fact there are three of them\". Ambiguous denotation occurs when a description is under-specified. For instance, the user might say \"the deck\" in a situation where there is no clearly salient deck, either in the discourse situation or in the simulated world: here, the meta-output will be presupposition_failure ( underspecif ied_def inite (deck))",
396
- "cite_spans": [],
397
- "ref_spans": [],
398
- "eq_spans": [],
399
- "section": "How Meta-outputs Participate in the",
400
- "sec_num": "3.2"
401
- },
402
- {
403
- "text": "which can be realized as the clarification question \"which deck do you mean?\"",
404
- "cite_spans": [],
405
- "ref_spans": [],
406
- "eq_spans": [],
407
- "section": "How Meta-outputs Participate in the",
408
- "sec_num": "3.2"
409
- },
410
- {
411
- "text": "\u2022 A slightly more complex case involves plan costs. During plan evaluation, the system simulates execution of the output script while keeping track of execution cost. (Currently, the cost is just an estimate of the time required to execute the script). Execution costs are treated as meta-outputs of the form",
412
- "cite_spans": [],
413
- "ref_spans": [],
414
- "eq_spans": [],
415
- "section": "How Meta-outputs Participate in the",
416
- "sec_num": "3.2"
417
- },
418
- {
419
- "text": "cost (C)",
420
- "cite_spans": [],
421
- "ref_spans": [],
422
- "eq_spans": [],
423
- "section": "How Meta-outputs Participate in the",
424
- "sec_num": "3.2"
425
- },
426
- {
427
- "text": "and passed back through the interpreter so that the plan optimization step can make use of them. \u2022 Finally, we consider what happens when the system receives incorrect input from the speech recognizer. Although the recognizer's language model is constrained so that it can only produce grammatical utterances, it can still misrecognize one grammatical string as another one. Many of these cases fall into one of a small number of syntactic patterns, which function as fairly reliable indicators of bad recognition. A typical example is conjunction involving a pronoun: if the system hears \"it and flight deck\", this is most likely a misrecognition of something like \"go to flight deck\". During the processing phase which translates linguistic level representations into discourse level representations, the system attempts to match each misrecognition pattern against the input linguistic form, and if successful produces a meta-output of the form presupposition_failure ( dubious_If (<Type>))",
428
- "cite_spans": [],
429
- "ref_spans": [],
430
- "eq_spans": [],
431
- "section": "How Meta-outputs Participate in the",
432
- "sec_num": "3.2"
433
- },
434
- {
435
- "text": "These meta-outputs are passed down to the DM, which in the absence of sufficiently compelling contrary evidence will normally issue a response of the form \"I'm sorry, I think I misheard you\".",
436
- "cite_spans": [],
437
- "ref_spans": [],
438
- "eq_spans": [],
439
- "section": "How Meta-outputs Participate in the",
440
- "sec_num": "3.2"
441
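A hedged sketch of such a filter, with a regular expression over the surface string standing in for the system's pattern matching over linguistic-level representations (the pattern table and label are assumptions):

import re

DUBIOUS_PATTERNS = [
    (re.compile(r"\bit and\b"), "pronoun_conjunction"),  # "it and flight deck"
]

def check_recognition(utterance):
    return [("presupposition_failure", ("dubious_lf", label))
            for pattern, label in DUBIOUS_PATTERNS
            if pattern.search(utterance)]

print(check_recognition("it and flight deck"))   # flagged as dubious
print(check_recognition("go to flight deck"))    # passes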
- },
442
- {
443
- "text": "Based on Scripts and Meta-Outputs None of the individual functionalities outlined above are particularly novel in themselves. What we find new and interesting is the fact that they can all be expressed in a uniform way in terms of the script output/meta-output architecture. This section presents three examples illustrating how the architecture can be used to simplify the overall organization of the system.",
444
- "cite_spans": [],
445
- "ref_spans": [],
446
- "eq_spans": [],
447
- "section": "A Compact Architecture for Dialogue Management",
448
- "sec_num": "4"
449
- },
450
- {
451
- "text": "Recall that the DM simulates evaluation of the plan before running it, in order to obtain relevant metainformation. At plan execution time, plan actions result in changes to the world; at plan evaluation time, they result in simulated changes to the world and/or produce meta-outputs.",
452
- "cite_spans": [],
453
- "ref_spans": [],
454
- "eq_spans": [],
455
- "section": "Integration of plan evaluation, plan execution and dialogue management",
456
- "sec_num": "4.1"
457
- },
458
- {
459
- "text": "Conceptualizing plans as scripts rather than logicai formulas permits an elegant treatment of the execution/evaluation dichotomy. There is one script interpreter, which functions both as a script executive and a script evaluator, and one set of rules which defines the procedural semantics of script actions. Rules are parameterized by execution type which is either \"execute\" or \"evaluate\". In \"evaluate\" mode, primitive actions modify a state vector which is threaded through the interpreter; in \"execute\" mode, they result in commands being sent to (real or simulated) effector agents. Conversely, \"meta-information\" actions, such as presupposition failures, result in output being sent to the metaoutput stream in \"evaluate\" mode, and in a null action in \"execute\" mode. The upshot is that a simple semantics can be assigned to rules like the following one, which defines the action of attempting to open a door which may already be open: if_then_else (status (D, open_closed, open) , change_status (D, open_closed, open) ",
460
- "cite_spans": [],
461
- "ref_spans": [
462
- {
463
- "start": 943,
464
- "end": 986,
465
- "text": "if_then_else (status (D, open_closed, open)",
466
- "ref_id": null
467
- },
468
- {
469
- "start": 989,
470
- "end": 1025,
471
- "text": "change_status (D, open_closed, open)",
472
- "ref_id": null
473
- }
474
- ],
475
- "eq_spans": [],
476
- "section": "Integration of plan evaluation, plan execution and dialogue management",
477
- "sec_num": "4.1"
478
- },
479
- {
480
- "text": "procedure ( open_door (D),",
481
- "cite_spans": [],
482
- "ref_spans": [],
483
- "eq_spans": [],
484
- "section": "Integration of plan evaluation, plan execution and dialogue management",
485
- "sec_num": "4.1"
486
- },
487
- {
488
- "text": "presupposition_failure (already_open(D)),",
489
- "cite_spans": [],
490
- "ref_spans": [],
491
- "eq_spans": [],
492
- "section": "Integration of plan evaluation, plan execution and dialogue management",
493
- "sec_num": "4.1"
494
- },
495
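Read together with the if_then_else fragment above, the rule's intent, and the execute/evaluate parameterization just described, can be sketched in Python as follows; the original rules are term-based, so this is an analogy rather than the system's code, and all names here are assumptions.

def send_to_effectors(action, arg):
    # Stub for the (real or simulated) effector agents.
    print("effector:", action, arg)

def open_door(door, state, mode, meta):
    # Reconstructed reading of the rule: if the door is already open,
    # raise a presupposition failure; otherwise open it.
    if state.get(door) == "open":
        if mode == "evaluate":
            # Meta-information action: reported in evaluate mode only;
            # in execute mode this is a null action.
            meta.append(("presupposition_failure", ("already_open", door)))
    elif mode == "evaluate":
        state[door] = "open"              # update the threaded state vector
    else:
        send_to_effectors("open", door)   # real command in execute mode

state, meta = {"crew_hatch": "open"}, []
open_door("crew_hatch", state, "evaluate", meta)
print(meta)   # [('presupposition_failure', ('already_open', 'crew_hatch'))]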
- {
496
- "text": "As described in the preceding section, the resolution step is in general non-deterministic and gives rise to meta-outputs which describe the type of resolution carried out. For example, consider a command involving a definite description, like \"open the door\". Depending on the preceding context, resolution will produce a number of possible interpretations; \"the door\" may be resolved to one or more contextually available doors, or the expression may be left unresolved. In each case, the type of resolution used appears as a meta-output, and is available to the dialogue manager when it decides which interpretation is most felicitous. By default, the DM's strategy is to attempt to supply antecedents for referring expre~.. sious, preferring the most recently occurring sortally appropriate candidate. In some cases, however, it is desirable to allow the default strategy to be overridden: for instance, it may result in a script which produces a presupposition failure during plan evaluation. Treating resolution choices and plan evaluation problems as similar types of objects makes it easy to implement this kind of idea.",
497
- "cite_spans": [],
498
- "ref_spans": [],
499
- "eq_spans": [],
500
- "section": ") ) 4.2 Using meta-outputs to choose between interpretations",
501
- "sec_num": null
502
- },
503
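One simple way of exploiting this uniformity is to rank alternative interpretations by their meta-output bags, preferring readings with no presupposition failures and breaking ties on estimated cost; the scoring scheme below is invented for illustration.

def score(meta):
    failures = sum(1 for tag, _ in meta if tag == "presupposition_failure")
    cost = sum(value for tag, value in meta if tag == "cost")
    return (failures, cost)   # fewer failures first, then cheaper

candidates = [
    ("open(door3)", [("presupposition_failure", "already_open")]),
    ("open(door1)", [("cost", 5)]),
]
best_script, best_meta = min(candidates, key=lambda c: score(c[1]))
print(best_script)   # open(door1)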
- {
504
- "text": "Perhaps the key advantage of our architecture is that collecting together several types of information as a bag of meta-outputs simplifies the top-level structure of the dialogue manager. In our application, the critical choice of dialogue move comes after the dialogue manager has selected the most plausible interpretation. It now has to make two choices. First, it must decide whether or not to paraphrase any of the meta-outputs back to the user; for example, if resolution was unable to fill some argument position or find an antecedent for a pronoun, it may be appropriate to paraphrase the corresponding metaoutput as a question, e.g. \"where do you mean?\", or \"what do you mean by 'it' ?'. Having all the metaoutputs available together means that the DM is able to plan a coherent response: so if there are several recta-outputs which could potentially be worth paraphrasing, it typically realizes only the most important one. Second, if interpretation was able to produce a well-formed plan, the DM currently has the three options of executing it, paraphrasing it back to the user as a confirmation question, or doing nothing. Once again, this decision often requires global information about what has happened during the interpretation process. For example, knowing that plan evaluation showed that the plan would take significant time to execute makes it more plausible that the user would prefer to receive a confirmation.",
505
- "cite_spans": [],
506
- "ref_spans": [],
507
- "eq_spans": [],
508
- "section": "Using meta-outputs to choose between dialogue management moves",
509
- "sec_num": "4.3"
510
- },
511
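The two choices described here can be pictured as a single decision function; the threshold and term shapes below are assumptions for the sketch, not the implemented policy.

def choose_move(plan, meta):
    failures = [m for tag, m in meta if tag == "presupposition_failure"]
    if plan is None or failures:
        # Realize only the most important meta-output as a question.
        return ("paraphrase", failures[:1])
    cost = sum(value for tag, value in meta if tag == "cost")
    # An expensive plan is worth confirming before execution.
    return ("confirm", plan) if cost > 10 else ("execute", plan)

print(choose_move("close(crew_hatch)", [("cost", 3)]))    # execute at once
print(choose_move("close(crew_hatch)", [("cost", 42)]))   # confirm first
print(choose_move(None, [("presupposition_failure", "underspecified_definite(door)")]))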
- {
512
- "text": "This section presents a commented sample dialogue with the implemented PSA interface demo chosen to illustrate the points discussed in the previous sec-tions. We focus in particular on the role that metaoutputs play in interpretation. Text preceded by USER represents spoken utterances from the user. Text preceded by PSA represents synthesized spoken responses from the PSA. Text inside square brackets describes the actions carried out by the PSA.",
513
- "cite_spans": [],
514
- "ref_spans": [],
515
- "eq_spans": [],
516
- "section": "Examples",
517
- "sec_num": "5"
518
- },
519
- {
520
- "text": "1. The PSA can move to various locations and measure environmental factors. In the first exchange, reference resolution is used to interpret the phrase \"all three decks\" as referring to the lower deck, mid deck and flight deck. Plan op-timiT.ation finds the most efficient way to order the locations, and paraphrases the resulting plan back to the user.",
521
- "cite_spans": [],
522
- "ref_spans": [],
523
- "eq_spans": [],
524
- "section": "Examples",
525
- "sec_num": "5"
526
- },
527
- {
528
- "text": "USER: Go to all three decks and measure carbon dioxide.",
529
- "cite_spans": [],
530
- "ref_spans": [],
531
- "eq_spans": [],
532
- "section": "Examples",
533
- "sec_num": "5"
534
- },
535
- {
536
- "text": "PSA: I will move to flight deck, mid deck and then lower deck and I will measure carbon dioxide level, okay? 3. Resolution of ellipsis can involve filling in unspecified semantic parameters to a syntactically complete command. It is not meaningful to talk about a sensor reading without reference to the place at which that reading was carried out; in the following example, resolution assumes that the location in question is the one most recently mentioned in the dialogue. Note also that since the PSA does not have to move in order to carry out the command, and hence can execute it immediately, the DM reasonably decides to skip the confirmation question. This decision is made based on the metaoutput encoding the estimated cost of executing the script.",
537
- "cite_spans": [],
538
- "ref_spans": [],
539
- "eq_spans": [],
540
- "section": "Examples",
541
- "sec_num": "5"
542
- },
543
- {
544
- "text": "USER: What was the carbon dioxide level at fifteen oh five according to the fixed sensors?",
545
- "cite_spans": [],
546
- "ref_spans": [],
547
- "eq_spans": [],
548
- "section": "Examples",
549
- "sec_num": "5"
550
- },
551
- {
552
- "text": "PSA: According to the fixed sensors, at fifteen oh five the carbon dioxide level at the pilot's seat was one percent. 4. The system encounters a user misconception, which results in a \"presupposition failure\" meta-output being produced during reference resolution. It consequently decides to alert the user to this, rather than attempt to carry out the command.",
553
- "cite_spans": [],
554
- "ref_spans": [],
555
- "eq_spans": [],
556
- "section": "Examples",
557
- "sec_num": "5"
558
- },
559
- {
560
- "text": "USER: Close both doors.",
561
- "cite_spans": [],
562
- "ref_spans": [],
563
- "eq_spans": [],
564
- "section": "Examples",
565
- "sec_num": "5"
566
- },
567
- {
568
- "text": "PSA: There are in fact three of them.",
569
- "cite_spans": [],
570
- "ref_spans": [],
571
- "eq_spans": [],
572
- "section": "Examples",
573
- "sec_num": "5"
574
- },
575
- {
576
- "text": "5. The user utterance includes a pronoun, \"it\". The system resolves \"it\" to \"the crew hatch\", and issues an appropriate paraphrase in order to confirm that this is what the user meant.",
577
- "cite_spans": [],
578
- "ref_spans": [],
579
- "eq_spans": [],
580
- "section": "Examples",
581
- "sec_num": "5"
582
- },
583
- {
584
- "text": "USER: Go to crew hatch and close it.",
585
- "cite_spans": [],
586
- "ref_spans": [],
587
- "eq_spans": [],
588
- "section": "Examples",
589
- "sec_num": "5"
590
- },
591
- {
592
- "text": "PSA: I will go to crew hatch and I will close crew hatch, okay?",
593
- "cite_spans": [],
594
- "ref_spans": [],
595
- "eq_spans": [],
596
- "section": "Examples",
597
- "sec_num": "5"
598
- },
599
- {
600
- "text": "USER: Yeah.",
601
- "cite_spans": [],
602
- "ref_spans": [],
603
- "eq_spans": [],
604
- "section": "Examples",
605
- "sec_num": "5"
606
- },
607
- {
608
- "text": "[PSA moves to crew hatch and closes it]",
609
- "cite_spans": [],
610
- "ref_spans": [],
611
- "eq_spans": [],
612
- "section": "Examples",
613
- "sec_num": "5"
614
- },
615
- {
616
- "text": "6. The user asks the system to close the door, but it is unclear which door they are referring to. An \"ambiguous reference\" meta-output is produced by reference resolution, resulting in the DM issuing a clarification question.",
617
- "cite_spans": [],
618
- "ref_spans": [],
619
- "eq_spans": [],
620
- "section": "Examples",
621
- "sec_num": "5"
622
- },
623
- {
624
- "text": "USER: Close the door.",
625
- "cite_spans": [],
626
- "ref_spans": [],
627
- "eq_spans": [],
628
- "section": "Examples",
629
- "sec_num": "5"
630
- },
631
- {
632
- "text": "PSA: Which door do you mean?",
633
- "cite_spans": [],
634
- "ref_spans": [],
635
- "eq_spans": [],
636
- "section": "Examples",
637
- "sec_num": "5"
638
- },
639
- {
640
- "text": "7. The user response is successfully interpreted by reference resolution as an instance of ellipsis. Trying to close the crew hatch a second time however causes a presupposition failure metaoutput to be generated during plan evaluation.",
641
- "cite_spans": [],
642
- "ref_spans": [],
643
- "eq_spans": [],
644
- "section": "Examples",
645
- "sec_num": "5"
646
- },
647
- {
648
- "text": "The system informs the user of this and takes no action.",
649
- "cite_spans": [],
650
- "ref_spans": [],
651
- "eq_spans": [],
652
- "section": "Examples",
653
- "sec_num": "5"
654
- },
655
- {
656
- "text": "USER: The crew hatch.",
657
- "cite_spans": [],
658
- "ref_spans": [],
659
- "eq_spans": [],
660
- "section": "Examples",
661
- "sec_num": "5"
662
- },
663
- {
664
- "text": "The crew hatch is already closed.",
665
- "cite_spans": [],
666
- "ref_spans": [],
667
- "eq_spans": [],
668
- "section": "PSA:",
669
- "sec_num": null
670
- },
671
- {
672
- "text": "8. The user presents three locations in a suboptimal order. Plan evaluation finds a more efficient ordering.",
673
- "cite_spans": [],
674
- "ref_spans": [],
675
- "eq_spans": [],
676
- "section": "PSA:",
677
- "sec_num": null
678
- },
679
- {
680
- "text": "USER: Move to storage lockers, commander's seat and flight deck and measure temperature.",
681
- "cite_spans": [],
682
- "ref_spans": [],
683
- "eq_spans": [],
684
- "section": "PSA:",
685
- "sec_num": null
686
- },
687
- {
688
- "text": "PSA: I will move to flight deck, commander's seat and then storage lockers and I will measure temperature, okay? USER: Sure.",
689
- "cite_spans": [],
690
- "ref_spans": [],
691
- "eq_spans": [],
692
- "section": "PSA:",
693
- "sec_num": null
694
- },
695
- {
696
- "text": "[PSA moves to flight deck, commander's seat and storage lockers in that order and reports temperature at each location]",
697
- "cite_spans": [],
698
- "ref_spans": [],
699
- "eq_spans": [],
700
- "section": "PSA:",
701
- "sec_num": null
702
- },
703
- {
704
- "text": "9. The user asks the system to repeat the previous command. Since the PSA is now starting from a different position, its most efficient plan is no longer the same, and it informs the user of this.",
705
- "cite_spans": [],
706
- "ref_spans": [],
707
- "eq_spans": [],
708
- "section": "PSA:",
709
- "sec_num": null
710
- },
711
- {
712
- "text": "USER: Do that again.",
713
- "cite_spans": [],
714
- "ref_spans": [],
715
- "eq_spans": [],
716
- "section": "PSA:",
717
- "sec_num": null
718
- },
719
- {
720
- "text": "PSA: I will move to storage lockers, commander's seat and then flight deck and I will measure temperature, okay? 10. The user interrupts the PSA in the middle of the plan, and asks it to go back to its last stopping place. Since \"stop\" may involve a potentially hazardous condition, the interface obeys immediately, without attempting to confirm first. We have described a generic architecture for spoken language dialogue interfaces to semi-autonomous agents, based on the standard notion of translating to successively deeper levels of representation. The novel aspects of our scheme center around two ideas: first, that the final output representations are best conceptualized not as logical expressions but rather as programs in a scripting language; second, that steps in the translation process should produce not only a simple output, but also meta-information describing how the output was produced. We have presented examples suggesting how several apparently diverse types of dialogue behavior can be captured simply within our framework, and outlined a prototype implementation of the scheme.",
721
- "cite_spans": [],
722
- "ref_spans": [],
723
- "eq_spans": [],
724
- "section": "PSA:",
725
- "sec_num": null
726
- }
727
- ],
728
- "back_matter": [],
729
- "bib_entries": {
730
- "BIBREF0": {
731
- "ref_id": "b0",
732
- "title": "Real time virtual humans",
733
- "authors": [
734
- {
735
- "first": "N",
736
- "middle": [],
737
- "last": "Badler",
738
- "suffix": ""
739
- },
740
- {
741
- "first": "R",
742
- "middle": [],
743
- "last": "Bindiganavale",
744
- "suffix": ""
745
- },
746
- {
747
- "first": "J",
748
- "middle": [],
749
- "last": "Bourne",
750
- "suffix": ""
751
- },
752
- {
753
- "first": "J",
754
- "middle": [],
755
- "last": "Allbeck",
756
- "suffix": ""
757
- },
758
- {
759
- "first": "J",
760
- "middle": [],
761
- "last": "Shi",
762
- "suffix": ""
763
- },
764
- {
765
- "first": "M",
766
- "middle": [],
767
- "last": "Palmer",
768
- "suffix": ""
769
- }
770
- ],
771
- "year": 1999,
772
- "venue": "International Conference on Digital Media Futures",
773
- "volume": "",
774
- "issue": "",
775
- "pages": "",
776
- "other_ids": {},
777
- "num": null,
778
- "urls": [],
779
- "raw_text": "N. Badler, R. Bindiganavale, J. Bourne, J. Allbeck, J. Shi, and M. Palmer. 1999. Real time virtual humans. In International Conference on Digital Media Futures.",
780
- "links": null
781
- },
782
- "BIBREF1": {
783
- "ref_id": "b1",
784
- "title": "Gemini: A natural language system for spoken language understanding",
785
- "authors": [
786
- {
787
- "first": "J",
788
- "middle": [],
789
- "last": "Dowding",
790
- "suffix": ""
791
- },
792
- {
793
- "first": "M",
794
- "middle": [],
795
- "last": "Gawron",
796
- "suffix": ""
797
- },
798
- {
799
- "first": "D",
800
- "middle": [],
801
- "last": "Appelt",
802
- "suffix": ""
803
- },
804
- {
805
- "first": "L",
806
- "middle": [],
807
- "last": "Cherny",
808
- "suffix": ""
809
- },
810
- {
811
- "first": "R",
812
- "middle": [],
813
- "last": "Moore",
814
- "suffix": ""
815
- },
816
- {
817
- "first": "D",
818
- "middle": [],
819
- "last": "Moran",
820
- "suffix": ""
821
- }
822
- ],
823
- "year": 1993,
824
- "venue": "Proceedings of the Thirty-First Annual Meeting of the Association for Computational Linguistics",
825
- "volume": "",
826
- "issue": "",
827
- "pages": "",
828
- "other_ids": {},
829
- "num": null,
830
- "urls": [],
831
- "raw_text": "J. Dowding, M. Gawron, D. Appelt, L. Cherny, R. Moore, and D. Moran. 1993. Gemini: A nat- ural language system for spoken language un- derstanding. In Proceedings of the Thirty-First Annual Meeting of the Association for Computa- tional Linguistics.",
832
- "links": null
833
- },
834
- "BIBREF2": {
835
- "ref_id": "b2",
836
- "title": "Flakey in action: The 1992 AAAI robot competition",
837
- "authors": [
838
- {
839
- "first": "K",
840
- "middle": [],
841
- "last": "Konolige",
842
- "suffix": ""
843
- },
844
- {
845
- "first": "K",
846
- "middle": [],
847
- "last": "Myers",
848
- "suffix": ""
849
- },
850
- {
851
- "first": "E",
852
- "middle": [],
853
- "last": "Ruspini",
854
- "suffix": ""
855
- },
856
- {
857
- "first": "A",
858
- "middle": [],
859
- "last": "Saffiotti",
860
- "suffix": ""
861
- }
862
- ],
863
- "year": 1993,
864
- "venue": "",
865
- "volume": "",
866
- "issue": "",
867
- "pages": "",
868
- "other_ids": {},
869
- "num": null,
870
- "urls": [],
871
- "raw_text": "K. Konolige, K. Myers, E. Ruspini, and A. Saf- fiotti. 1993. Flakey in action: The 1992 AAAI robot competition. Technical Report SRI Techni- cal Note 528, SKI, AI Center, SKI International, 333 Ravenswood Ave., Menlo Park, CA 94025.",
872
- "links": null
873
- },
874
- "BIBREF3": {
875
- "ref_id": "b3",
876
- "title": "Building distributed software systems with the open agent architecture",
877
- "authors": [
878
- {
879
- "first": "D",
880
- "middle": [],
881
- "last": "Martin",
882
- "suffix": ""
883
- },
884
- {
885
- "first": "A",
886
- "middle": [],
887
- "last": "Cheyer",
888
- "suffix": ""
889
- },
890
- {
891
- "first": "D",
892
- "middle": [],
893
- "last": "Moran",
894
- "suffix": ""
895
- }
896
- ],
897
- "year": 1998,
898
- "venue": "Proceedings of the Third International Conference on the Practical Application of Intelligent Agenta and Multi-Agent Technalogy",
899
- "volume": "",
900
- "issue": "",
901
- "pages": "",
902
- "other_ids": {},
903
- "num": null,
904
- "urls": [],
905
- "raw_text": "D. Martin, A. Cheyer, and D. Moran. 1998. Build- ing distributed software systems with the open agent architecture. In Proceedings of the Third International Conference on the Practical Appli- cation of Intelligent Agenta and Multi-Agent Tech- nalogy.",
906
- "links": null
907
- },
908
- "BIBREF4": {
909
- "ref_id": "b4",
910
- "title": "CommandTalk: A spoken-language interface for battlefield simulations",
911
- "authors": [
912
- {
913
- "first": "R",
914
- "middle": [],
915
- "last": "Moore",
916
- "suffix": ""
917
- },
918
- {
919
- "first": "J",
920
- "middle": [],
921
- "last": "Dowding",
922
- "suffix": ""
923
- },
924
- {
925
- "first": "H",
926
- "middle": [],
927
- "last": "Bratt",
928
- "suffix": ""
929
- },
930
- {
931
- "first": "J",
932
- "middle": [],
933
- "last": "Gawron~",
934
- "suffix": ""
935
- },
936
- {
937
- "first": "-Y",
938
- "middle": [],
939
- "last": "Gorfu",
940
- "suffix": ""
941
- },
942
- {
943
- "first": "A",
944
- "middle": [],
945
- "last": "Cheyer",
946
- "suffix": ""
947
- }
948
- ],
949
- "year": 1997,
950
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
951
- "volume": "",
952
- "issue": "",
953
- "pages": "1--7",
954
- "other_ids": {},
955
- "num": null,
956
- "urls": [],
957
- "raw_text": "R. Moore, J. Dowding, H. Bratt, J. Gawron~- Y. Gorfu, and A. Cheyer. 1997. CommandTalk: A spoken-language interface for battlefield simu- lations. In Proceedings of the Fifth Conference on Applied Natural Language Processing, pages 1-7.",
958
- "links": null
959
- },
960
- "BIBREF5": {
961
- "ref_id": "b5",
962
- "title": "Nuance Communications",
963
- "authors": [
964
- {
965
- "first": "",
966
- "middle": [],
967
- "last": "Nuance",
968
- "suffix": ""
969
- }
970
- ],
971
- "year": 2000,
972
- "venue": "",
973
- "volume": "",
974
- "issue": "",
975
- "pages": "",
976
- "other_ids": {},
977
- "num": null,
978
- "urls": [],
979
- "raw_text": "Nuance, 2000. Nuance Communications, Inc. http://www.nuance.com. As of 9 March 2000.",
980
- "links": null
981
- },
982
- "BIBREF6": {
983
- "ref_id": "b6",
984
- "title": "Integrating natural language and gesture in a robotics domain",
985
- "authors": [
986
- {
987
- "first": "D",
988
- "middle": [],
989
- "last": "Perzanowski",
990
- "suffix": ""
991
- },
992
- {
993
- "first": "A",
994
- "middle": [],
995
- "last": "Schnltz",
996
- "suffix": ""
997
- },
998
- {
999
- "first": "W",
1000
- "middle": [],
1001
- "last": "Adams",
1002
- "suffix": ""
1003
- }
1004
- ],
1005
- "year": 1998,
1006
- "venue": "IEEE International Symposium on Intelligent Control",
1007
- "volume": "",
1008
- "issue": "",
1009
- "pages": "247--252",
1010
- "other_ids": {},
1011
- "num": null,
1012
- "urls": [],
1013
- "raw_text": "D. Perzanowski, A. Schnltz, and W. Adams. 1998. Integrating natural language and gesture in a robotics domain. In IEEE International Sympo- sium on Intelligent Control.\" ISIC/CIRA/ISAS Joint Conference, pages 247-252, Gaithersburg, MD: National Institute of Standards and Tech- nology.",
1014
- "links": null
1015
- },
1016
- "BIBREF7": {
1017
- "ref_id": "b7",
1018
- "title": "Goal tracking in s natural language interface: Towards achieving adjustable autonomy",
1019
- "authors": [
1020
- {
1021
- "first": "D",
1022
- "middle": [],
1023
- "last": "Perzanowski",
1024
- "suffix": ""
1025
- },
1026
- {
1027
- "first": "A",
1028
- "middle": [],
1029
- "last": "Schnltz",
1030
- "suffix": ""
1031
- },
1032
- {
1033
- "first": "W",
1034
- "middle": [],
1035
- "last": "Adams",
1036
- "suffix": ""
1037
- },
1038
- {
1039
- "first": "E",
1040
- "middle": [],
1041
- "last": "Marsh",
1042
- "suffix": ""
1043
- }
1044
- ],
1045
- "year": 1999,
1046
- "venue": "ISIS/CIRA99 Conference",
1047
- "volume": "",
1048
- "issue": "",
1049
- "pages": "",
1050
- "other_ids": {},
1051
- "num": null,
1052
- "urls": [],
1053
- "raw_text": "D. Perzanowski, A. Schnltz, W. Adams, and E. Marsh. 1999. Goal tracking in s natural lan- guage interface: Towards achieving adjustable au- tonomy. In ISIS/CIRA99 Conference, Monterey, CA. IEEE.",
1054
- "links": null
1055
- },
1056
- "BIBREF8": {
1057
- "ref_id": "b8",
1058
- "title": "Personal Satellite Assistant (PSA) Project",
1059
- "authors": [],
1060
- "year": 2000,
1061
- "venue": "PSA",
1062
- "volume": "",
1063
- "issue": "",
1064
- "pages": "",
1065
- "other_ids": {},
1066
- "num": null,
1067
- "urls": [],
1068
- "raw_text": "PSA, 2000. Personal Satellite Assistant (PSA) Project. http://ic.arc.nasa.gov/ic/psa/. As of 9 March 2000.",
1069
- "links": null
1070
- },
1071
- "BIBREF9": {
1072
- "ref_id": "b9",
1073
- "title": "Actions as processes: a position on planning",
1074
- "authors": [
1075
- {
1076
- "first": "D",
1077
- "middle": [],
1078
- "last": "Pyre",
1079
- "suffix": ""
1080
- },
1081
- {
1082
- "first": "L",
1083
- "middle": [],
1084
- "last": "Pryor",
1085
- "suffix": ""
1086
- },
1087
- {
1088
- "first": "D",
1089
- "middle": [],
1090
- "last": "Murphy",
1091
- "suffix": ""
1092
- }
1093
- ],
1094
- "year": 1995,
1095
- "venue": "Working Notes, AAAI Symposium on Eztending Theories of Action",
1096
- "volume": "",
1097
- "issue": "",
1098
- "pages": "169--173",
1099
- "other_ids": {},
1100
- "num": null,
1101
- "urls": [],
1102
- "raw_text": "D. Pyre, L. Pryor, and D. Murphy. 1995. Actions as processes: a position on planning. In Working Notes, AAAI Symposium on Eztending Theories of Action, pages 169-173.",
1103
- "links": null
1104
- },
1105
- "BIBREF10": {
1106
- "ref_id": "b10",
1107
- "title": "An evaluation of strategies for selective utterance verification for spoken natural language dialog",
1108
- "authors": [
1109
- {
1110
- "first": "R",
1111
- "middle": [
1112
- "W"
1113
- ],
1114
- "last": "Smith",
1115
- "suffix": ""
1116
- }
1117
- ],
1118
- "year": 1997,
1119
- "venue": "Proceedings of the Fifth Con-]erence on Applied Natural Language Processing",
1120
- "volume": "",
1121
- "issue": "",
1122
- "pages": "41--48",
1123
- "other_ids": {},
1124
- "num": null,
1125
- "urls": [],
1126
- "raw_text": "R. W. Smith. 1997. An evaluation of strategies for selective utterance verification for spoken natural language dialog. In Proceedings of the Fifth Con- ]erence on Applied Natural Language Processing, pages 41-48.",
1127
- "links": null
1128
- },
1129
- "BIBREF11": {
1130
- "ref_id": "b11",
1131
- "title": "The CommandTalk spoken dialogue system",
1132
- "authors": [
1133
- {
1134
- "first": "A",
1135
- "middle": [],
1136
- "last": "Stent",
1137
- "suffix": ""
1138
- },
1139
- {
1140
- "first": "J",
1141
- "middle": [],
1142
- "last": "Dowding",
1143
- "suffix": ""
1144
- },
1145
- {
1146
- "first": "J",
1147
- "middle": [],
1148
- "last": "Gawron",
1149
- "suffix": ""
1150
- },
1151
- {
1152
- "first": "E",
1153
- "middle": [],
1154
- "last": "Bratt",
1155
- "suffix": ""
1156
- },
1157
- {
1158
- "first": "R",
1159
- "middle": [],
1160
- "last": "Moore",
1161
- "suffix": ""
1162
- }
1163
- ],
1164
- "year": 1999,
1165
- "venue": "Proceedings of the Thirty-Seventh Annual Meeting of the Association for Computational Linguistics",
1166
- "volume": "",
1167
- "issue": "",
1168
- "pages": "183--190",
1169
- "other_ids": {},
1170
- "num": null,
1171
- "urls": [],
1172
- "raw_text": "A. Stent, J. Dowding, J. Gawron, E. Bratt, and R. Moore. 1999. The CommandTalk spoken di- alogue system. In Proceedings of the Thirty- Seventh Annual Meeting of the Association for Computational Linguistics, pages 183-190.",
1173
- "links": null
1174
- },
1175
- "BIBREF12": {
1176
- "ref_id": "b12",
1177
- "title": "Discourse obligations in dialogue processing",
1178
- "authors": [
1179
- {
1180
- "first": "D",
1181
- "middle": [
1182
- "R"
1183
- ],
1184
- "last": "Tranm",
1185
- "suffix": ""
1186
- },
1187
- {
1188
- "first": "J",
1189
- "middle": [],
1190
- "last": "Allen",
1191
- "suffix": ""
1192
- }
1193
- ],
1194
- "year": 1994,
1195
- "venue": "Proceedings of the Thirty-Second Annual Meetiitg of the Association for Computational Linguistics",
1196
- "volume": "",
1197
- "issue": "",
1198
- "pages": "1--8",
1199
- "other_ids": {},
1200
- "num": null,
1201
- "urls": [],
1202
- "raw_text": "D. R. Tranm and J. Allen. 1994. Discourse obliga- tions in dialogue processing. In Proceedings of the Thirty-Second Annual Meetiitg of the Association for Computational Linguistics, pages 1-8.",
1203
- "links": null
1204
- },
1205
- "BIBREF13": {
1206
- "ref_id": "b13",
1207
- "title": "Representations of dialogue state for domain and task independent meta-dialogue",
1208
- "authors": [
1209
- {
1210
- "first": "D",
1211
- "middle": [
1212
- "R"
1213
- ],
1214
- "last": "Traum",
1215
- "suffix": ""
1216
- },
1217
- {
1218
- "first": "C",
1219
- "middle": [
1220
- "F"
1221
- ],
1222
- "last": "Andersen",
1223
- "suffix": ""
1224
- }
1225
- ],
1226
- "year": 1999,
1227
- "venue": "Proceedings of the IJ-CAI'gg Workshop on Knowledge and Reasoning in Practical Dialogue Systems",
1228
- "volume": "",
1229
- "issue": "",
1230
- "pages": "113--120",
1231
- "other_ids": {},
1232
- "num": null,
1233
- "urls": [],
1234
- "raw_text": "D. R. Traum and C. F. Andersen. 1999. Represen- tations of dialogue state for domain and task inde- pendent meta-dialogue. In Proceedings of the IJ- CAI'gg Workshop on Knowledge and Reasoning in Practical Dialogue Systems, pages 113-120.",
1235
- "links": null
1236
- },
1237
- "BIBREF14": {
1238
- "ref_id": "b14",
1239
- "title": "Semantic rules for English",
1240
- "authors": [
1241
- {
1242
- "first": "J",
1243
- "middle": [],
1244
- "last": "Van Eijck",
1245
- "suffix": ""
1246
- },
1247
- {
1248
- "first": "R",
1249
- "middle": [],
1250
- "last": "Moore",
1251
- "suffix": ""
1252
- }
1253
- ],
1254
- "year": 1992,
1255
- "venue": "",
1256
- "volume": "",
1257
- "issue": "",
1258
- "pages": "",
1259
- "other_ids": {},
1260
- "num": null,
1261
- "urls": [],
1262
- "raw_text": "J. van Eijck and R. Moore. 1992. Semantic rules for English. In H. Alshawi, editor, The Core Lan- guage Engine. MIT Press.",
1263
- "links": null
1264
- },
1265
- "BIBREF15": {
1266
- "ref_id": "b15",
1267
- "title": "Instructing animated agents: Viewing language in behavioral terms",
1268
- "authors": [
1269
- {
1270
- "first": "B",
1271
- "middle": [],
1272
- "last": "Webber",
1273
- "suffix": ""
1274
- }
1275
- ],
1276
- "year": 1995,
1277
- "venue": "Proceedings of the International Conference on Cooperative Multi-modal Communication",
1278
- "volume": "",
1279
- "issue": "",
1280
- "pages": "",
1281
- "other_ids": {},
1282
- "num": null,
1283
- "urls": [],
1284
- "raw_text": "B. Webber. 1995. Instructing animated agents: Viewing language in behavioral terms. In Proceed- ings of the International Conference on Coopera- tive Multi-modal Communication.",
1285
- "links": null
1286
- },
1287
- "BIBREF16": {
1288
- "ref_id": "b16",
1289
- "title": "A procedural model of language understanding",
1290
- "authors": [
1291
- {
1292
- "first": "T",
1293
- "middle": [
1294
- "A"
1295
- ],
1296
- "last": "Winograd",
1297
- "suffix": ""
1298
- }
1299
- ],
1300
- "year": 1973,
1301
- "venue": "",
1302
- "volume": "",
1303
- "issue": "",
1304
- "pages": "",
1305
- "other_ids": {},
1306
- "num": null,
1307
- "urls": [],
1308
- "raw_text": "T. A. Winograd. 1973. A procedural model of lan- guage understanding. In R. C. Shank and K. M.",
1309
- "links": null
1310
- },
1311
- "BIBREF17": {
1312
- "ref_id": "b17",
1313
- "title": "Computer Models of Thought and Language",
1314
- "authors": [
1315
- {
1316
- "first": "Colby",
1317
- "middle": [],
1318
- "last": "",
1319
- "suffix": ""
1320
- }
1321
- ],
1322
- "year": null,
1323
- "venue": "",
1324
- "volume": "",
1325
- "issue": "",
1326
- "pages": "",
1327
- "other_ids": {},
1328
- "num": null,
1329
- "urls": [],
1330
- "raw_text": "Colby, editors, Computer Models of Thought and Language. Freeman, San Francisco, CA.",
1331
- "links": null
1332
- }
1333
- },
1334
- "ref_entries": {}
1335
- }
1336
- }
 
Full_text_JSON/prefixA/json/A00/A00-1017.json DELETED
@@ -1,1033 +0,0 @@
1
- {
2
- "paper_id": "A00-1017",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:29.736740Z"
6
- },
7
- "title": "A Representation for Complex and Evolving Data Dependencies in Generation",
8
- "authors": [
9
- {
10
- "first": "C",
11
- "middle": [],
12
- "last": "Mellish $",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": ""
16
- },
17
- {
18
- "first": "R",
19
- "middle": [],
20
- "last": "Evans",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": ""
24
- },
25
- {
26
- "first": "L",
27
- "middle": [],
28
- "last": "Cahill",
29
- "suffix": "",
30
- "affiliation": {},
31
- "email": ""
32
- },
33
- {
34
- "first": "C",
35
- "middle": [],
36
- "last": "Doran",
37
- "suffix": "",
38
- "affiliation": {},
39
- "email": ""
40
- },
41
- {
42
- "first": "D",
43
- "middle": [],
44
- "last": "Paiva",
45
- "suffix": "",
46
- "affiliation": {},
47
- "email": ""
48
- },
49
- {
50
- "first": "D",
51
- "middle": [],
52
- "last": "Scott T, N Tipper",
53
- "suffix": "",
54
- "affiliation": {},
55
- "email": ""
56
- }
57
- ],
58
- "year": "",
59
- "venue": null,
60
- "identifiers": {},
61
- "abstract": "This paper introduces an approach to representing the kinds of information that components in a natural language generation (NLG) system will need to communicate to one another. This information may be partial, may involve more than one level of analysis and may need to include information about the history of a derivation. We present a general representation scheme capable of handling these cases. In addition, we make a proposal for organising intermodule communication in an NLG system by having a central server for this information. We have validated the approach by a reanalysis of an existing NLG system and through a full implementation of a runnable specification. 1This work is supported by ESPRC grants GR/L77041 (Edinburgh) and GR/L77102 (Brighton),",
62
- "pdf_parse": {
63
- "paper_id": "A00-1017",
64
- "_pdf_hash": "",
65
- "abstract": [
66
- {
67
- "text": "This paper introduces an approach to representing the kinds of information that components in a natural language generation (NLG) system will need to communicate to one another. This information may be partial, may involve more than one level of analysis and may need to include information about the history of a derivation. We present a general representation scheme capable of handling these cases. In addition, we make a proposal for organising intermodule communication in an NLG system by having a central server for this information. We have validated the approach by a reanalysis of an existing NLG system and through a full implementation of a runnable specification. 1This work is supported by ESPRC grants GR/L77041 (Edinburgh) and GR/L77102 (Brighton),",
68
- "cite_spans": [],
69
- "ref_spans": [],
70
- "eq_spans": [],
71
- "section": "Abstract",
72
- "sec_num": null
73
- }
74
- ],
75
- "body_text": [
76
- {
77
- "text": "One of the distinctive properties of natural language generation when compared with other language engineering applications is that it has to take seriously the full range of linguistic representation, from concepts to morphology, or even phonetics. Any processing system is only as sophisticated as its input allows, so while a natural language understanding system might be judged primarily by its syntactic prowess, even if its attention to semantics, pragmatics and underlying conceptual analysis is minimal, a generation system is only as good as its deepest linguistic representations. Moreover, any attempt to abstract away from individual generation systems to a more generic architectural specification faces an even greater challenge: not only are complex linguistic representations required, able to support the dynamic evolutionary development of data during the gener-* Now at the MITRE Corporation, Bedford, MA, USA, cdoran@mitre, org.",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "Introduction",
82
- "sec_num": "1"
83
- },
84
- {
85
- "text": "ation process, but they must do so in a generic and flexible fashion.",
86
- "cite_spans": [],
87
- "ref_spans": [],
88
- "eq_spans": [],
89
- "section": "Introduction",
90
- "sec_num": "1"
91
- },
92
- {
93
- "text": "This paper describes a representation developed to meet these requirements. It offers a formally well-defined declarative representation language, which provides a framework for expressing the complex and dynamic data requirements of NLG systems. The approach supports different levels of representation, mixed representations that cut across levels, partial and shared structures and 'canned' representations, as well as dynamic relationships between data at different stages in processing. We are using the approach to develop a high level data model for NLG systems as part of a generic generation architecture called RAGS 1.",
94
- "cite_spans": [],
95
- "ref_spans": [],
96
- "eq_spans": [],
97
- "section": "Introduction",
98
- "sec_num": "1"
99
- },
100
- {
101
- "text": "The framework has been implemented in the form of a database server for modular generation systems. As proof of concept of the framework, we have reimplemented an existing NLG system. The system we chose was the Caption Generation System (CGS) (Mittal et al., 1995; Mittal et al., 1998) . The reimplementation involved defining the interfaces to the modules of CGS in terms of the RAGS representations and then implementing modules that had the requisite input and output representations.",
102
- "cite_spans": [
103
- {
104
- "start": 244,
105
- "end": 265,
106
- "text": "(Mittal et al., 1995;",
107
- "ref_id": "BIBREF4"
108
- },
109
- {
110
- "start": 266,
111
- "end": 286,
112
- "text": "Mittal et al., 1998)",
113
- "ref_id": "BIBREF5"
114
- }
115
- ],
116
- "ref_spans": [],
117
- "eq_spans": [],
118
- "section": "Introduction",
119
- "sec_num": "1"
120
- },
121
- {
122
- "text": "Generation systems, especially end-to-end, applied generation systems, have, unsurprisingly, many things in common. Reiter (1994) proposed an analysis of such systems in terms of a simple three stage pipeline. More recently, the RAGS project attempted to repeat the anal-ysis (Cahill et al., 1999a) , but found that while most systems did implement a pipeline, they did not implement the same pipeline -different functionalities occurred in different places and different orders in different systems. In order to accommodate this result, we sought to develop an architecture that is more general than a simple pipeline, and thus supports the range of pipelines observed, as well as other more complex control regimes (see (Cahill et al., 1999a; Cahill et al., 1999b) ). In this paper, we argue that supporting such an architecture requires careful consideration of the way data representations interact and develop. Any formal framework for expressing the architecture must take account of this.",
123
- "cite_spans": [
124
- {
125
- "start": 116,
126
- "end": 129,
127
- "text": "Reiter (1994)",
128
- "ref_id": "BIBREF6"
129
- },
130
- {
131
- "start": 276,
132
- "end": 298,
133
- "text": "(Cahill et al., 1999a)",
134
- "ref_id": "BIBREF0"
135
- },
136
- {
137
- "start": 722,
138
- "end": 744,
139
- "text": "(Cahill et al., 1999a;",
140
- "ref_id": "BIBREF0"
141
- },
142
- {
143
- "start": 745,
144
- "end": 766,
145
- "text": "Cahill et al., 1999b)",
146
- "ref_id": "BIBREF1"
147
- }
148
- ],
149
- "ref_spans": [],
150
- "eq_spans": [],
151
- "section": "Introduction",
152
- "sec_num": "1"
153
- },
154
- {
155
- "text": "We noted in the introduction that generation systems have to deal with a range of linguistic information. It is natural, especially in the context of a generic architecture proposal, to model this breadth in terms of discrete layers of representation: (1999a) introduce layers such as conceptual, semantic, rhetorical, syntactic and document structure, but the precise demarcation is not as important here as the principle. The different kinds of information are typically represented differently, and built up separately. However the layers are far from independent: objects at one layer are directly related to those at others, forming chains of dependency from conceptual through rhetorical and semantic structure to final syntactic and document realisation. This means that data resources, such as grammars and lexicons, and processing modules in the system, are often defined in terms of mixed data: structures that include information in more than one representation layer. So the ability to represent such mixed structures in a single formal framework is an important property of a generic data proposal.",
156
- "cite_spans": [],
157
- "ref_spans": [],
158
- "eq_spans": [],
159
- "section": "The representational requirements of generation systems",
160
- "sec_num": "2"
161
- },
162
- {
163
- "text": "In addition, it is largely standard in generation as elsewhere in language applications, to make extensive use of partial representations, often using a type system to capture grades of underspecification. An immediate corollary of providing support for partial structures is the notion that they may become further specified over time, that data structures evolve. If the framework seeks to avoid over-commitment to particular processing strategies it needs to provide a way of representing such evolution explicitly if required, rather than relying on destructive modification of a structure. Related to this, it should provide explicit support for representing alternative specifications at any point. Finally, to fully support efficient processing across the range of applications, from the simple to the most complex, the representation must allow for compact sharing of information in tangled structures (two structures which share components).",
164
- "cite_spans": [],
165
- "ref_spans": [],
166
- "eq_spans": [],
167
- "section": "The representational requirements of generation systems",
168
- "sec_num": "2"
169
- },
170
- {
171
- "text": "In addition to these direct requirements of the generation task itself, additional requirements arise from more general methodological considerations: we desire a representation that is formally well defined, allows for theoretical reasoning about the data and performance of systems, and supports control regimes from simple deterministic pipelines to complex parallel architectures.",
172
- "cite_spans": [],
173
- "ref_spans": [],
174
- "eq_spans": [],
175
- "section": "The representational requirements of generation systems",
176
- "sec_num": "2"
177
- },
178
- {
179
- "text": "In this section, we present our proposal for a general representation scheme capable of covering the above requirements. Our formulation is layered: the foundation is a simple, flexible, rigorously defined graph representation formalism, on top of which we introduce notions of complex types and larger data structures and relationships between them. This much is sufficient to capture the requirements just discussed. We suppose a yet higher level of specification could capture a more constraining data model but make no specific proposals about this here, however the following sections use examples that do conform to such a higher level data model. The lowest level of the representation scheme is:",
180
- "cite_spans": [],
181
- "ref_spans": [],
182
- "eq_spans": [],
183
- "section": "The Representation Scheme",
184
- "sec_num": "3"
185
- },
186
- {
187
- "text": "\u2022 relational: the basic data entity is x -~ y, an arrow representing a relation from object x to object y;",
188
- "cite_spans": [],
189
- "ref_spans": [],
190
- "eq_spans": [],
191
- "section": "The Representation Scheme",
192
- "sec_num": "3"
193
- },
194
- {
195
- "text": "\u2022 typed: objects and arrows have an associated type system, so it is possible to define classes and subclasses of objects and arrows.",
196
- "cite_spans": [],
197
- "ref_spans": [],
198
- "eq_spans": [],
199
- "section": "The Representation Scheme",
200
- "sec_num": "3"
201
- },
202
- {
203
- "text": "At the most fundamental level, this is more or less the whole definition. There is no commitment to what object or arrow types there are or how they relate to each other. So a representation allowed by the scheme consists of:",
204
- "cite_spans": [],
205
- "ref_spans": [],
206
- "eq_spans": [],
207
- "section": "The Representation Scheme",
208
- "sec_num": "3"
209
- },
210
- {
211
- "text": "\u2022 a set of objects, organised into types;",
212
- "cite_spans": [],
213
- "ref_spans": [],
214
- "eq_spans": [],
215
- "section": "The Representation Scheme",
216
- "sec_num": "3"
217
- },
218
- {
219
- "text": "\u2022 a set of binary relations, organised into types;",
220
- "cite_spans": [],
221
- "ref_spans": [],
222
- "eq_spans": [],
223
- "section": "The Representation Scheme",
224
- "sec_num": "3"
225
- },
226
- {
227
- "text": "\u2022 a set of arrows, each indicating that a relation holds between one object and another object.",
228
- "cite_spans": [],
229
- "ref_spans": [],
230
- "eq_spans": [],
231
- "section": "The Representation Scheme",
232
- "sec_num": "3"
233
- },
234
- {
235
- "text": "Sets, sequences and functions For the next level, we introduce more structure in the type system to support sets, sequences and functions. Objects are always atomic (though they can be of type set, sequence or function) -it is not possible to make an object which actually is a set of two other objects (as you might with data structures in a computer program). To create a set, we introduce a set type for the object, and a set membership arrow type (el), that links the set's elements to the set. Similarly, for a sequence, we introduce a sequence type and sequence member arrow types (1-el, 2-el, 3-el, ... ) , and for a function, we have a complex type which specifies the types of the arrows that make up the domain and the range of the function. Here, the tree nodes correspond to objects, each labelled with its type. The root node is of type SemRep, and although it is not an explicit sequence type, we can see that it is a triple, as it has three sequence member arrows (with types 1-el, 2-el and 3-el). Local and non-local arrows The second extension to the basic representation scheme is to distinguish two different abstract kinds of arrows -local and non-local. Fundamentally we are representing just a homogeneous network of objects and relationships. In the example above we saw a network of arrows that we might want to view as a single data structure, and other major data types might similarly appear as networks. Additionally, we want to be able to express relationships between these larger 'structures' -between structures of the same type (alternative solutions, or revised versions) or of different types (semantic and syntactic for example). To capture these distinctions among arrows, we classify our arrow types as local or non-local (we could do this in the type system itself, or leave it as an informal distinction). Local arrows are used to build up networks that we think of as single data structures. Non-local arrows express relationships between such data structures.",
236
- "cite_spans": [
237
- {
238
- "start": 587,
239
- "end": 611,
240
- "text": "(1-el, 2-el, 3-el, ... )",
241
- "ref_id": null
242
- }
243
- ],
244
- "ref_spans": [],
245
- "eq_spans": [],
246
- "section": "The Representation Scheme",
247
- "sec_num": "3"
248
- },
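The typed objects-and-arrows encoding just described is easy to sketch. The following is not RAGS code but a minimal Python illustration, under invented names (Obj, Arrow), of how the SemRep triple of Figure 1 can be laid down as atomic objects connected by 1-el/2-el/3-el, el and Role-typed arrows.

    # Minimal illustration (not the RAGS implementation) of typed objects
    # and typed binary arrows, encoding the SemRep triple of Figure 1.
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Obj:
        ident: str   # unique name of the atomic object
        otype: str   # its declared type, e.g. "SemRep", "DR", "show"

    @dataclass(frozen=True)
    class Arrow:
        atype: str   # relation type, e.g. "1-el", "el", "agent"
        src: Obj
        dst: Obj

    semrep = Obj("s0", "SemRep")
    dr     = Obj("d0", "DR")
    preds  = Obj("p0", "set(SemPred)")
    show   = Obj("p1", "show")
    roles  = Obj("f0", "fun(Role,set(SemRep))")
    agent  = Obj("s1", "SemRep")        # not yet fully specified

    arrows = {
        Arrow("1-el", semrep, dr),      # first element of the triple
        Arrow("2-el", semrep, preds),   # second element: a set object ...
        Arrow("el", show, preds),       # ... whose sole member is 'show'
        Arrow("3-el", semrep, roles),   # third element: a partial function ...
        Arrow("agent", roles, agent),   # ... from Role arrow types to SemReps
    }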
249
- {
250
- "text": "All the arrow types we saw above were local. Examples of non-local arrows might include:",
251
- "cite_spans": [],
252
- "ref_spans": [],
253
- "eq_spans": [],
254
- "section": "The Representation Scheme",
255
- "sec_num": "3"
256
- },
257
- {
258
- "text": "realises These arro~vs link something more abstract to something less abstract that realises it. Chains of realises arrows might lead from the original conceptual input to the generator through rhetorical, semantic and syntactic structures to the actual words that express the input.",
259
- "cite_spans": [],
260
- "ref_spans": [],
261
- "eq_spans": [],
262
- "section": "The Representation Scheme",
263
- "sec_num": "3"
264
- },
265
- {
266
- "text": "revises These arrows link a structure to another one of the same type, which is considered to be a 'better' solution -perhaps because it is more instantiated. It is important to note that parts of larger structures can be revised without revising the entire structure.",
267
- "cite_spans": [],
268
- "ref_spans": [],
269
- "eq_spans": [],
270
- "section": "The Representation Scheme",
271
- "sec_num": "3"
272
- },
273
- {
274
- "text": "coreference These arrows link structures which are somehow \"parallel\" and which perhaps share some substructure, i.e., tangled structures. For instance, document representations may be linked to rhetorical representations, either as whole isomorphic structures or at the level of individual constituents.",
275
- "cite_spans": [],
276
- "ref_spans": [],
277
- "eq_spans": [],
278
- "section": "The Representation Scheme",
279
- "sec_num": "3"
280
- },
281
- {
282
- "text": "Notice that the representation scheme does not enforce any kind of well-formedness with respect to local and non-local arrows. In fact, although it is natural to think of a 'structure' as being a maximal network of local arrows with a single root object, there's no reason why this should be so -networks with multiple roots represent tangled structures (structures that share content), networks that include non-local links might be mixed representations, containing information of more than one sort. Such techniques might be useful for improving generator efficiency, or representing canned text or templates, cf. (Calder et al., 1999) .",
283
- "cite_spans": [
284
- {
285
- "start": 617,
286
- "end": 638,
287
- "text": "(Calder et al., 1999)",
288
- "ref_id": "BIBREF2"
289
- }
290
- ],
291
- "ref_spans": [],
292
- "eq_spans": [],
293
- "section": "The Representation Scheme",
294
- "sec_num": "3"
295
- },
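To make the local/non-local distinction concrete, here is a purely illustrative continuation of the same toy encoding (hypothetical names, not the RAGS implementation): arrow types are split into the two kinds, and a "structure" is recovered as a maximal network of local arrows.

    # Illustrative sketch: arrow types split into local and non-local, and a
    # "structure" recovered as everything connected to a root by local arrows.
    LOCAL_TYPES = {"el", "1-el", "2-el", "3-el", "agent", "affected"}
    NON_LOCAL_TYPES = {"realises", "revises", "coreference"}

    def structure(root, arrows):
        """Objects reachable from `root` through local arrows only."""
        seen, frontier = {root}, [root]
        while frontier:
            node = frontier.pop()
            for atype, src, dst in arrows:
                if atype in LOCAL_TYPES and node in (src, dst):
                    other = dst if node == src else src
                    if other not in seen:
                        seen.add(other)
                        frontier.append(other)
        return seen

    arrows = [
        ("1-el", "s0", "d0"),           # local: inside one SemRep structure
        ("2-el", "s0", "set0"),
        ("el", "p1", "set0"),           # set membership: element -> set
        ("realises", "rhet0", "s0"),    # non-local: links two structures
    ]
    print(structure("s0", arrows))      # {'s0', 'd0', 'set0', 'p1'}; rhet0 stays out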
296
- {
297
- "text": "Partial and Opaque structures Partial structures are essential when a module needs to produce a skeleton of a representation that it does not have the competence to completely fill out. For instance, lexical choice brings with it certain syntactic commitments, but in most NLG systems lexical choice occurs some time before a grammar is consulted to flesh out syntactic structure in detail. By simply leaving out local arrows, we can represent a range of partial structures. Consider Fig. 2 , where the triangles represent local structure, representing a sentence object and its component verb phrase. There is a link to a subject noun phrase object, but none of the local arrows of the actual noun phrase are present. In subsequent processing this local structure might be filled in. This is possible as long as the noun phrase object has been declared to be of the right type.",
298
- "cite_spans": [],
299
- "ref_spans": [
300
- {
301
- "start": 484,
302
- "end": 490,
303
- "text": "Fig. 2",
304
- "ref_id": "FIGREF2"
305
- }
306
- ],
307
- "eq_spans": [],
308
- "section": "The Representation Scheme",
309
- "sec_num": "3"
310
- },
311
- {
312
- "text": "An opaque structure is one which has an incomplete derivational history -for example part of a syntactic structure without any corresponding semantic structure. Three possible reasons for having such structures are (a) to allow structure to be introduced that the generator is not capable of producing directly, (b) to prevent the generator from interfering with the structure thus built (for example, by trying to modify an idiom in an inappropriate way), or (c) to improve generator efficiency by hiding detail that may lead to wasteful processing. An opaque structure is represented simply by the failure to include a realises arrow to that structure. Such structures provide the basis for a generalised approach to \"canning\".",
313
- "cite_spans": [],
314
- "ref_spans": [],
315
- "eq_spans": [],
316
- "section": "The Representation Scheme",
317
- "sec_num": "3"
318
- },
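A minimal sketch of how partial and opaque structures fall out of the same machinery, assuming the toy (type, src, dst) arrow triples from the previous sketches: a hole is a typed object with no local arrows of its own, and opacity is just the absence of an incoming realises arrow.

    # Illustrative sketch: a "hole" is a typed object with no local arrows of
    # its own; a structure is opaque when no realises arrow leads into it.
    LOCAL_TYPES = {"subj", "vp", "head"}   # toy local arrow types

    def is_hole(obj, arrows):
        return not any(t in LOCAL_TYPES and src == obj for t, src, _ in arrows)

    def is_opaque(obj, arrows):
        return not any(t == "realises" and dst == obj for t, _, dst in arrows)

    arrows = [
        ("subj", "S", "NP"),   # the sentence points at a subject NP ...
        ("vp", "S", "VP"),
        ("head", "VP", "V"),   # ... and the VP has internal structure
    ]
    print(is_hole("NP", arrows))    # True: the NP's own local arrows are absent
    print(is_opaque("S", arrows))   # True: nothing records how S was derived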
319
- {
320
- "text": "There are many ways that modules in an NLG system could communicate information using the representation scheme just outlined.",
321
- "cite_spans": [],
322
- "ref_spans": [],
323
- "eq_spans": [],
324
- "section": "Implementation",
325
- "sec_num": "4"
326
- },
327
- {
328
- "text": "Here we describe a particularly general model of inter-module communication, based around modules communicating with a single centralised repository of data called the whiteboard (Calder et al., 1999) . A whiteboard is a cumulative typed relational blackboard:",
329
- "cite_spans": [
330
- {
331
- "start": 179,
332
- "end": 200,
333
- "text": "(Calder et al., 1999)",
334
- "ref_id": "BIBREF2"
335
- }
336
- ],
337
- "ref_spans": [],
338
- "eq_spans": [],
339
- "section": "Implementation",
340
- "sec_num": "4"
341
- },
342
- {
343
- "text": "\u2022 typed and relational: because it is based on using the above representation scheme;",
344
- "cite_spans": [],
345
- "ref_spans": [],
346
- "eq_spans": [],
347
- "section": "Implementation",
348
- "sec_num": "4"
349
- },
350
- {
351
- "text": "\u2022 a blackboard: a control architecture and data store shared between processing modules; typically, modules add/change/remove objects in the data store, examine its contents, and/or ask to be notified of changes;",
352
- "cite_spans": [],
353
- "ref_spans": [],
354
- "eq_spans": [],
355
- "section": "Implementation",
356
- "sec_num": "4"
357
- },
358
- {
359
- "text": "\u2022 cumulative: unlike standard blackboards, once data is added, it can't be changed or removed. So a structure is built incrementally by making successive copies of it (or of constituents of it) linked by revises links (although actually, there's no constraint on the order in which they are built).",
360
- "cite_spans": [],
361
- "ref_spans": [],
362
- "eq_spans": [],
363
- "section": "Implementation",
364
- "sec_num": "4"
365
- },
366
- {
367
- "text": "A whiteboard allows modules to add arrows (typically forming networks through arrows sharing source or target objects), to inspect the set of arrows looking for particular configurations of types, or to be informed when a particular type of arrow (or group of arrows) is added.",
368
- "cite_spans": [],
369
- "ref_spans": [],
370
- "eq_spans": [],
371
- "section": "Implementation",
372
- "sec_num": "4"
373
- },
374
- {
375
- "text": "The whiteboard is an active database server. This means that it runs as an independent process that other modules connect to by appropriate means. There are essentially three kinds of interaction that a module might have with the whiteboard server:",
376
- "cite_spans": [],
377
- "ref_spans": [],
378
- "eq_spans": [],
379
- "section": "Implementation",
380
- "sec_num": "4"
381
- },
382
- {
383
- "text": "\u2022 publish -add an arrow or arrows to the whiteboard;",
384
- "cite_spans": [],
385
- "ref_spans": [],
386
- "eq_spans": [],
387
- "section": "Implementation",
388
- "sec_num": "4"
389
- },
390
- {
391
- "text": "\u2022 query -look for an arrow or arrows in the whiteboard;",
392
- "cite_spans": [],
393
- "ref_spans": [],
394
- "eq_spans": [],
395
- "section": "Implementation",
396
- "sec_num": "4"
397
- },
398
- {
399
- "text": "\u2022 wait -register interest in an arrow or arrows appearing in the whiteboard.",
400
- "cite_spans": [],
401
- "ref_spans": [],
402
- "eq_spans": [],
403
- "section": "Implementation",
404
- "sec_num": "4"
405
- },
406
- {
407
- "text": "In both query and wait, arrows are specified by type, and with a hierarchical type system on objects and relations, this amounts to a pattern that matches arrows of subtypes as well. The wait function allows the whiteboard to take the initiative in processing -if a module waits on a query then the whiteboard waits until the query is satisfied, and then tells the module about it. So the module does not have to continuously scan the whiteboard for work to do, but can let the whiteboard tell it as soon as anything interesting happens.",
408
- "cite_spans": [],
409
- "ref_spans": [],
410
- "eq_spans": [],
411
- "section": "Implementation",
412
- "sec_num": "4"
413
- },
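The publish/query/wait protocol can be mocked up in a few lines. This is an in-process Python analogue with invented names, not the SICStus Prolog server described later: matching here is by exact arrow type, whereas the real whiteboard matches subtypes through the type hierarchy.

    # In-process analogue (invented, not the whiteboard server itself) of the
    # three whiteboard interactions. Arrows are (arrow_type, src, dst)
    # triples and matching is by exact type.
    class Whiteboard:
        def __init__(self):
            self.arrows = []     # cumulative: never changed or removed
            self.waiters = []    # (arrow_type, callback) registrations

        def publish(self, arrow):
            self.arrows.append(arrow)
            for atype, callback in list(self.waiters):
                if arrow[0] == atype:
                    callback(arrow)

        def query(self, atype):
            return [a for a in self.arrows if a[0] == atype]

        def wait(self, atype, callback):
            self.waiters.append((atype, callback))
            for arrow in self.query(atype):   # also fire on existing data
                callback(arrow)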
414
- {
415
- "text": "Typically a module will start up and register interest in the kind of arrow that represents the module's input data. It will then wait for the whiteboard to notify it of instances of that data (produced by other modules), and whenever anything turns up, it processes it, adding its own results to the whiteboard. All the modules do this asynchronously, and processing continues until no module has any more work to do. This may sound like a recipe for confusion, but more standard pipelined behaviour is not much different. In fact, pipelining is exactly a data-based constraint -the second module in a pipeline does not start until the first one produces its output.",
416
- "cite_spans": [],
417
- "ref_spans": [],
418
- "eq_spans": [],
419
- "section": "Implementation",
420
- "sec_num": "4"
421
- },
422
- {
423
- "text": "However, to be a strict pipeline, the first module must produce all of its output before the second one starts. This can be achieved simply by making the first module produce all its output at once, but sometimes that is not ideal -for example if the module is recursive and wishes to react to its own output. Alternative strategies include the use of markers in the whiteboard, so that modules can tell each other that they've finished processing (by adding a marker), or extending the whiteboard architecture itself so that modules can tell the whiteboard that they have finished processing, and other modules can wait for that to occur.",
424
- "cite_spans": [],
425
- "ref_spans": [],
426
- "eq_spans": [],
427
- "section": "Implementation",
428
- "sec_num": "4"
429
- },
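A usage sketch of this data-driven pipelining, reusing the toy Whiteboard class from the previous sketch and an invented "semantics-done" marker arrow of the kind just mentioned:

    # Pipelining as a data constraint: the second stage registers interest
    # first, then runs only once the first stage publishes its marker.
    wb = Whiteboard()

    def realiser(arrow):
        _, _, sem = arrow
        wb.publish(("syntax", sem, sem + "-syn"))    # downstream result

    wb.wait("semantics-done", realiser)
    wb.publish(("semantics", "plan1", "sem1"))       # first stage output ...
    wb.publish(("semantics-done", "plan1", "sem1"))  # ... then the marker
    print(wb.query("syntax"))                        # [('syntax', 'sem1', 'sem1-syn')]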
430
- {
431
- "text": "In order to prove this representation scheme in practice, we have implemented the white-board in Sicstus Prolog and used it to support data communications between modules in a reconstruction of the Caption Generation System (Mittal et al., 1995) . CGS is a system developed at the University of Pittsburgh, which takes input from the SAGE graphics presentation system (Roth et al., 1994) and generates captions for the graphics SAGE produces. We selected it for this effort because it appeared to be a fairly simple pipelined system, with modules performing clearly defined linguistic tasks. As such, we thought it would be a good test case for our whiteboard specification. Although the CGS is organised as a pipeline, shown in Figure 3 , the representations communicated between the modules do not correspond to complete, separate instances of RAGS datatype representations. Instead, the representations at the various levels accumulate along the pipeline or are revised in a way that does not correspond exactly to module boundaries. Figure 3 gives a simple picture of how the different levels of representation build up. The labels for the RAGS representations refer to the following:",
432
- "cite_spans": [
433
- {
434
- "start": 224,
435
- "end": 245,
436
- "text": "(Mittal et al., 1995)",
437
- "ref_id": "BIBREF4"
438
- },
439
- {
440
- "start": 368,
441
- "end": 387,
442
- "text": "(Roth et al., 1994)",
443
- "ref_id": "BIBREF7"
444
- }
445
- ],
446
- "ref_spans": [
447
- {
448
- "start": 729,
449
- "end": 737,
450
- "text": "Figure 3",
451
- "ref_id": null
452
- },
453
- {
454
- "start": 1037,
455
- "end": 1045,
456
- "text": "Figure 3",
457
- "ref_id": null
458
- }
459
- ],
460
- "eq_spans": [],
461
- "section": "Reconstruction of the Caption Generation System",
462
- "sec_num": "5"
463
- },
464
- {
465
- "text": "\u2022 I = conceptual;",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Reconstruction of the Caption Generation System",
470
- "sec_num": "5"
471
- },
472
- {
473
- "text": "\u2022 II --semantic;",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "Reconstruction of the Caption Generation System",
478
- "sec_num": "5"
479
- },
480
- {
481
- "text": "\u2022III = rhetorical;",
482
- "cite_spans": [],
483
- "ref_spans": [],
484
- "eq_spans": [],
485
- "section": "Reconstruction of the Caption Generation System",
486
- "sec_num": "5"
487
- },
488
- {
489
- "text": "\u2022 IV = document;",
490
- "cite_spans": [],
491
- "ref_spans": [],
492
- "eq_spans": [],
493
- "section": "Reconstruction of the Caption Generation System",
494
- "sec_num": "5"
495
- },
496
- {
497
- "text": "\u2022 V = syntactic.",
498
- "cite_spans": [],
499
- "ref_spans": [],
500
- "eq_spans": [],
501
- "section": "Reconstruction of the Caption Generation System",
502
- "sec_num": "5"
503
- },
504
- {
505
- "text": "For instance, some semantic (II) information is produced by the Text Planning module, and more work is done on this by Aggregation, but the semantic level of representation is not complete and final until the Referring Expression module has run. Also, for instance, at the point where the Ordering module has run, there are partially finished versions of three different types of representation. It is clear from this that the interfaces between the modules are more complex than could be accounted for by just referring to the individual levels of representation of RAGS. The ability to express combinations of structures and partial structures was fundamental to the reimplementation of CGS. We highlight below a few of the interesting places where these features were used.",
506
- "cite_spans": [],
507
- "ref_spans": [],
508
- "eq_spans": [],
509
- "section": "Reconstruction of the Caption Generation System",
510
- "sec_num": "5"
511
- },
512
- {
513
- "text": "AbsSemRep I-el ~~ .................................... SemRep --(~------ Figure 3 : A RAGS view of the CGS system",
514
- "cite_spans": [
515
- {
516
- "start": 10,
517
- "end": 72,
518
- "text": "I-el ~~ .................................... SemRep --(~------",
519
- "ref_id": null
520
- }
521
- ],
522
- "ref_spans": [
523
- {
524
- "start": 73,
525
- "end": 81,
526
- "text": "Figure 3",
527
- "ref_id": null
528
- }
529
- ],
530
- "eq_spans": [],
531
- "section": "Reconstruction of the Caption Generation System",
532
- "sec_num": "5"
533
- },
534
- {
535
- "text": "In many NLG systems, (nominal) referring expression generation is an operation that is invoked at a relatively late stage, after the structure of individual sentences is fairly well specified (at least semantically). However, referring expression generation needs to go right back to the original world model/knowledge base to select appropriate semantic content to realise a particular conceptual item as an NP (whereas all other content has been determined much earlier). In fact, there seems to be no place to put referring expression generation in a pipeline without there being some resulting awkwardness.",
536
- "cite_spans": [],
537
- "ref_spans": [],
538
- "eq_spans": [],
539
- "section": "Referring Expression Generation",
540
- "sec_num": "5.1"
541
- },
542
- {
543
- "text": "In RAGS, pointers to conceptual items can be included inside the first, \"abstract\", level of semantic representation (AbsSemRep), which is intended to correspond to an initial bundling of conceptual material under semantic predicates. On the other hand, the final, \"concrete\", level of semantic representation (SemRep) is more like a fully-fledged logical form and it is no longer appropriate for conceptual material to be included there. In the CGS reimplementation, it is necessary for the Aggregation module to reason about the final high-level semantic representation of sentences, which means that this module must have access to \"concrete\" semantic representations. The Referring Expression generation module does not run until later, which means that these representations cannot be complete.",
544
- "cite_spans": [],
545
- "ref_spans": [],
546
- "eq_spans": [],
547
- "section": "Referring Expression Generation",
548
- "sec_num": "5.1"
549
- },
550
- {
551
- "text": "Our way around this was to ensure that the initial computation of concrete semantics from abstract semantics (done as part of Aggregation here) left a record of the relationship by including realises arrows between corresponding structures. That computation could not be completed whenever it reached conceptual material -at that point it left a \"hole\" (an object with no further specification) in the concrete semantic representation linked back to the conceptual material. When referring expression was later invoked, by following the arrows in the resulting mixed structure, it could tell exactly which conceptual entity needed to be referred to and where in the semantic structure the resulting semantic expression should be placed. Figure 4 shows the resulting arrangement for one example CGS sentence. The dashed lines indicate realises, i.e. non-local, arrows.",
552
- "cite_spans": [],
553
- "ref_spans": [
554
- {
555
- "start": 737,
556
- "end": 745,
557
- "text": "Figure 4",
558
- "ref_id": null
559
- }
560
- ],
561
- "eq_spans": [],
562
- "section": "Referring Expression Generation",
563
- "sec_num": "5.1"
564
- },
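A toy rendering of this "hole" mechanism (illustrative only; names invented): the concrete semantics is published with an unspecified object, and the realises arrow from the conceptual entity tells the referring expression module what to refer to and where the result belongs.

    # Toy rendering of the "hole" left for referring expression generation:
    # the hole is typed but has no local arrows, and a realises arrow from
    # the conceptual entity records what it must eventually express.
    LOCAL_TYPES = {"1-el", "2-el", "3-el", "el", "agent", "affected"}

    arrows = [
        ("1-el", "sem0", "dr0"),
        ("3-el", "sem0", "fun0"),
        ("agent", "fun0", "hole1"),       # a SemRep object left unspecified
        ("realises", "chart1", "hole1"),  # link back to the conceptual entity
    ]

    def holes_to_refer(arrows):
        """(conceptual entity, hole) pairs awaiting referring expressions."""
        specified = {src for t, src, _ in arrows if t in LOCAL_TYPES}
        return [(src, dst) for t, src, dst in arrows
                if t == "realises" and dst not in specified]

    print(holes_to_refer(arrows))   # [('chart1', 'hole1')]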
565
- {
566
- "text": "The CGS Centering module reasons about the entities that will be referred to in each sentence and produces a representation which records the forward and backward-looking centers (Grosz et al., 1995) . This representation is later used by the Referring Expression generation module in making pronominalisation decisions. This information could potentially also be used in the Realisation module.",
567
- "cite_spans": [
568
- {
569
- "start": 179,
570
- "end": 199,
571
- "text": "(Grosz et al., 1995)",
572
- "ref_id": "BIBREF3"
573
- }
574
- ],
575
- "ref_spans": [],
576
- "eq_spans": [],
577
- "section": "Handling Centering Information",
578
- "sec_num": "5.2"
579
- },
580
- {
581
- "text": "Since Centering is not directly producing referring expressions, its results have to sit around until they can actually be used. This posed a possible problem for us, because the RAGS framework does not provide a specific level of representation for Centering information and therefore seems on first sight unable to account for this information being communicated between modules. The solution to the problem came when we realised that Centering information is in fact a kind of abstract syntactic information. Although one might not expect abstract syntactic structure to be determined until the Realisation module (or perhaps slightly earlier), the CGS system starts this computation in the Centering module.",
582
- "cite_spans": [],
583
- "ref_spans": [],
584
- "eq_spans": [],
585
- "section": "Handling Centering Information",
586
- "sec_num": "5.2"
587
- },
588
- {
589
- "text": "Thus in the reimplementation, the Centering module computes (very partial) abstract syntactic representations for the entities that will eventually be realised as NPs. These representations basically just indicate the relevant Centering statuses using syntactic features. Figure 5 shows an example of the semantics for a typical output sentence and the two partial abstract syntactic representations computed by the Centering module for what will be the two NPs in that sentence 2. As before, dashed lines indicate realises arrows. Of course, given the discussion of the last section, the semantic representation objects that are the source of these arrows are in fact themselves linked back to conceptual entities by being the destination of realises arrows 2FVM = Feature Value Matrix. from them.",
590
- "cite_spans": [],
591
- "ref_spans": [
592
- {
593
- "start": 272,
594
- "end": 281,
595
- "text": "Figure 5",
596
- "ref_id": "FIGREF3"
597
- }
598
- ],
599
- "eq_spans": [],
600
- "section": "Handling Centering Information",
601
- "sec_num": "5.2"
602
- },
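A small sketch of the idea, with invented feature names: the Centering module deposits very partial feature-value matrices, and referring expression generation later reads the recorded status off them.

    # Toy sketch (invented feature names): the Centering module leaves very
    # partial feature-value matrices, which later modules read and extend.
    fvms = {
        "np1": {"cat": "NP", "backward_center": True},   # -> pronoun candidate
        "np2": {"cat": "NP", "backward_center": False},  # -> fuller description
    }
    realises = [("sem_ent1", "np1"), ("sem_ent2", "np2")]

    def pronominalisable(sem_entity):
        for src, dst in realises:
            if src == sem_entity:
                return fvms[dst].get("backward_center", False)
        return False

    print(pronominalisable("sem_ent1"))   # True: realise this NP as a pronoun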
603
- {
604
- "text": "When the Referring Expression generation module runs, it can recover the Centering information by inspecting the partial syntactic representations for the phrases it is supposed to generate. These partial representations are then further instantiated by, e.g., Lexical Choice at later stages of the pipeline.",
605
- "cite_spans": [],
606
- "ref_spans": [],
607
- "eq_spans": [],
608
- "section": "Handling Centering Information",
609
- "sec_num": "5.2"
610
- },
611
- {
612
- "text": "The representation scheme we have proposed here is designed specifically to support the requirements of the current state-of-the-art NLG systems, and our pilot implementation demonstrates the practical applicability of the proposal. Tangled, partial and mixed structures are of obvious utility to any system with a flexible control strategy and we have shown here how the proposed representation scheme supports them. By recording the derivational history of computations, it also supports decisions which partly depend on earlier stages of the generation process (e.g., possibly, lexical choice) and revision-based architectures which typically make use of such information. We have shown how the representation scheme might be the basis for an inter-module communication model, the whiteboard, which supports a wide range of processing strategies that require the representation of complex and evolving data dependem cies. The fact that the whiteboard is cumulative, or monotonic in a logical sense, means that the whiteboard also supports reasoning about the behaviour of NLG systems implemented in terms of it. This is something that we would like to exploit directly in the future.",
613
- "cite_spans": [],
614
- "ref_spans": [],
615
- "eq_spans": [],
616
- "section": "Conclusion",
617
- "sec_num": "6"
618
- },
619
- {
620
- "text": "The reimplementation of the CGS system in the RAGS framework was a challenge to the framework because it was a system that had already been developed completely independently. Even though we did not always understand the detailed motivation for the structure of CGS being as it was, within a short time we reconstructed a working system with modules that corresponded closely to the original CGS modules. The representation scheme we have proposed here was a key ingredient in giving us the flexibility to achieve the particular processing scheme used by CGS whilst remaining faithful to the (relatively simple) RAGS data model. The representation scheme is useful in situations where modules need to be defined and implemented to work with other modules, possibly developed by different people. In such cases, the representation scheme we propose permits precise definition of the interfaces of the modules, even where they are not restricted to a single 'level' of representation. Even though the control structure of CGS is quite simple, we found that the use of a centralised whiteboard was useful in helping us to agree on interfaces and on the exact contribution that each module should be making. Ultimately, it is hoped that the use of a scheme of this type will permit much more widespread 'plug-and-play' among members of the NLG community.",
621
- "cite_spans": [],
622
- "ref_spans": [],
623
- "eq_spans": [],
624
- "section": "Conclusion",
625
- "sec_num": "6"
626
- }
627
- ],
628
- "back_matter": [],
629
- "bib_entries": {
630
- "BIBREF0": {
631
- "ref_id": "b0",
632
- "title": "In Search of a Reference Architecture for NLG Systems",
633
- "authors": [
634
- {
635
- "first": "Lynne",
636
- "middle": [],
637
- "last": "Cahill",
638
- "suffix": ""
639
- },
640
- {
641
- "first": "Christy",
642
- "middle": [],
643
- "last": "Doran",
644
- "suffix": ""
645
- },
646
- {
647
- "first": "Roger",
648
- "middle": [],
649
- "last": "Evans",
650
- "suffix": ""
651
- },
652
- {
653
- "first": "Chris",
654
- "middle": [],
655
- "last": "Mellish",
656
- "suffix": ""
657
- },
658
- {
659
- "first": "Daniel",
660
- "middle": [],
661
- "last": "Paiva",
662
- "suffix": ""
663
- },
664
- {
665
- "first": "Mike",
666
- "middle": [],
667
- "last": "Reape",
668
- "suffix": ""
669
- },
670
- {
671
- "first": "Donia",
672
- "middle": [],
673
- "last": "Scott",
674
- "suffix": ""
675
- },
676
- {
677
- "first": "Neil",
678
- "middle": [],
679
- "last": "Tipper",
680
- "suffix": ""
681
- }
682
- ],
683
- "year": 1999,
684
- "venue": "Proceedings of the 7th European Workshop on Natural Language Generation",
685
- "volume": "",
686
- "issue": "",
687
- "pages": "77--85",
688
- "other_ids": {},
689
- "num": null,
690
- "urls": [],
691
- "raw_text": "Lynne Cahill, Christy Doran, Roger Evans, Chris Mellish, Daniel Paiva, Mike Reape, Donia Scott, and Neil Tipper. 1999a. In Search of a Reference Architecture for NLG Systems. In Proceedings of the 7th European Workshop on Natural Language Generation, pages 77-85, Toulouse.",
692
- "links": null
693
- },
694
- "BIBREF1": {
695
- "ref_id": "b1",
696
- "title": "Towards a Reference Architecture for Natural Language Generation Systems",
697
- "authors": [
698
- {
699
- "first": "Lynne",
700
- "middle": [],
701
- "last": "Cahill",
702
- "suffix": ""
703
- },
704
- {
705
- "first": "Christy",
706
- "middle": [],
707
- "last": "Doran",
708
- "suffix": ""
709
- },
710
- {
711
- "first": "Roger",
712
- "middle": [],
713
- "last": "Evans",
714
- "suffix": ""
715
- },
716
- {
717
- "first": "Chris",
718
- "middle": [],
719
- "last": "Mellish",
720
- "suffix": ""
721
- },
722
- {
723
- "first": "Daniel",
724
- "middle": [],
725
- "last": "Paiva",
726
- "suffix": ""
727
- },
728
- {
729
- "first": "Mike",
730
- "middle": [],
731
- "last": "Reape",
732
- "suffix": ""
733
- },
734
- {
735
- "first": "Donia",
736
- "middle": [],
737
- "last": "Scott",
738
- "suffix": ""
739
- },
740
- {
741
- "first": "Neil",
742
- "middle": [],
743
- "last": "Tipper",
744
- "suffix": ""
745
- }
746
- ],
747
- "year": 1999,
748
- "venue": "",
749
- "volume": "",
750
- "issue": "",
751
- "pages": "",
752
- "other_ids": {},
753
- "num": null,
754
- "urls": [],
755
- "raw_text": "Lynne Cahill, Christy Doran, Roger Evans, Chris Mellish, Daniel Paiva, Mike Reape, Donia Scott, and Neil Tipper. 1999b. Towards a Reference Architecture for Natural Language Genera- tion Systems. Technical Report ITRI-99-14, Information Technology Research Institute (ITRI), University of Brighton. Available at http://www, itri .brighton. ac. uk/proj ects/rags.",
756
- "links": null
757
- },
758
- "BIBREF2": {
759
- "ref_id": "b2",
760
- "title": "May I speak freely?\" Between templates and free choice in natural language generation",
761
- "authors": [
762
- {
763
- "first": "Jo",
764
- "middle": [],
765
- "last": "Calder",
766
- "suffix": ""
767
- },
768
- {
769
- "first": "Roger",
770
- "middle": [],
771
- "last": "Evans",
772
- "suffix": ""
773
- },
774
- {
775
- "first": "Chris",
776
- "middle": [],
777
- "last": "Mellish",
778
- "suffix": ""
779
- },
780
- {
781
- "first": "Mike",
782
- "middle": [],
783
- "last": "Reape",
784
- "suffix": ""
785
- }
786
- ],
787
- "year": 1999,
788
- "venue": "",
789
- "volume": "",
790
- "issue": "",
791
- "pages": "19--24",
792
- "other_ids": {},
793
- "num": null,
794
- "urls": [],
795
- "raw_text": "Jo Calder, Roger Evans, Chris Mellish, and Mike Reape. 1999. \"Free choice\" and templates: how to get both at the same time. In \"May I speak freely?\" Between templates and free choice in nat- ural language generation, number D-99-01, pages 19-24. Saarbriicken.",
796
- "links": null
797
- },
798
- "BIBREF3": {
799
- "ref_id": "b3",
800
- "title": "Centering: a framework for modelling the local coherence of discourse",
801
- "authors": [
802
- {
803
- "first": "B",
804
- "middle": [
805
- "J"
806
- ],
807
- "last": "Grosz",
808
- "suffix": ""
809
- },
810
- {
811
- "first": "A",
812
- "middle": [
813
- "K"
814
- ],
815
- "last": "Joshi",
816
- "suffix": ""
817
- },
818
- {
819
- "first": "S",
820
- "middle": [],
821
- "last": "Weinstein",
822
- "suffix": ""
823
- }
824
- ],
825
- "year": 1995,
826
- "venue": "Computational Linguistics",
827
- "volume": "21",
828
- "issue": "2",
829
- "pages": "203--226",
830
- "other_ids": {},
831
- "num": null,
832
- "urls": [],
833
- "raw_text": "B.J. Grosz, A.K. Joshi, and S. Weinstein. 1995. Centering: a framework for modelling the local co- herence of discourse. Computational Linguistics, 21 (2):203-226.",
834
- "links": null
835
- },
836
- "BIBREF4": {
837
- "ref_id": "b4",
838
- "title": "Generating explanatory captions for information graphics",
839
- "authors": [
840
- {
841
- "first": "V",
842
- "middle": [
843
- "O"
844
- ],
845
- "last": "Mittal",
846
- "suffix": ""
847
- },
848
- {
849
- "first": "S",
850
- "middle": [],
851
- "last": "Roth",
852
- "suffix": ""
853
- },
854
- {
855
- "first": "J",
856
- "middle": [
857
- "D"
858
- ],
859
- "last": "Moore",
860
- "suffix": ""
861
- },
862
- {
863
- "first": "J",
864
- "middle": [],
865
- "last": "Mattis",
866
- "suffix": ""
867
- },
868
- {
869
- "first": "G",
870
- "middle": [],
871
- "last": "Carenini",
872
- "suffix": ""
873
- }
874
- ],
875
- "year": 1995,
876
- "venue": "Proceedings of the 15th International Joint Conference on Artificial Intelligence (IJCAI'95)",
877
- "volume": "",
878
- "issue": "",
879
- "pages": "1276--1283",
880
- "other_ids": {},
881
- "num": null,
882
- "urls": [],
883
- "raw_text": "V. O. Mittal, S. Roth, J. D. Moore, J. Mattis, and G. Carenini. 1995. Generating explanatory cap- tions for information graphics. In Proceedings of the 15th International Joint Conference on Ar- tificial Intelligence (IJCAI'95), pages 1276-1283, Montreal, Canada, August.",
884
- "links": null
885
- },
886
- "BIBREF5": {
887
- "ref_id": "b5",
888
- "title": "Describing complex charts in natural language: A caption generation system",
889
- "authors": [
890
- {
891
- "first": "V",
892
- "middle": [
893
- "O"
894
- ],
895
- "last": "Mittal",
896
- "suffix": ""
897
- },
898
- {
899
- "first": "J",
900
- "middle": [
901
- "D"
902
- ],
903
- "last": "Moore",
904
- "suffix": ""
905
- },
906
- {
907
- "first": "G",
908
- "middle": [],
909
- "last": "Carenini",
910
- "suffix": ""
911
- },
912
- {
913
- "first": "S",
914
- "middle": [],
915
- "last": "Roth",
916
- "suffix": ""
917
- }
918
- ],
919
- "year": 1998,
920
- "venue": "Computational Linguistics",
921
- "volume": "24",
922
- "issue": "3",
923
- "pages": "431--468",
924
- "other_ids": {},
925
- "num": null,
926
- "urls": [],
927
- "raw_text": "V. O. Mittal, J. D. Moore, G. Carenini, and S. Roth. 1998. Describing complex charts in natural lan- guage: A caption generation system. Computa- tional Linguistics, 24(3):431-468.",
928
- "links": null
929
- },
930
- "BIBREF6": {
931
- "ref_id": "b6",
932
- "title": "Has a consensus NL generation architecture appeared and is it psycholinguistically plausible?",
933
- "authors": [
934
- {
935
- "first": "Ehud",
936
- "middle": [],
937
- "last": "Reiter",
938
- "suffix": ""
939
- }
940
- ],
941
- "year": 1994,
942
- "venue": "Proceedings of the Seventh International Workshop on Natural Language Generation",
943
- "volume": "",
944
- "issue": "",
945
- "pages": "163--170",
946
- "other_ids": {},
947
- "num": null,
948
- "urls": [],
949
- "raw_text": "Ehud Reiter. 1994. Has a consensus NL generation architecture appeared and is it psycholinguisti- cally plausible? In Proceedings of the Seventh In- ternational Workshop on Natural Language Gen- eration, pages 163-170, Kennebunkport, Maine.",
950
- "links": null
951
- },
952
- "BIBREF7": {
953
- "ref_id": "b7",
954
- "title": "Interactive graphic design using automatic presentation knowledge",
955
- "authors": [
956
- {
957
- "first": "F",
958
- "middle": [],
959
- "last": "Steven",
960
- "suffix": ""
961
- },
962
- {
963
- "first": "John",
964
- "middle": [],
965
- "last": "Roth",
966
- "suffix": ""
967
- },
968
- {
969
- "first": "Joe",
970
- "middle": [],
971
- "last": "Kolojejchick",
972
- "suffix": ""
973
- },
974
- {
975
- "first": "Jade",
976
- "middle": [],
977
- "last": "Mattis",
978
- "suffix": ""
979
- },
980
- {
981
- "first": "",
982
- "middle": [],
983
- "last": "Goldstein",
984
- "suffix": ""
985
- }
986
- ],
987
- "year": 1994,
988
- "venue": "Proceedings of CHI'9~: Human Factors in Computing Systems",
989
- "volume": "",
990
- "issue": "",
991
- "pages": "",
992
- "other_ids": {},
993
- "num": null,
994
- "urls": [],
995
- "raw_text": "Steven F. Roth, John Kolojejchick, Joe Mattis, and Jade Goldstein. 1994. Interactive graphic design using automatic presentation knowledge. In Pro- ceedings of CHI'9~: Human Factors in Computing Systems, Boston, MA.",
996
- "links": null
997
- }
998
- },
999
- "ref_entries": {
1000
- "FIGREF0": {
1001
- "uris": null,
1002
- "type_str": "figure",
1003
- "text": "The partial semantic representation of \"The second chart shows the number of days on the market\" As an example, consider Figure 1, which shows a semantic representation (SemRep) from the CGS reimplementation.",
1004
- "num": null
1005
- },
1006
- "FIGREF1": {
1007
- "uris": null,
1008
- "type_str": "figure",
1009
- "text": "Its first arrow's target is an object of type DR (Discourse Referent). Its second represents a set of SemPred (Semantic Predicate) objects, and in this case there's just one, of type show. Its third element is a (partial) function, from Role arrow types (agent and affected are both subtypes of Role) to SemReps. (In this case, the SemReps have not yet been fully specified.)",
1010
- "num": null
1011
- },
1012
- "FIGREF2": {
1013
- "uris": null,
1014
- "type_str": "figure",
1015
- "text": "A partial structure",
1016
- "num": null
1017
- },
1018
- "FIGREF3": {
1019
- "uris": null,
1020
- "type_str": "figure",
1021
- "text": "Arrangement of centering information for the output sentence above",
1022
- "num": null
1023
- },
1024
- "TABREF0": {
1025
- "type_str": "table",
1026
- "html": null,
1027
- "content": "<table><tr><td colspan=\"3\">~_set{KBPredl ~</td><td>fun(Role,set(KBId))</td><td colspan=\"2\">I-el ~3-el</td></tr><tr><td>el</td><td/><td colspan=\"2\">agent ~i/ ~ ..... affected</td><td>.... DR ~</td><td colspan=\"2\">fun(Role,set(SemRep)) el ?set(SemPred\u0129t A ~ . \u2022</td></tr><tr><td/><td/><td/><td/><td/><td/><td>agen,/</td><td>\\a]Jec,ea</td></tr><tr><td>/</td><td>el /</td><td colspan=\"2\">\\ el \"k~</td><td>.....</td><td>\" .......... present</td><td>~ S~mRep SemRep \u00a2J</td></tr><tr><td colspan=\"2\">chart1</td><td colspan=\"2\">chart2</td><td/><td/></tr><tr><td colspan=\"7\">Figure 4: Combined Abstract Semantic Representation and Concrete Semantic Representation for</td></tr><tr><td colspan=\"7\">the output: \"These two charts present information about house sales from data-set ts-1740\"</td></tr><tr><td>CG$ arohita,~lu'e</td><td/><td colspan=\"2\">RAGS representat/on$</td><td/><td/></tr><tr><td/><td/><td>II</td><td>Ill IV ~'</td><td/><td/></tr><tr><td colspan=\"2\">--' ..........</td><td colspan=\"2\">I1 I11 iV</td><td/><td/></tr><tr><td/><td/><td>I[</td><td>I11 IV</td><td/><td/></tr><tr><td colspan=\"2\">..........</td><td colspan=\"2\">I;11@</td><td/><td/></tr><tr><td colspan=\"2\">.........</td><td colspan=\"2\">11 III Iv v</td><td/><td/></tr><tr><td/><td/><td>II</td><td>I11 IV V</td><td/><td/></tr><tr><td colspan=\"2\">.........</td><td>III1</td><td/><td/><td/></tr><tr><td/><td/><td>II</td><td>111 IV V</td><td/><td/></tr><tr><td colspan=\"2\">l--.......... FUF</td><td colspan=\"2\">IIIII</td><td/><td/></tr></table>",
1028
- "text": "... /X ........ KSld) 0 ...... v \u2022 ~--\"-................. /",
1029
- "num": null
1030
- }
1031
- }
1032
- }
1033
- }
Full_text_JSON/prefixA/json/A00/A00-1018.json DELETED
@@ -1,989 +0,0 @@
1
- {
2
- "paper_id": "A00-1018",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:55.328524Z"
6
- },
7
- "title": "An Automatic Reviser: The TransCheck System",
8
- "authors": [
9
- {
10
- "first": "Jean-Marc",
11
- "middle": [],
12
- "last": "Jutras",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "RALI",
17
- "location": {
18
- "addrLine": "Universit6 de Montr6al C.P. 6128, succ. Centre-ville",
19
- "settlement": "Montr6al",
20
- "region": "QC)",
21
- "country": "Canada"
22
- }
23
- },
24
- "email": "[email protected]"
25
- }
26
- ],
27
- "year": "",
28
- "venue": null,
29
- "identifiers": {},
30
- "abstract": "Over the past decade or so, a lot of work in computational linguistics has been directed at finding ways to exploit the ever increasing volume of electronic bilingual corpora. These efforts have allowed for substantial expansion of the computational toolbox. We describe a system, TransCheck, which makes intensive use of these new tools in order to detect potential translation errors in preliminary or non-revised translations.",
31
- "pdf_parse": {
32
- "paper_id": "A00-1018",
33
- "_pdf_hash": "",
34
- "abstract": [
35
- {
36
- "text": "Over the past decade or so, a lot of work in computational linguistics has been directed at finding ways to exploit the ever increasing volume of electronic bilingual corpora. These efforts have allowed for substantial expansion of the computational toolbox. We describe a system, TransCheck, which makes intensive use of these new tools in order to detect potential translation errors in preliminary or non-revised translations.",
37
- "cite_spans": [],
38
- "ref_spans": [],
39
- "eq_spans": [],
40
- "section": "Abstract",
41
- "sec_num": null
42
- }
43
- ],
44
- "body_text": [
45
- {
46
- "text": "For the sake of argument, let's consider a translator to be a black box with source text in and target text out. We feed that box with texts and, to be really tricky, we input the same text a couple of times. Looking at the results, the first thing we notice is that though the different translations are quite similar, they're not exactly the same. Nothing to worry about, this may simply exemplify the potential for synonymy and paraphrase. But let's further suppose the text to translate is too big for one individual to translate in the given time frame. In realistic conditions, such a text would be split among perhaps half a dozen translators, each with his own vocabulary, experience and stylistic preferences, which would normally lead to the well known problem of non-uniformity of the translation.",
47
- "cite_spans": [],
48
- "ref_spans": [],
49
- "eq_spans": [],
50
- "section": "Introduction",
51
- "sec_num": null
52
- },
53
- {
54
- "text": "It is therefore part of the normal translation process to have a reviser look at a translator's output. His job will be to spot any typos (taken in a very broad sense to include missing chapters!). Usually, at this point the translator probably has submitted the preliminary version to a spell checker, so what could be done automatically at that level has already been done. No automatic detection of typical translation mistakes has been attempted though. That's the gap TransCheck is designed to fill. The concept of a \"translation checker\" was initially proposed in Isabelle and al. [8] and eventually led to a demonstration prototype concerned with the detection of a very restricted type of mistake: deceptive cognates. In comparison, the system described in this paper goes much further toward a \"real \" usable translation checker by allowing for the detection of errors of omission, the comparison of diverse numerical expressions and the flagging of inconsistent terminology.",
55
- "cite_spans": [
56
- {
57
- "start": 587,
58
- "end": 590,
59
- "text": "[8]",
60
- "ref_id": "BIBREF7"
61
- }
62
- ],
63
- "ref_spans": [],
64
- "eq_spans": [],
65
- "section": "Introduction",
66
- "sec_num": null
67
- },
68
- {
69
- "text": "On the interface side, it allows for the automatic alignment of the source and target texts, the flagging of potential mistakes and the possibility of saving any modifications made to the target text.",
70
- "cite_spans": [],
71
- "ref_spans": [],
72
- "eq_spans": [],
73
- "section": "Introduction",
74
- "sec_num": null
75
- },
76
- {
77
- "text": "Complete automatic modelling of the translation process is still far beyond our technical ability. The same is true of our ability to detect all types of translation mistakes. We can however, for certain welldefined sub-types of mistake, devise specific mechanisms. And if a program capable of detecting all mistakes of translation would undoubtedly be extremely useful, so would one capable of detecting frequent mistakes, especially when time is short and a thorough revision isn't possible. Errors are then bound to escape the reviser's attention from time to time. This will not necessary be the case of an \"automatic reviser\", though. In that respect, we can compare TransCheck's behaviour to the familiar \"find and replace\" now common to every text editors. Who would know consider doing that particular task by hand? We now give a short description of those sub-problems TransCheck is addressing.",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "I Error detection",
82
- "sec_num": null
83
- },
84
- {
85
- "text": "The ability to automatically detect unintended errors of omission would be much valued, as they can prove quite embarrassing to the translator. Yet a diversity of situations can lead to such errors among which translator's fatigue and the accidental pressing of a key in a text editor, as was pointed out by Melamed [12] . Unfortunately, detecting an omission is far from being simple when taken in all its generality (from omission of single words to whole chapters). This is due in part to the fact that one language may express some ideas with a greater economy of means than another, so length difference alone isn't sufficient to identify omitted text. Consider: \u2022 French: Quant ~ la section 5, elle fournit les rrsultats de nos simulations, que suit notre conclusion, h la sixi~me et dernirre section. \u2022 English: Section 5 describes our simulation results and the final section concludes.",
86
- "cite_spans": [
87
- {
88
- "start": 316,
89
- "end": 320,
90
- "text": "[12]",
91
- "ref_id": "BIBREF11"
92
- }
93
- ],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "Errors of omission",
97
- "sec_num": "1.1"
98
- },
99
- {
100
- "text": "Excluding punctuation, the French sentence in the example above has twice as many words as its English counterpart. Yet there's nothing wrong with the French translation. The task is therefore to determine whether or not correspondence at the word level is scattered throughout the whole aligned segment. Word alignment in general tends to be rather fuzzy though, as the following example shows: What's particular about some of these words, and of interest for an automatic reviser, is that they cannot be detected by a simple dictionary lookup, for they do appear in a monolingual dictionary. What's wrong isn't the words themselves but the context in which they are used. Consider, for example, the English word definitely (en effet) together with the French ddfinitivement (for good, once and for all). Though very similar in form, and both acceptable adverbs in their respective languages, they simply do not mean the same thing. TransCheck, therefore, looks through aligned pairs of sentences for such forbidden word pairs. It also looks for other types of mistakes, for example caiques, which could potentially be detected by a complex dictionary lookup. Calques consist of sequences of legitimate words that incorrectly mimic the structure of the other language by being sort of literal translations.",
101
- "cite_spans": [],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "Errors of omission",
105
- "sec_num": "1.1"
106
- },
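A simplified stand-in for this lookup (toy Python, not TransCheck's implementation): scan aligned sentence pairs for forbidden (English, French) pairs, with a placeholder pos() function standing in for the HMM tagger discussed later.

    # Simplified stand-in for the deceptive-cognate check: scan aligned
    # sentence pairs for forbidden (English, French) word pairs; pos() is a
    # placeholder for the part-of-speech disambiguation step.
    FORBIDDEN = {("definitely", "définitivement"): ("ADV", "ADV")}

    def pos(word):   # toy stand-in for the HMM part-of-speech tagger
        return "ADV" if word in ("definitely", "définitivement") else "OTHER"

    def flag_cognates(aligned_pairs):
        hits = []
        for en_sent, fr_sent in aligned_pairs:
            en = {w.lower() for w in en_sent.split()}
            fr = {w.lower() for w in fr_sent.split()}
            for (ew, fw), (ep, fp) in FORBIDDEN.items():
                if ew in en and fw in fr and pos(ew) == ep and pos(fw) == fp:
                    hits.append((ew, fw, en_sent))
        return hits

    pairs = [("He is definitely right", "Il a définitivement raison")]
    print(flag_cognates(pairs))   # flags the deceptive cognate pair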
107
- {
108
- "text": "A variety of phenomena can be found under this heading (telephone numbers, percentages, fractions, etc.). One important point these otherwise very diverse types of constructions have in common is that, being open sets, they cannot be listed in repositories. Therefore, their detection will require the use of grammatical tools of some sort. But identification is not enough in most cases. Having simply identified \"2\" in one text and \"two\" in the other will not alone permit their comparison. Conversion toward a common form is required. Part of this normalised form must also indicate the type of phenomenon observed. This is so because, though there is a 6 underlying the ordinal sixth, only alignment with an other ordinal of the same value could be considered an appropriate match. In TransCheck, recognition, normalisation and phenomenon identification of numerical expressions are done through appropriate transducers as will be shown in the next section.",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "Numerical expressions",
113
- "sec_num": "1.3"
114
- },
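The recognise/normalise/type step can be approximated with plain regular expressions (TransCheck itself uses transducers; the tables and patterns below are toy assumptions). Each expression maps to a (type, value) pair so that an ordinal only matches another ordinal of the same value.

    # Simplified sketch of recognition + normalisation + typing for
    # numerical expressions; regexes stand in for the transducers.
    import re

    EN_ORD = {"first": 1, "second": 2, "third": 3, "sixth": 6}
    EN_CARD = {"one": 1, "two": 2, "three": 3, "six": 6}

    def normalise(token):
        t = token.lower()
        if t in EN_ORD:
            return ("ordinal", EN_ORD[t])
        if t in EN_CARD:
            return ("cardinal", EN_CARD[t])
        m = re.fullmatch(r"(\d+)(st|nd|rd|th|e|ème)?", t)
        if m:
            kind = "ordinal" if m.group(2) else "cardinal"
            return (kind, int(m.group(1)))
        return None

    print(normalise("sixth"))   # ('ordinal', 6)
    print(normalise("6ème"))    # ('ordinal', 6): same normalised form
    print(normalise("6"))       # ('cardinal', 6): same value, different type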
115
- {
116
- "text": "It's not rare for two or more terms to refer to the same concept. However, all things being equal, it's generally taken to be bad practice to use more than one of the synonyms for technical terms in a given translation. Failure to follow this is referred to as terminological inconsistency. To try and minimise this problem, each translator working on a project is given specific instructions that involve standardising terminology. Unfortunately, it's not rare for some translators to ignore these instructions or even for these instructions never to reach the translator. Inadequacies are therefore to be expected, and the bigger the project the more so. As an example, given the term air bag and possible translations sac gonflable and coussin gonflable (literally, inflatable bag/cushion), it shouldn't be allowed for both forms to appear in a given translation, though either one of the two could actually appear.",
117
- "cite_spans": [],
118
- "ref_spans": [],
119
- "eq_spans": [],
120
- "section": "Terminological coherence",
121
- "sec_num": "1.4"
122
- },
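A minimal sketch of the consistency check, assuming a hand-made synonym table: flag any translation in which more than one competing variant of the same term appears.

    # Toy terminology-consistency check: given synonym sets for a source
    # term, flag a translation that uses more than one target variant.
    SYNONYMS = {"air bag": {"sac gonflable", "coussin gonflable"}}

    def inconsistent_terms(target_text):
        text = target_text.lower()
        problems = []
        for source_term, variants in SYNONYMS.items():
            used = {v for v in variants if v in text}
            if len(used) > 1:
                problems.append((source_term, used))
        return problems

    doc = "Le sac gonflable ... plus loin, le coussin gonflable ..."
    print(inconsistent_terms(doc))   # flags both variants of 'air bag'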
123
- {
124
- "text": "We have presented briefly the type of errors detection TransCheck seeks to accomplish automatically. We will now see in more details how they are currently being implemented.",
125
- "cite_spans": [],
126
- "ref_spans": [],
127
- "eq_spans": [],
128
- "section": "Tracking mistakes",
129
- "sec_num": "2"
130
- },
131
- {
132
- "text": "In order for TransCheck to detect potential translation errors, a relatively impressive set of mechanisms is required. These include:",
133
- "cite_spans": [],
134
- "ref_spans": [],
135
- "eq_spans": [],
136
- "section": "Prerequisites",
137
- "sec_num": "2.1"
138
- },
139
- {
140
- "text": "1. An aligner. After identification of word and sentence boundaries the text is processed into a bi-text by an alignment program. This alignment is done on the basis of both length (Gale and Church [7]) and a notion of cognateness (Simard [161). 2. Transducers. In order to compare numerical expressions, which often diverge in format between given pairs of languages, normalisation toward a common format is required. This is done with transducers (Kaplan and Kay, [10] ). 3. Part-of-speech tagger. Misleading similarities in graphical form can sometime induce translation mistakes (deceptive cognates). ~ These forbidden pairs normally involve only one of several possible parts of speech, hence the need to disambiguate them. We do this with a first-order HMM part-ofspeech tagger (Merialdo [13] ). I In the rest of the paper, we will use deceptive cognate very Iosely often to refer to normative usage of word in general.",
141
- "cite_spans": [
142
- {
143
- "start": 466,
144
- "end": 470,
145
- "text": "[10]",
146
- "ref_id": "BIBREF9"
147
- },
148
- {
149
- "start": 794,
150
- "end": 798,
151
- "text": "[13]",
152
- "ref_id": "BIBREF13"
153
- }
154
- ],
155
- "ref_spans": [],
156
- "eq_spans": [],
157
- "section": "Prerequisites",
158
- "sec_num": "2.1"
159
- },
160
- {
161
- "text": "4. Translation models. Being robust, the alignment program will align a pair of texts regardless of possible omissions in the target text. To detect such omissions of text, a probabilistic bilingual dictionary is called upon. This dictionary was estimated along the line of Brown and al.'s first translation model [2] . It is used to align (coarsely) at the word level.",
162
- "cite_spans": [
163
- {
164
- "start": 314,
165
- "end": 317,
166
- "text": "[2]",
167
- "ref_id": "BIBREF1"
168
- }
169
- ],
170
- "ref_spans": [],
171
- "eq_spans": [],
172
- "section": "Prerequisites",
173
- "sec_num": "2.1"
174
- },
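As a rough illustration of how such a probabilistic dictionary can support coarse word-level alignment, consider the sketch below; the tiny `P_TRANS` table and the 0.05 threshold are invented for the example, not values estimated from the Hansard:

```python
# Source words whose best translation score against the target sentence is
# negligible are candidates for an omission.
P_TRANS = {  # p(target | source), illustrative values only
    ("bill", "projet"): 0.4, ("bill", "loi"): 0.3,
    ("house", "chambre"): 0.6, ("commons", "communes"): 0.7,
}

def uncovered_source_words(src_words, tgt_words, threshold=0.05):
    uncovered = []
    for s in src_words:
        best = max((P_TRANS.get((s, t), 0.0) for t in tgt_words), default=0.0)
        if best < threshold:
            uncovered.append(s)
    return uncovered

print(uncovered_source_words(["bill", "house", "commons"], ["projet", "loi"]))
# -> ['house', 'commons']: a contiguous uncovered span suggests an omission
```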
175
- {
176
- "text": "In what follows, we assume the reader to be at least remotely familiar with most of these mechanisms. We will however go into more technical details concerning the transducers considering the central role they play in TransCheck.",
177
- "cite_spans": [],
178
- "ref_spans": [],
179
- "eq_spans": [],
180
- "section": "Prerequisites",
181
- "sec_num": "2.1"
182
- },
183
- {
184
- "text": "Grammatical correctors greatly relies on complex grammars to identify \"typical\" mistakes.",
185
- "cite_spans": [],
186
- "ref_spans": [],
187
- "eq_spans": [],
188
- "section": "Identifying omissions",
189
- "sec_num": "2.2"
190
- },
191
- {
192
- "text": "We could imagine doing something similar for omission detection trying to construct the meaning of every sentences in a text and then \"flag\" those where semantic discontinuity were found, not unlike what a human would do. This is, of course, in our wildest dreams as, semantic analyses still remain to this day extremely elusive. Not only that, but unlike grammatical errors, we cannot anticipate something like a \"typical\" omission as they will appear randomly and span over any possible length of text. We must therefore recast what appears as a semantic problem in terms of more readily accessible data. The basic idea here is to assimilate an omission to a particular type of alignment where an important contiguous set of words present in the source text cannot be level with the target text. mechanisms similar to Russell [15] .",
193
- "cite_spans": [
194
- {
195
- "start": 828,
196
- "end": 832,
197
- "text": "[15]",
198
- "ref_id": "BIBREF15"
199
- }
200
- ],
201
- "ref_spans": [],
202
- "eq_spans": [],
203
- "section": "Identifying omissions",
204
- "sec_num": "2.2"
205
- },
206
- {
207
- "text": "We can distinguish aligned at the word For this we rely on those described in between small (a couple of sentences) and big omissions (any thing bigger than a few paragraphs). One might expect the detection of whole missing pages and chapters not to be difficult, but that's not necessarily true, as the burden of the problem then falls on the aligning program instead of the checker per se. Robustness here is the key-word since an alignment program that couldn't fall back on its feet after seeing big chunks of missing text would cause TransCheck to output only noise thereafter. The alignment program we use is one such robust program which, as a first step, seeks to approximate the real alignment by drawing lines in regions with high densities of cognate words. Since the distribution of cognates is a priori uniform throughout the text, omitted sections, when big enough, will show up on the appropriate graph as an important discontinuity in those approximation lines. As the omissions become smaller and smaller, however, the cognate's uniform distribution hypothesis becomes increasingly questionable. 2 Still, we are interested in detecting missing sentences with acceptable precision. Ideally, this should be reflected as an X to zero alignment, but alignment programs tend to associate a high penalty to these cases, preferring to distribute extra text on adjacent regions. In order to recover from these mergings, TransCheck takes a closer look at pairs of aligned texts whenever the length ratio between source and target text falls under a certain threshold. It then attempts to aligned those pairs at the word level using a probabilistic bilingual dictionary that was estimated on the Canadian Hansard.",
208
- "cite_spans": [
209
- {
210
- "start": 1113,
211
- "end": 1114,
212
- "text": "2",
213
- "ref_id": "BIBREF1"
214
- }
215
- ],
216
- "ref_spans": [],
217
- "eq_spans": [],
218
- "section": "Identifying omissions",
219
- "sec_num": "2.2"
220
- },
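The triggering step described above can be summarised as follows; the 0.6 length-ratio threshold is an assumption for illustration rather than TransCheck's actual setting:

```python
# Flag aligned pairs whose target is suspiciously short relative to the
# source; these pairs are then re-examined at the word level.
def suspicious_pairs(aligned_pairs, ratio_threshold=0.6):
    flagged = []
    for src, tgt in aligned_pairs:
        ratio = len(tgt) / max(len(src), 1)
        if ratio < ratio_threshold:  # possible merged or omitted sentence
            flagged.append((src, tgt, ratio))
    return flagged

pairs = [("This bill is examined in the house of commons.",
          "Ce projet de loi est examiné à la chambre des communes."),
         ("A very long source sentence with many words in it.", "Oui.")]
print([round(r, 2) for _, _, r in suspicious_pairs(pairs)])  # -> [0.08]
```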
221
- {
222
- "text": "The \"Art\" of omission detection can be seen as one of trial and error in adjusting precision and recall by choosing appropriate values for what will constitute a significant difference in length ratio, a significant span of words that can't be aligned, and the penalty to be imposed if some words z The probability for there to be only a few cognates between say two paragraphs is very low for French and English, but not that low for two sentences.",
223
- "cite_spans": [],
224
- "ref_spans": [],
225
- "eq_spans": [],
226
- "section": "Identifying omissions",
227
- "sec_num": "2.2"
228
- },
229
- {
230
- "text": "accidentally align due to the imprecision of the word to word alignment algorithm.",
231
- "cite_spans": [],
232
- "ref_spans": [],
233
- "eq_spans": [],
234
- "section": "Identifying omissions",
235
- "sec_num": "2.2"
236
- },
237
- {
238
- "text": "As we have just seen, the problem of detecting a missing portion of text is, in TransCheck, closely related to that of alignment, as it can be reduced to a misalignment at the word level. All the other types of errors TransCheck is concerned with are different in that respect. Correct alignment is presupposed, and when given specific pairs of aligned \"tokens\" the task will be to decide whether they represent valid translations. We now present the steps involved in this evaluation.",
239
- "cite_spans": [],
240
- "ref_spans": [],
241
- "eq_spans": [],
242
- "section": "Identifying omissions",
243
- "sec_num": "2.2"
244
- },
245
- {
246
- "text": "In order for TransCheck to evaluate a translation pair, their constitutive elements must first be identified. In some cases, this process requires morphological analysis and, in other, a limited type of syntactical analysis. Both type of analysis serve, to a certain extend, a single purpose: that of expressing compactly what would otherwise be a big list of tokens (in some cases, involving numerical expressions, an infinite one). This identification step is done through appropriate transducers. Basically, there are two things to keep in mind when dealing with transducers. One is that, like finitestate-automaton, they behave like recognisers; that is, when applied to an input string, if it can parse it from start to finish, the string is accepted and otherwise rejected. The second is that when doing so, it will produce an output as a result. TransCheck relies on that last property of transducers to produce a unique representation for tokens that are different in form, but semantically identical, as we will now see.",
247
- "cite_spans": [],
248
- "ref_spans": [],
249
- "eq_spans": [],
250
- "section": "Identification",
251
- "sec_num": "2.3"
252
- },
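To make the accept-and-emit behaviour concrete, here is a toy transducer; its states, arcs and output convention are invented for the example:

```python
# A toy finite-state transducer: it either parses the whole input and emits
# an output string, or rejects the input.
ARCS = {  # (state, input token) -> (next state, output)
    (0, "two"): (1, "2"),
    (1, "million"): (2, "000000"),
    (2, "dollars"): (3, "(MONEY)$"),
}
FINAL = {3}

def transduce(tokens):
    state, out = 0, []
    for tok in tokens:
        arc = ARCS.get((state, tok))
        if arc is None:
            return None  # rejected: no parse from start to finish
        state, piece = arc
        out.append(piece)
    return "".join(out) if state in FINAL else None

print(transduce(["two", "million", "dollars"]))  # -> '2000000(MONEY)$'
print(transduce(["two", "dollars"]))             # -> None (rejected)
```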
253
- {
254
- "text": "Though we will normally be interested in the identification of every morphological form for a given \"interesting\" token, once identified, these differences will be discarded by TranCheck. Compare the examples below.",
255
- "cite_spans": [],
256
- "ref_spans": [],
257
- "eq_spans": [],
258
- "section": "Normalisation",
259
- "sec_num": "2.4"
260
- },
261
- {
262
- "text": "\u2022 Air bag / air bags \u2022 $2,000,000 / two million dollars / $2 million \u2022 June 1st, 2000 / the first of June, 2000 The examples above are all in English, but the same type of diversity can be found in French too. In Figure 1 we can see an example showing the result of both the process of identification (underlined) and normalisation (=>). Notice that the central part of figure 1 acts somewhat like a partial transfer* component (in a word to word translation model) between the French and the English texts. Though we haven't implemented it yet, this could be used to present the user with proper translation suggestions. 5",
263
- "cite_spans": [],
264
- "ref_spans": [
265
- {
266
- "start": 213,
267
- "end": 221,
268
- "text": "Figure 1",
269
- "ref_id": "FIGREF0"
270
- }
271
- ],
272
- "eq_spans": [],
273
- "section": "Normalisation",
274
- "sec_num": "2.4"
275
- },
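A regex-based sketch of this normalisation step is given below; the patterns and the (type, value) output convention are assumptions for illustration (TransCheck itself uses transducers for this):

```python
import re

# Map surface variants of an amount of money to one typed, normalised form.
WORD_NUM = {"two": 2, "three": 3}
PATTERNS = [
    (re.compile(r"\$(\d+) million"),
     lambda m: ("MONEY", str(int(m.group(1)) * 10**6))),
    (re.compile(r"(\w+) million dollars"),
     lambda m: ("MONEY", str(WORD_NUM[m.group(1)] * 10**6))),
    (re.compile(r"\$([\d,]+)"),
     lambda m: ("MONEY", m.group(1).replace(",", ""))),
]

def normalise(text):
    for pat, conv in PATTERNS:
        m = pat.search(text)
        if m:
            return conv(m)
    return None

for s in ("$2,000,000", "two million dollars", "$2 million"):
    print(s, "=>", normalise(s))  # all three yield ('MONEY', '2000000')
```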
276
- {
277
- "text": "The normalisation process depicted in figure 1, can be slightly complicated by two factors. One is the need to disambiguate the part of speech of the identified token. Consider:",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "Normalisation",
282
- "sec_num": "2.4"
283
- },
284
- {
285
- "text": "\u2022 French and English:",
286
- "cite_spans": [],
287
- "ref_spans": [],
288
- "eq_spans": [],
289
- "section": "Normalisation",
290
- "sec_num": "2.4"
291
- },
292
- {
293
- "text": "Local --) (POS) NomC(FAC)22",
294
- "cite_spans": [],
295
- "ref_spans": [],
296
- "eq_spans": [],
297
- "section": "Normalisation",
298
- "sec_num": "2.4"
299
- },
300
- {
301
- "text": "Here, the condition field ((POS)NomC)) state that only when nouns are involved will we be in presence of deceptive cognates (but not, say, when adjectives are involved). Consider now:",
302
- "cite_spans": [],
303
- "ref_spans": [],
304
- "eq_spans": [],
305
- "section": "Normalisation",
306
- "sec_num": "2.4"
307
- },
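A sketch of how such a conditioned entry could be applied, with POS tags coming from the tagger mentioned in section 2.1; the rule table and tag names below are illustrative assumptions:

```python
# Apply a "string -> (condition)(type)identifier" entry: the English/French
# pair local/local is flagged only when both occurrences are tagged as nouns.
RULES = {("local", "local"): {"pos": "NOUN", "type": "FAC", "id": 22}}

def check_pair(src_tok, src_pos, tgt_tok, tgt_pos):
    rule = RULES.get((src_tok.lower(), tgt_tok.lower()))
    if rule and src_pos == rule["pos"] == tgt_pos:
        return "deceptive cognate (%s#%d): %s/%s" % (
            rule["type"], rule["id"], src_tok, tgt_tok)
    return None

print(check_pair("local", "NOUN", "local", "NOUN"))  # flagged
print(check_pair("local", "ADJ", "local", "ADJ"))    # None: condition not met
```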
308
- {
309
- "text": "\u2022 from May 19th to 24th, 1999",
310
- "cite_spans": [],
311
- "ref_spans": [],
312
- "eq_spans": [],
313
- "section": "Normalisation",
314
- "sec_num": "2.4"
315
- },
316
- {
317
- "text": "Here, the dates are intermingled. The transducers we use to analyse such constructs will produced two distinct normalised forms that will both be involved in the comparison process that follows.",
318
- "cite_spans": [],
319
- "ref_spans": [],
320
- "eq_spans": [],
321
- "section": "Normalisation",
322
- "sec_num": "2.4"
323
- },
324
- {
325
- "text": "The identification and normalisation process described in the previous two sections are common to deceptive cognates, technical terms and numerical expressions altogether. However, the comparison of the resulting normalised forms as well as the processing they should further undergo is of a rather case specific nature.",
326
- "cite_spans": [],
327
- "ref_spans": [],
328
- "eq_spans": [],
329
- "section": "Comparison",
330
- "sec_num": "2.5"
331
- },
332
- {
333
- "text": "During the comparison process, TransCheck will only be concerned with the normalised forms resulting from the previous transduction process (the two central columns in figure 1). Each of these two columns will be considered as a set in the mathematical sense. As a consequence, the English sentence in figure 1 and the one given below are indistinguishable from TransCheck's point of view.",
334
- "cite_spans": [],
335
- "ref_spans": [],
336
- "eq_spans": [],
337
- "section": "Comparison",
338
- "sec_num": "2.5"
339
- },
340
- {
341
- "text": "\u2022 It will definitely, and I mean definitely, be done by January first, 2001.",
342
- "cite_spans": [],
343
- "ref_spans": [],
344
- "eq_spans": [],
345
- "section": "Comparison",
346
- "sec_num": "2.5"
347
- },
348
- {
349
- "text": "Of course, both occurrences of the word definitely will be flagged if the decision to flag either one is eventually taken. Each of these two sets will then be split into up to three subsets depending on whether they correspond to numerical expressions, deceptive cognates or technical terms. At this point the comparison process will be very simple. Given these subsets, the matching conditions will simply amount to the following:",
350
- "cite_spans": [],
351
- "ref_spans": [],
352
- "eq_spans": [],
353
- "section": "Comparison",
354
- "sec_num": "2.5"
355
- },
356
- {
357
- "text": "\u2022 If a numeral expression appears in one language but not in the other, flag it. \u2022 If a deceptive cognate appears in both languages, flag it. \u2022 If a term was requested to be flagged, flag it.",
358
- "cite_spans": [],
359
- "ref_spans": [],
360
- "eq_spans": [],
361
- "section": "Comparison",
362
- "sec_num": "2.5"
363
- },
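These three conditions translate almost literally into set operations over the normalised forms; the dictionary layout below is an assumption for illustration:

```python
# src and tgt each hold the normalised forms extracted from one side of an
# aligned pair, split by phenomenon type.
def flags(src, tgt, forbidden_terms):
    out = []
    # 1. numerical expression on one side only: symmetric difference
    out += [("NUM", n) for n in src["num"] ^ tgt["num"]]
    # 2. deceptive cognate on both sides: intersection
    out += [("COGNATE", c) for c in src["cognate"] & tgt["cognate"]]
    # 3. term the user asked to flag
    out += [("TERM", t) for t in tgt["term"] & forbidden_terms]
    return out

src = {"num": {"(DATE)2000-06-01"}, "cognate": {"FAC22"}, "term": set()}
tgt = {"num": set(), "cognate": {"FAC22"}, "term": {"sac gonflable"}}
print(flags(src, tgt, {"sac gonflable"}))  # three flags (order may vary)
```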
364
- {
365
- "text": "To recapitulate, the transducers we use in TransCheck all have the general form:-",
366
- "cite_spans": [],
367
- "ref_spans": [],
368
- "eq_spans": [],
369
- "section": "Putting it all together",
370
- "sec_num": "2.6"
371
- },
372
- {
373
- "text": "If a transducer identifies a string of interest and if boundary conditions are met, information about the nature of the string will be outputted. In a second step, the information from one language will have to be matched against the information from the other in accordance with the condition imposed by the specific nature of the identified strings.",
374
- "cite_spans": [],
375
- "ref_spans": [],
376
- "eq_spans": [],
377
- "section": "String of interest -') (condition )(type )identifier",
378
- "sec_num": null
379
- },
380
- {
381
- "text": "In the previous section, we have described what happens when a bi-text is submitted to",
382
- "cite_spans": [],
383
- "ref_spans": [],
384
- "eq_spans": [],
385
- "section": "The TransCheck Prototype",
386
- "sec_num": "3"
387
- },
388
- {
389
- "text": "TransCheck. We now turn to the steps that will lead to a request. Currently, TransCheck's interface is implemented in Tcl/Tk. This has allowed us to develop a proof of concept without preoccupying ourselves with word processing particularities. The down side to this is a limitation to ascii characters that will eventually have to be overcome by making TransCheck part of a text editor not unlike a spell checker.",
390
- "cite_spans": [],
391
- "ref_spans": [],
392
- "eq_spans": [],
393
- "section": "The TransCheck Prototype",
394
- "sec_num": "3"
395
- },
396
- {
397
- "text": "But for the time being, a TransCheck session would look something like this: The user selects through an interface a French and an English text specifying with a radiobutton which of the two is the source text. 6 6 The system was initialy developed having in mind Then the name of an alignment file is supplied (it will be created if it doesn't already exist). These are the minimal steps that must be taken before any analysis can take place. If, at this point, the bi-text is submitted for analysis, TransCheck will use all of it's default values and, after some window pop-up and progress report, a window containing the target text will appear on screen together with the source text facing it. All the potential errors will appear highlighted. At this point, the user can modify the target text to correct any found errors. When the session ends, the modified text will be saved (together with the appropriately modified alignment file).",
398
- "cite_spans": [
399
- {
400
- "start": 213,
401
- "end": 214,
402
- "text": "6",
403
- "ref_id": "BIBREF5"
404
- }
405
- ],
406
- "ref_spans": [],
407
- "eq_spans": [],
408
- "section": "The TransCheck Prototype",
409
- "sec_num": "3"
410
- },
411
- {
412
- "text": "We've just seen TransCheck's default behaviour. The user is also offered some customisation possibilities. This includes highlighting only those type of errors of interest to the user and setting the alignment parameters.",
413
- "cite_spans": [],
414
- "ref_spans": [],
415
- "eq_spans": [],
416
- "section": "The TransCheck Prototype",
417
- "sec_num": "3"
418
- },
419
- {
420
- "text": "The omission detection parameters can also be modified through an interface. Also, since as with any normative judgement, what is and what isn't a \"correct\" form will always be subject to debate, TransCheck allows the user to silence those alleged mistakes causing too much noise on a given text. Finally, the human reviser is allowed, any time during a session, to modify TransCheck's behaviour so that newly identified incorrect terms will be flagged thereafter, this to ensure that none of subsequent occurrences of these errors will escape his attention. This list of forbidden terms can be saved in order to constitute client specific databases so that identified problems will not be lost between projects.",
421
- "cite_spans": [],
422
- "ref_spans": [],
423
- "eq_spans": [],
424
- "section": "The TransCheck Prototype",
425
- "sec_num": "3"
426
- },
427
- {
428
- "text": "At present, TransCheck allows for only limited customisation. However, we are well aware that the repositories available for say deceptive cognates are costly to develop and English as the source text. Currently, this is still reflected only in the deceptive cognate database.",
429
- "cite_spans": [],
430
- "ref_spans": [],
431
- "eq_spans": [],
432
- "section": "Further development and discussion",
433
- "sec_num": "4"
434
- },
435
- {
436
- "text": "tend to include only those mistakes having a certain \"history\" (stability over time). That suggests the user should be allowed to add new pairs of prohibited translations on the fly. In most cases, however, adding new behaviour is a complex process available only to the system's designer because of morphology and part-of-speech considerations. Added flexibility in this regard seems mandatory. Since we cannot expect the human reviser to concern himself with such technical details, these would have to be hidden from him through adequate input interfaces. This flexibility seems to be desired independently from the now emerging problem of localisation. 7 We are currently addressing these issues one at a time.",
437
- "cite_spans": [],
438
- "ref_spans": [],
439
- "eq_spans": [],
440
- "section": "Further development and discussion",
441
- "sec_num": "4"
442
- },
443
- {
444
- "text": "So far, we have described the types of errors TransCheck is concerned with, the way they are handled and how some aspects of the processing can be customised. No figures as to precision and recall have been given though. This is in part due to the difficulty of finding preliminary translations and in part to TransCheck's customisability. For example, performance on omission detection will ultimately depend on the user's selected values. It seems to us that the best way to address both of these problems should be to actually put the system in the hands of human revisers and monitor the changes they would actually choose to make. Efforts in that direction are currently being made.",
445
- "cite_spans": [],
446
- "ref_spans": [],
447
- "eq_spans": [],
448
- "section": "Further development and discussion",
449
- "sec_num": "4"
450
- },
451
- {
452
- "text": "To our knowledge, TransCheck is still unique among text checkers in addressing the problem of translation errors. For a long time, only a concept without form, TransCheck, as presented in this paper, has shown the concept of a translation checker to be sound and realistic. Admittedly, a lot of work, especially on the specific grammars, still has to be done. But all this now seems like a worthwhile effort considering that the resulting program could help translators considerably in their efforts to meet the quality requirements and tight deadlines they are frequently facing. We have also stressed TransCheck's adaptability to be somewhat limited. The problem seems more one of ergonomics than of principle, though. Interfaces would have to be devised to guide users through the sometime complicated steps associated with adding new restrictions. We are now considering the possibility of integrating TransCheck in an off-the-shelf text editor to cross the ascii barrier.",
453
- "cite_spans": [],
454
- "ref_spans": [],
455
- "eq_spans": [],
456
- "section": "Conclusion",
457
- "sec_num": null
458
- },
459
- {
460
- "text": "FAC stands for \"faux-amis complets\" (deceptive cognates in all contexts) 4 In the case of deceptive congnates, we could talk of a forbidden transfer. 5 Transducers can be inverted to create new transducers that will recognise what was previously outpuned and output what was recognised.",
461
- "cite_spans": [],
462
- "ref_spans": [],
463
- "eq_spans": [],
464
- "section": "",
465
- "sec_num": null
466
- },
467
- {
468
- "text": "Adaptation of a text for use in a different region. For example, Canadian postal code (AIB 2C3) compared to American Zip Code (12345).",
469
- "cite_spans": [],
470
- "ref_spans": [],
471
- "eq_spans": [],
472
- "section": "",
473
- "sec_num": null
474
- }
475
- ],
476
- "back_matter": [
477
- {
478
- "text": "Lapalme for their invaluable comments on drafts of this paper.",
479
- "cite_spans": [],
480
- "ref_spans": [],
481
- "eq_spans": [],
482
- "section": "I would like to thank Elliott Macklovich, Claude B6dard, Mich~le Lamarche and Guy",
483
- "sec_num": null
484
- }
485
- ],
486
- "bib_entries": {
487
- "BIBREF0": {
488
- "ref_id": "b0",
489
- "title": "Statistical Approach to Machine Translation",
490
- "authors": [
491
- {
492
- "first": "P",
493
- "middle": [],
494
- "last": "Brown",
495
- "suffix": ""
496
- },
497
- {
498
- "first": "J",
499
- "middle": [],
500
- "last": "Cocke",
501
- "suffix": ""
502
- },
503
- {
504
- "first": "S",
505
- "middle": [],
506
- "last": "Della Pietra",
507
- "suffix": ""
508
- },
509
- {
510
- "first": "V",
511
- "middle": [],
512
- "last": "Della Pietra",
513
- "suffix": ""
514
- },
515
- {
516
- "first": "F",
517
- "middle": [],
518
- "last": "Jelinek",
519
- "suffix": ""
520
- },
521
- {
522
- "first": "J",
523
- "middle": [],
524
- "last": "Lafferty",
525
- "suffix": ""
526
- },
527
- {
528
- "first": "R",
529
- "middle": [],
530
- "last": "Mercer",
531
- "suffix": ""
532
- },
533
- {
534
- "first": "P",
535
- "middle": [],
536
- "last": "Roosin",
537
- "suffix": ""
538
- },
539
- {
540
- "first": "A",
541
- "middle": [],
542
- "last": "",
543
- "suffix": ""
544
- }
545
- ],
546
- "year": 1990,
547
- "venue": "Computational Linguistics",
548
- "volume": "16",
549
- "issue": "",
550
- "pages": "79--85",
551
- "other_ids": {},
552
- "num": null,
553
- "urls": [],
554
- "raw_text": "Brown P., Cocke J., Della Pietra S., Della Pietra V., Jelinek F., Lafferty J., Mercer R., Roosin P., A. (1990) Statistical Approach to Machine Translation. Computational Linguistics, 16, pp. 79-85.",
555
- "links": null
556
- },
557
- "BIBREF1": {
558
- "ref_id": "b1",
559
- "title": "The Mathematics of Machine Translation: Parameter Estimation tatistical Approach to Machine Translation",
560
- "authors": [
561
- {
562
- "first": "P",
563
- "middle": [],
564
- "last": "Brown",
565
- "suffix": ""
566
- },
567
- {
568
- "first": "S",
569
- "middle": [],
570
- "last": "Della Pietra",
571
- "suffix": ""
572
- },
573
- {
574
- "first": "V",
575
- "middle": [],
576
- "last": "Della Pietra",
577
- "suffix": ""
578
- },
579
- {
580
- "first": "R",
581
- "middle": [],
582
- "last": "Mercer",
583
- "suffix": ""
584
- }
585
- ],
586
- "year": 1993,
587
- "venue": "Computational Linguistics",
588
- "volume": "19",
589
- "issue": "",
590
- "pages": "263--311",
591
- "other_ids": {},
592
- "num": null,
593
- "urls": [],
594
- "raw_text": "Brown P., Della Pietra S., Della Pietra V., Mercer R. (1993) The Mathematics of Machine Translation: Parameter Estimation tatistical Approach to Machine Translation. Computational Linguistics, 19, pp. 263-311.",
595
- "links": null
596
- },
597
- "BIBREF2": {
598
- "ref_id": "b2",
599
- "title": "Dictionnaire d'anglicismes. Laval (Qu6bec)",
600
- "authors": [
601
- {
602
- "first": "G",
603
- "middle": [],
604
- "last": "Colpron",
605
- "suffix": ""
606
- }
607
- ],
608
- "year": 1982,
609
- "venue": "",
610
- "volume": "",
611
- "issue": "",
612
- "pages": "",
613
- "other_ids": {},
614
- "num": null,
615
- "urls": [],
616
- "raw_text": "Colpron, G. (1982) Dictionnaire d'anglicismes. Laval (Qu6bec), t~ditions Beauchemin.",
617
- "links": null
618
- },
619
- "BIBREF3": {
620
- "ref_id": "b3",
621
- "title": "Termight: Coordinating Humans and Machines in Bilingual Terminology Acquisition",
622
- "authors": [
623
- {
624
- "first": "",
625
- "middle": [],
626
- "last": "Dagan",
627
- "suffix": ""
628
- },
629
- {
630
- "first": "K",
631
- "middle": [],
632
- "last": "Church",
633
- "suffix": ""
634
- }
635
- ],
636
- "year": 1997,
637
- "venue": "Machine Translation",
638
- "volume": "12",
639
- "issue": "",
640
- "pages": "89--107",
641
- "other_ids": {},
642
- "num": null,
643
- "urls": [],
644
- "raw_text": "Dagan, I and Church K. (1997) Termight: Coordinating Humans and Machines in Bilingual Terminology Acquisition. Machine Translation, 12, pp. 89-107.",
645
- "links": null
646
- },
647
- "BIBREF4": {
648
- "ref_id": "b4",
649
- "title": "Dictionnaire des diJficultds de la langue fran(aise au Canada",
650
- "authors": [
651
- {
652
- "first": "G",
653
- "middle": [],
654
- "last": "Dagenais",
655
- "suffix": ""
656
- }
657
- ],
658
- "year": 1984,
659
- "venue": "Les l~ditions franqaises",
660
- "volume": "",
661
- "issue": "",
662
- "pages": "",
663
- "other_ids": {},
664
- "num": null,
665
- "urls": [],
666
- "raw_text": "Dagenais, G. (1984) Dictionnaire des diJficultds de la langue fran(aise au Canada. Boucherville, Les l~ditions franqaises.",
667
- "links": null
668
- },
669
- "BIBREF5": {
670
- "ref_id": "b5",
671
- "title": "Multidictionnaire des difficult$s de la langue fran(aise. Montr6al, l~ditions Qu6bec/Am&ique",
672
- "authors": [
673
- {
674
- "first": ", J.-L~",
675
- "middle": [],
676
- "last": "De Villiers",
677
- "suffix": ""
678
- }
679
- ],
680
- "year": 1988,
681
- "venue": "",
682
- "volume": "",
683
- "issue": "",
684
- "pages": "",
685
- "other_ids": {},
686
- "num": null,
687
- "urls": [],
688
- "raw_text": "De Villiers, J.-l~. (1988) Multidictionnaire des difficult$s de la langue fran(aise. Montr6al, l~ditions Qu6bec/Am&ique.",
689
- "links": null
690
- },
691
- "BIBREF6": {
692
- "ref_id": "b6",
693
- "title": "A Program for Aligning Sentences in Bilingual Corpora",
694
- "authors": [
695
- {
696
- "first": "W",
697
- "middle": [],
698
- "last": "Gale",
699
- "suffix": ""
700
- },
701
- {
702
- "first": "K",
703
- "middle": [],
704
- "last": "Church",
705
- "suffix": ""
706
- }
707
- ],
708
- "year": 1991,
709
- "venue": "Proceedings of the 29 ~h Annual Meeting of the Association for Computational Linguistics",
710
- "volume": "",
711
- "issue": "",
712
- "pages": "177--184",
713
- "other_ids": {},
714
- "num": null,
715
- "urls": [],
716
- "raw_text": "Gale, W., Church K. (1991) A Program for Aligning Sentences in Bilingual Corpora. Proceedings of the 29 ~h Annual Meeting of the Association for Computational Linguistics, Berkeley, pp. 177-184.",
717
- "links": null
718
- },
719
- "BIBREF7": {
720
- "ref_id": "b7",
721
- "title": "Translation Analysis and Translation Automation",
722
- "authors": [
723
- {
724
- "first": "P",
725
- "middle": [],
726
- "last": "Isabelle",
727
- "suffix": ""
728
- }
729
- ],
730
- "year": 1993,
731
- "venue": "Proceedings of the Fifth International Conference on Theoretical and Methodological Issues in Machine Translation (TMI-93)",
732
- "volume": "",
733
- "issue": "",
734
- "pages": "201--217",
735
- "other_ids": {},
736
- "num": null,
737
- "urls": [],
738
- "raw_text": "Isabelle P. and al. (1993) Translation Analysis and Translation Automation. Proceedings of the Fifth International Conference on Theoretical and Methodological Issues in Machine Translation (TMI-93), Kyoto, pp. 201-217.",
739
- "links": null
740
- },
741
- "BIBREF8": {
742
- "ref_id": "b8",
743
- "title": "Technical Terminology: some linguistic properties and an algorithm for identification in text",
744
- "authors": [
745
- {
746
- "first": "J",
747
- "middle": [],
748
- "last": "Justeson",
749
- "suffix": ""
750
- },
751
- {
752
- "first": "K",
753
- "middle": [],
754
- "last": "Slava",
755
- "suffix": ""
756
- }
757
- ],
758
- "year": 1995,
759
- "venue": "Natural Language Engineering",
760
- "volume": "1",
761
- "issue": "",
762
- "pages": "9--28",
763
- "other_ids": {},
764
- "num": null,
765
- "urls": [],
766
- "raw_text": "Justeson, J. and Slava K. (1995) Technical Terminology: some linguistic properties and an algorithm for identification in text. Natural Language Engineering, 1, pp. 9-28.",
767
- "links": null
768
- },
769
- "BIBREF9": {
770
- "ref_id": "b9",
771
- "title": "Regular Models of Phonological Rule Systems",
772
- "authors": [
773
- {
774
- "first": "R",
775
- "middle": [
776
- "M"
777
- ],
778
- "last": "Kaplan",
779
- "suffix": ""
780
- },
781
- {
782
- "first": "M",
783
- "middle": [],
784
- "last": "Kay",
785
- "suffix": ""
786
- }
787
- ],
788
- "year": 1994,
789
- "venue": "Computational Linguistics",
790
- "volume": "20",
791
- "issue": "",
792
- "pages": "331--378",
793
- "other_ids": {},
794
- "num": null,
795
- "urls": [],
796
- "raw_text": "Kaplan, R. M., Kay, M. (1994) Regular Models of Phonological Rule Systems, Computational Linguistics, 20, pp. 331-378.",
797
- "links": null
798
- },
799
- "BIBREF10": {
800
- "ref_id": "b10",
801
- "title": "Peut-on v~rifier automatiquement la cohdrence terminologique? Meta, 41",
802
- "authors": [
803
- {
804
- "first": "E",
805
- "middle": [],
806
- "last": "Macklovitch",
807
- "suffix": ""
808
- }
809
- ],
810
- "year": 1996,
811
- "venue": "",
812
- "volume": "",
813
- "issue": "",
814
- "pages": "299--316",
815
- "other_ids": {},
816
- "num": null,
817
- "urls": [],
818
- "raw_text": "Macklovitch, E. (1996) Peut-on v~rifier automatiquement la cohdrence terminologique? Meta, 41, pp. 299-316.",
819
- "links": null
820
- },
821
- "BIBREF11": {
822
- "ref_id": "b11",
823
- "title": "Automatic Detection of Omissions in Translations",
824
- "authors": [
825
- {
826
- "first": "I",
827
- "middle": [
828
- "D"
829
- ],
830
- "last": "Melamed",
831
- "suffix": ""
832
- }
833
- ],
834
- "year": 1996,
835
- "venue": "the 16 tn International Conference on Computational Linguistics",
836
- "volume": "",
837
- "issue": "",
838
- "pages": "",
839
- "other_ids": {},
840
- "num": null,
841
- "urls": [],
842
- "raw_text": "Melamed, I. D. (1996) Automatic Detection of Omissions in Translations. In the 16 tn International Conference on Computational Linguistics.",
843
- "links": null
844
- },
845
- "BIBREF13": {
846
- "ref_id": "b13",
847
- "title": "Tagging English Text with a Probabilistic Model",
848
- "authors": [
849
- {
850
- "first": "B",
851
- "middle": [],
852
- "last": "Merialdo",
853
- "suffix": ""
854
- }
855
- ],
856
- "year": 1994,
857
- "venue": "Computational Linguistics",
858
- "volume": "20",
859
- "issue": "",
860
- "pages": "155--168",
861
- "other_ids": {},
862
- "num": null,
863
- "urls": [],
864
- "raw_text": "Merialdo, B. (1994) Tagging English Text with a Probabilistic Model. Computational Linguistics, 20, pp. 155-168.",
865
- "links": null
866
- },
867
- "BIBREF14": {
868
- "ref_id": "b14",
869
- "title": "Dictionnaire s~lectif et comment~ des difficult~s de la version anglaise",
870
- "authors": [
871
- {
872
- "first": "J",
873
- "middle": [],
874
- "last": "Rey",
875
- "suffix": ""
876
- }
877
- ],
878
- "year": 1984,
879
- "venue": "",
880
- "volume": "",
881
- "issue": "",
882
- "pages": "",
883
- "other_ids": {},
884
- "num": null,
885
- "urls": [],
886
- "raw_text": "Rey J. (1984) Dictionnaire s~lectif et comment~ des difficult~s de la version anglaise. Paris, l~ditions Ophrys.",
887
- "links": null
888
- },
889
- "BIBREF15": {
890
- "ref_id": "b15",
891
- "title": "Errors of omission in translation",
892
- "authors": [
893
- {
894
- "first": "G",
895
- "middle": [],
896
- "last": "Russell",
897
- "suffix": ""
898
- }
899
- ],
900
- "year": 1999,
901
- "venue": "Proceedings of the Eighth International Conference on Theoretical and Methodological Issues in Machine Translation (TMI-99)",
902
- "volume": "",
903
- "issue": "",
904
- "pages": "128--138",
905
- "other_ids": {},
906
- "num": null,
907
- "urls": [],
908
- "raw_text": "Russell, G. (1999) Errors of omission in translation. Proceedings of the Eighth International Conference on Theoretical and Methodological Issues in Machine Translation (TMI-99), Chester, 1999, pp. 128-138.",
909
- "links": null
910
- },
911
- "BIBREF16": {
912
- "ref_id": "b16",
913
- "title": "Using Cognates to Align Sentences in Parallel Corpora",
914
- "authors": [
915
- {
916
- "first": "M",
917
- "middle": [],
918
- "last": "Simard",
919
- "suffix": ""
920
- },
921
- {
922
- "first": "G",
923
- "middle": [],
924
- "last": "Foster",
925
- "suffix": ""
926
- },
927
- {
928
- "first": "P",
929
- "middle": [],
930
- "last": "Isabelle",
931
- "suffix": ""
932
- }
933
- ],
934
- "year": 1992,
935
- "venue": "Proceedings of the Fourth International Conference on Theoretical and Methodological Issues in Machine Translation (TMI-92)",
936
- "volume": "6",
937
- "issue": "",
938
- "pages": "67--81",
939
- "other_ids": {},
940
- "num": null,
941
- "urls": [],
942
- "raw_text": "Simard M., Foster G. and Isabelle P. (1992) Using Cognates to Align Sentences in Parallel Corpora. Proceedings of the Fourth International Conference on Theoretical and Methodological Issues in Machine Translation (TMI-92), Montr6al, pp. 67-81.",
943
- "links": null
944
- },
945
- "BIBREF17": {
946
- "ref_id": "b17",
947
- "title": "Dictionnaire des faux amis fran(ais-anglais",
948
- "authors": [
949
- {
950
- "first": "J",
951
- "middle": [],
952
- "last": "Van Roey",
953
- "suffix": ""
954
- },
955
- {
956
- "first": "S",
957
- "middle": [],
958
- "last": "Granger",
959
- "suffix": ""
960
- },
961
- {
962
- "first": "J",
963
- "middle": [],
964
- "last": "Swallow",
965
- "suffix": ""
966
- }
967
- ],
968
- "year": 1988,
969
- "venue": "",
970
- "volume": "",
971
- "issue": "",
972
- "pages": "",
973
- "other_ids": {},
974
- "num": null,
975
- "urls": [],
976
- "raw_text": "Van Roey, J., Granger S. and Swallow J. (1988) Dictionnaire des faux amis fran(ais-anglais. Paris, Duculot.",
977
- "links": null
978
- }
979
- },
980
- "ref_entries": {
981
- "FIGREF0": {
982
- "num": null,
983
- "uris": null,
984
- "type_str": "figure",
985
- "text": "Token identification and normalisation. 3"
986
- }
987
- }
988
- }
989
- }
Full_text_JSON/prefixA/json/A00/A00-1019.json DELETED
@@ -1,1240 +0,0 @@
1
- {
2
- "paper_id": "A00-1019",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:51.717016Z"
6
- },
7
- "title": "Unit Completion for a Computer-aided Translation System",
8
- "authors": [
9
- {
10
- "first": "Philippe",
11
- "middle": [],
12
- "last": "Langlais",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "succursale Centre-ville Montral (Qubec)",
17
- "location": {
18
- "postCode": "H3C 3J7",
19
- "country": "Canada"
20
- }
21
- },
22
- "email": ""
23
- },
24
- {
25
- "first": "George",
26
- "middle": [],
27
- "last": "Foster",
28
- "suffix": "",
29
- "affiliation": {
30
- "laboratory": "",
31
- "institution": "succursale Centre-ville Montral (Qubec)",
32
- "location": {
33
- "postCode": "H3C 3J7",
34
- "country": "Canada"
35
- }
36
- },
37
- "email": ""
38
- },
39
- {
40
- "first": "Guy",
41
- "middle": [],
42
- "last": "Lapalme",
43
- "suffix": "",
44
- "affiliation": {
45
- "laboratory": "",
46
- "institution": "succursale Centre-ville Montral (Qubec)",
47
- "location": {
48
- "postCode": "H3C 3J7",
49
- "country": "Canada"
50
- }
51
- },
52
- "email": ""
53
- },
54
- {
55
- "first": "Rali",
56
- "middle": [
57
- "/"
58
- ],
59
- "last": "Diro",
60
- "suffix": "",
61
- "affiliation": {
62
- "laboratory": "",
63
- "institution": "succursale Centre-ville Montral (Qubec)",
64
- "location": {
65
- "postCode": "H3C 3J7",
66
- "country": "Canada"
67
- }
68
- },
69
- "email": ""
70
- }
71
- ],
72
- "year": "",
73
- "venue": null,
74
- "identifiers": {},
75
- "abstract": "This work is in the context of TRANSTYPE, a system that observes its user as he or she types a translation and repeatedly suggests completions for the text already entered. The user may either accept, modify, or ignore these suggestions. We describe the design, implementation, and performance of a prototype which suggests completions of units of texts that are longer than one word.",
76
- "pdf_parse": {
77
- "paper_id": "A00-1019",
78
- "_pdf_hash": "",
79
- "abstract": [
80
- {
81
- "text": "This work is in the context of TRANSTYPE, a system that observes its user as he or she types a translation and repeatedly suggests completions for the text already entered. The user may either accept, modify, or ignore these suggestions. We describe the design, implementation, and performance of a prototype which suggests completions of units of texts that are longer than one word.",
82
- "cite_spans": [],
83
- "ref_spans": [],
84
- "eq_spans": [],
85
- "section": "Abstract",
86
- "sec_num": null
87
- }
88
- ],
89
- "body_text": [
90
- {
91
- "text": "TRANSTYPE is part of a project set up to explore an appealing solution to Interactive Machine Translation (IMT) . In constrast to classical IMT systems, where the user's role consists mainly of assisting the computer to analyse the source text (by answering questions about word sense, ellipses, phrasal attachments, etc), in TRANSTYPE the interaction is directly concerned with establishing the target text.",
92
- "cite_spans": [
93
- {
94
- "start": 106,
95
- "end": 111,
96
- "text": "(IMT)",
97
- "ref_id": null
98
- }
99
- ],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "Introduction",
103
- "sec_num": null
104
- },
105
- {
106
- "text": "Our interactive translation system works as follows: a translator selects a sentence and begins typing its translation. After each character typed by the translator, the system displays a proposed completion, which may either be accepted using a special key or rejected by continuing to type. Thus the translator remains in control of the translation process and the machine must continually adapt its suggestions in response to his or her input. We are currently undertaking a study to measure the extent to which our word-completion prototype can improve translator productivity. The conclusions of this study will be presented elsewhere.",
107
- "cite_spans": [],
108
- "ref_spans": [],
109
- "eq_spans": [],
110
- "section": "Introduction",
111
- "sec_num": null
112
- },
113
- {
114
- "text": "The first version of TrtANSTYPE (Foster et al., 1997) only proposed completions for the current word. This paper deals with predictions which extend to the next several words in the text. The potential gain from multiple-word predictions can be appreciated in the one-sentence translation task reported in table 1, where a hypothetical user saves over 60% of the keystrokes needed to produce a translation in a word completion scenario, and about 85% in a \"unit\" completion scenario.",
115
- "cite_spans": [
116
- {
117
- "start": 32,
118
- "end": 53,
119
- "text": "(Foster et al., 1997)",
120
- "ref_id": "BIBREF4"
121
- }
122
- ],
123
- "ref_spans": [],
124
- "eq_spans": [],
125
- "section": "Introduction",
126
- "sec_num": null
127
- },
128
- {
129
- "text": "In all the figures that follow, we use different fonts to differentiate the various input and output: italics are used for the source text, sans-serif for characters typed by the user and typewriter-like for characters completed by the system. The first few lines of the table 1 give an idea of how TransType functions. Let us assume the unit scenario (see column 2 of the table) and suppose that the user wants to produce the sentence \"Ce projet de loi est examin~ ~ la chambre des communes\" as a translation for the source sentence \"This bill is examined in the house of commons\". The first hypothesis that the system produces before the user enters a character is loi (law). As this is not a good guess from TRANSTYPE the user types the first character (c) of the words he or she wants as a translation. Taking this new input into account, TRANSTYPE then modifies its proposal so that it is compatible whith what the translator has typed. It suggests the desired sequence ce projet de Ioi, which the user can simply validate by typing a dedicated key. Continuing in this way, the user and TRANSTYPE alternately contribute to the final translation. A screen copy of this prototype is provided in figure 1.",
130
- "cite_spans": [],
131
- "ref_spans": [],
132
- "eq_spans": [],
133
- "section": "Introduction",
134
- "sec_num": null
135
- },
136
- {
137
- "text": "The Core Engine",
138
- "cite_spans": [],
139
- "ref_spans": [],
140
- "eq_spans": [],
141
- "section": "2",
142
- "sec_num": null
143
- },
144
- {
145
- "text": "The core of TRANSTYPE is a completion engine which comprises two main parts: an evaluator which assigns probabilistic scores to completion hypotheses and a generator which uses the evaluation function to select the best candidate for completion.",
146
- "cite_spans": [],
147
- "ref_spans": [],
148
- "eq_spans": [],
149
- "section": "2",
150
- "sec_num": null
151
- },
152
- {
153
- "text": "The evaluator is a function p(t [t', s) which assigns to each target-text unit t an estimate of its probability given a source text s and the tokens t' which precede t in the current translation of s. 1 Our approach to modeling this distribution is based to a large extent on that of the IBM group (Brown et al., 1993) , but it differs in one significant aspect: whereas the IB-M model involves a \"noisy channel\" decomposition, we use a linear combination of separate prediction- : A one-sentence session illustrating the word-and unit-completion tasks. The first column indicates the target words the user is expected to produce. The next two columns indicate respectively the prefixes typed by the user and the completions proposed by the system in a word-completion task. The last two columns provide the same information for the unit-completion task. The total number of keystrokes for both tasks is reported in the last line. + indicates the acceptance key typed by the user. A completion is denoted by a/13 where a is the typed prefix and 13 the completed part. Completions for different prefixes are separated by \u2022.",
154
- "cite_spans": [
155
- {
156
- "start": 32,
157
- "end": 39,
158
- "text": "[t', s)",
159
- "ref_id": null
160
- },
161
- {
162
- "start": 298,
163
- "end": 318,
164
- "text": "(Brown et al., 1993)",
165
- "ref_id": "BIBREF1"
166
- }
167
- ],
168
- "ref_spans": [],
169
- "eq_spans": [],
170
- "section": "The Evaluator",
171
- "sec_num": "2.1"
172
- },
173
- {
174
- "text": "is powerful, it has the disadvantage that p(slt' , t) is more expensive to compute than p(tls ) when using IBM-style translation models. Since speed is crucial for our application, we chose to forego the noisy channel approach in the work described here. Our linear combination model is described as follows:",
175
- "cite_spans": [],
176
- "ref_spans": [],
177
- "eq_spans": [],
178
- "section": "The Evaluator",
179
- "sec_num": "2.1"
180
- },
181
- {
182
- "text": "pCtlt',s) = pCtlt') a(t',s) + pCtls) [1 -exit',s)] (1)",
183
- "cite_spans": [],
184
- "ref_spans": [],
185
- "eq_spans": [],
186
- "section": "The Evaluator",
187
- "sec_num": "2.1"
188
- },
189
- {
190
- "text": "\u2022 ~ \u2022 \u2022 v J language translation",
191
- "cite_spans": [],
192
- "ref_spans": [],
193
- "eq_spans": [],
194
- "section": "The Evaluator",
195
- "sec_num": "2.1"
196
- },
197
- {
198
- "text": "where a(t', s) E [0, 1] are context-dependent interpolation coefficients. For example, the translation model could have a higher weight at the start of a sentence but the contribution of the language model might become more important in the middle or the end of the sentence\u2022 A study of the weightings for these two models is described elsewhere\u2022 In the work described here we did not use the contribution of the language model (that is,",
199
- "cite_spans": [],
200
- "ref_spans": [],
201
- "eq_spans": [],
202
- "section": "The Evaluator",
203
- "sec_num": "2.1"
204
- },
205
- {
206
- "text": "a(t', s) = O, V t', s).",
207
- "cite_spans": [],
208
- "ref_spans": [],
209
- "eq_spans": [],
210
- "section": "The Evaluator",
211
- "sec_num": "2.1"
212
- },
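Equation (1) reads directly as code; the placeholder model functions below stand in for real language and translation models:

```python
# Linear interpolation of a language model p(t|t') and a translation model
# p(t|s), as in equation (1).
def evaluator(t, prev, src, p_lm, p_tm, alpha):
    return alpha * p_lm(t, prev) + (1.0 - alpha) * p_tm(t, src)

# alpha = 0 everywhere reproduces the setting used in this paper: only the
# translation model contributes.
score = evaluator("loi", ("ce", "projet", "de"), ("this", "bill"),
                  p_lm=lambda t, prev: 0.1,  # placeholder value
                  p_tm=lambda t, src: 0.2,   # placeholder value
                  alpha=0.0)
print(score)  # -> 0.2
```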
213
- {
214
- "text": "Techniques for weakening the independence assumptions made by the IBM models 1 and 2 have been proposed in recent work (Brown et al., 1993; Berger et al., 1996; Och and Weber, 98; Wang and Waibel, 98; Wu and Wong, 98) . These studies report improvements on some specific tasks (task-oriented limited vocabulary) which by nature are very different from the task TRANSTYPE is devoted to. Furthermore, the underlying decoding strategies are too time consuming for our application\u2022 We therefore use a translation model based on the simple linear interpolation given in equation 2 which combines predictions of two translation models --Ms and M~ -both based on IBM-like model 2 (Brown et al., 1993) . Ms was trained on single words and Mu, described in section 3, was trained on both words and units.",
215
- "cite_spans": [
216
- {
217
- "start": 119,
218
- "end": 139,
219
- "text": "(Brown et al., 1993;",
220
- "ref_id": "BIBREF1"
221
- },
222
- {
223
- "start": 140,
224
- "end": 160,
225
- "text": "Berger et al., 1996;",
226
- "ref_id": "BIBREF0"
227
- },
228
- {
229
- "start": 161,
230
- "end": 179,
231
- "text": "Och and Weber, 98;",
232
- "ref_id": null
233
- },
234
- {
235
- "start": 180,
236
- "end": 200,
237
- "text": "Wang and Waibel, 98;",
238
- "ref_id": null
239
- },
240
- {
241
- "start": 201,
242
- "end": 217,
243
- "text": "Wu and Wong, 98)",
244
- "ref_id": null
245
- },
246
- {
247
- "start": 673,
248
- "end": 693,
249
- "text": "(Brown et al., 1993)",
250
- "ref_id": "BIBREF1"
251
- }
252
- ],
253
- "ref_spans": [],
254
- "eq_spans": [],
255
- "section": "The Evaluator",
256
- "sec_num": "2.1"
257
- },
258
- {
259
- "text": "EQUATION",
260
- "cite_spans": [],
261
- "ref_spans": [],
262
- "eq_spans": [
263
- {
264
- "start": 0,
265
- "end": 8,
266
- "text": "EQUATION",
267
- "ref_id": "EQREF",
268
- "raw_str": "-- _",
269
- "eq_num": "(2)"
270
- }
271
- ],
272
- "section": "The Evaluator",
273
- "sec_num": "2.1"
274
- },
275
- {
276
- "text": "word unit",
277
- "cite_spans": [],
278
- "ref_spans": [],
279
- "eq_spans": [],
280
- "section": "The Evaluator",
281
- "sec_num": "2.1"
282
- },
283
- {
284
- "text": "where Ps and Pu stand for the probabilities given respectively by Ms and M~. G(s) represents the new sequence of tokens obtained after grouping the tokens of s into units. The grouping operator G is illustrated in table 2 and is described in section 3.",
285
- "cite_spans": [],
286
- "ref_spans": [],
287
- "eq_spans": [],
288
- "section": "The Evaluator",
289
- "sec_num": "2.1"
290
- },
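In code, the interpolation of equation (2) looks like this; p_s, p_u, G and the weight beta are placeholders (the paper does not give the weight's value here):

```python
# Combine the word model Ms, applied to the raw source tokens s, with the
# unit model Mu, applied to the regrouped sequence G(s).
def combined_translation_prob(t, s, p_s, p_u, G, beta=0.5):
    return beta * p_s(t, s) + (1.0 - beta) * p_u(t, G(s))
```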
291
- {
292
- "text": "The task of the generator is to identify units that match the current prefix typed by the user, and pick the best candidate according to the evaluator. Due to time considerations, the generator introduces a division of the target vocabulary into two parts: a small active component whose contents are always searched for a match to the current prefix, and a much larger passive part over (380,000 word forms) which comes into play only when no candidates are found in the active vocabulary. The active part is computed dynamically when a new sentence is selected by the translator. It is composed of a few entities (tokens and units) that are likely to appear in the translation. It is a union of the best candidates provided by each model Ms and M~ over the set of all possible target tokens (resp. units) that have a non-null translation probability of being translated by any of the current source tokens (resp. units). Table 2 : Role of the generator for a sample pair of sentences (t is the translation of s in our corpus). G(s) is the sequence of source tokens recasted by the grouping operator G. A8 indicates the 10 best tokens according to the word model, Au the 10 best units according to the unit model.",
293
- "cite_spans": [],
294
- "ref_spans": [
295
- {
296
- "start": 923,
297
- "end": 930,
298
- "text": "Table 2",
299
- "ref_id": "TABREF2"
300
- }
301
- ],
302
- "eq_spans": [],
303
- "section": "The Generator",
304
- "sec_num": "2.2"
305
- },
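The generator's two-tier search can be sketched as follows; the scoring function stands in for the evaluator of section 2.1:

```python
# Match the typed prefix against the small active vocabulary first; fall
# back on the large passive vocabulary only when nothing active matches.
def best_completion(prefix, active, passive, score):
    pool = [u for u in active if u.startswith(prefix)]
    if not pool:
        pool = [u for u in passive if u.startswith(prefix)]
    return max(pool, key=score, default=None)

active = ["ce projet de loi", "loi", "chambre des communes"]
print(best_completion("ce", active, [], score=len))  # -> 'ce projet de loi'
```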
306
- {
307
- "text": "Automatically identifying which source words or groups of words will give rise to which target words or groups of words is a fundamental problem which remains open. In this work, we decided to proceed in two steps: a) monolingually identifying groups of words that would be better handled as units in a given context, and b) mapping the resulting source and target units. To train our unit models, we used a segment of the Hansard corpus consisting of 15,377 pairs of sentences, totaling 278,127 english tokens (13,543 forms) and 292,865 french tokens (16,399 forms).",
308
- "cite_spans": [],
309
- "ref_spans": [],
310
- "eq_spans": [],
311
- "section": "Modeling Unit Associations",
312
- "sec_num": "3"
313
- },
314
- {
315
- "text": "Finding relevant units in a text has been explored in many areas of natural language processing. Our approach relies on distributional and frequency statistics computed on each sequence of words found in a training corpus. For sake of efficiency, we used the suffix array technique to get a compact representation of our training corpus. This method allows the efficient retrieval of arbitrary length n-grams (Nagao and Mori, 94; Haruno et al., 96; Ikehara et al., 96; Shimohata et al., 1997; Russell, 1998) . The literature abounds in measures that can help to decide whether words that co-occur are linguistically significant or not. In this work, the strength of association of a sequence of words w[ = wl,..., wn is computed by two measures: a likelihood-based one p(w'~) (where g is the likelihood ratio given in (Dunning, 93)) and an entropy-based one e(w'~) (Shimohata et al., 1997) . Letting T stand for the training text and m a token:",
316
- "cite_spans": [
317
- {
318
- "start": 409,
319
- "end": 429,
320
- "text": "(Nagao and Mori, 94;",
321
- "ref_id": null
322
- },
323
- {
324
- "start": 430,
325
- "end": 448,
326
- "text": "Haruno et al., 96;",
327
- "ref_id": null
328
- },
329
- {
330
- "start": 449,
331
- "end": 468,
332
- "text": "Ikehara et al., 96;",
333
- "ref_id": null
334
- },
335
- {
336
- "start": 469,
337
- "end": 492,
338
- "text": "Shimohata et al., 1997;",
339
- "ref_id": "BIBREF15"
340
- },
341
- {
342
- "start": 493,
343
- "end": 507,
344
- "text": "Russell, 1998)",
345
- "ref_id": "BIBREF14"
346
- },
347
- {
348
- "start": 865,
349
- "end": 889,
350
- "text": "(Shimohata et al., 1997)",
351
- "ref_id": "BIBREF15"
352
- }
353
- ],
354
- "ref_spans": [],
355
- "eq_spans": [],
356
- "section": "Finding Monolingual Units",
357
- "sec_num": "3.1"
358
- },
359
- {
360
- "text": "p(w~) = argming(w~, uS1 ) (3) ie]l,n[ e(w'~) = 0.5x +k ~rnlw,~meT h ( Ireq(w'~ m) k Ir~q(wT) ]",
361
- "cite_spans": [],
362
- "ref_spans": [],
363
- "eq_spans": [],
364
- "section": "Finding Monolingual Units",
365
- "sec_num": "3.1"
366
- },
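The entropy measure can be computed directly from token counts; the sketch below scores only right contexts, a simplification of the averaged left/right formula above:

```python
from collections import Counter
from math import log

def right_context_entropy(unit, corpus_tokens):
    """Entropy of the distribution of tokens following `unit` in the corpus:
    salient units occur in varied contexts, hence a high value."""
    n = len(unit)
    follows = Counter(
        corpus_tokens[i + n]
        for i in range(len(corpus_tokens) - n)
        if tuple(corpus_tokens[i:i + n]) == unit
    )
    total = sum(follows.values())
    if total == 0:
        return 0.0
    return -sum(c / total * log(c / total) for c in follows.values())

toks = "in fact we agree and in fact they agree".split()
print(round(right_context_entropy(("in", "fact"), toks), 3))  # -> 0.693
```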
367
- {
368
- "text": "Intuitively, the first measurement accounts for the fact that parts of a sequence of words that should be considered as a whole should not appear often by themselves. The second one reflects the fact that a salient unit should appear in various contexts (i.e. should have a high entropy score).",
369
- "cite_spans": [],
370
- "ref_spans": [],
371
- "eq_spans": [],
372
- "section": "Finding Monolingual Units",
373
- "sec_num": "3.1"
374
- },
375
- {
376
- "text": "We implemented a cascade filtering strategy based on the likelihood score p, the frequency f, the length l and the entropy value e of the sequences. A first filter (.~\"1 (lmin, fmin, Pmin, emin)) removes any sequence s for which l(s) < lmin or p(s) < Pmin or e(s) < e,nin or f(s) < fmin. A second filter (~'2) removes sequences that are included in preferred ones. In terms of sequence reduction, applying ~1 (2, 2, 5.0, 0.2) on the 81,974 English sequences of at least two tokens seen at least twice in our training corpus, less than 50% of them (39,093) were filtered: 17,063 (21%) were removed because of their low entropy value, 25,818 (31%) because of their low likelihood value.",
377
- "cite_spans": [],
378
- "ref_spans": [],
379
- "eq_spans": [],
380
- "section": "Finding Monolingual Units",
381
- "sec_num": "3.1"
382
- },
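The F1 stage is a straightforward threshold test; the field names below are assumptions, with defaults mirroring F1(2, 2, 5.0, 0.2) from the text:

```python
# Keep only sequences meeting the minimum length, frequency, likelihood and
# entropy requirements.
def f1_filter(sequences, l_min=2, f_min=2, p_min=5.0, e_min=0.2):
    return [s for s in sequences
            if s["l"] >= l_min and s["f"] >= f_min
            and s["p"] >= p_min and s["e"] >= e_min]

candidates = [{"seq": "house of commons", "l": 3, "f": 12, "p": 9.1, "e": 0.8},
              {"seq": "of the", "l": 2, "f": 950, "p": 2.3, "e": 1.9}]
print([s["seq"] for s in f1_filter(candidates)])  # -> ['house of commons']
```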
383
- {
384
- "text": "Mapping the identified units (tokens or sequences) to their equivalents in the other language was achieved by training a new translation model (IBM 2) using the EM algorithm as described in (Brown et al., 1993) . This required grouping the tokens in our training corpus into sequences, on the basis of the unit lexicons identified in the previous step (we will refer to the results of this grouping as the sequencebased corpus). To deal with overlapping possibilities, we used a dynamic programming scheme which optimized a criterion C given by equation 4 over a set S of all units collected for a given language plus all single words. G(w~) is obtained by returning the path that maximized B(n). We investigated several Ccriteria and we found C~--a length-based measurc to be the most satisfactory. Table 2 shows an output of the grouping function. Table 3 : Bilingual associations. The first column indicates a source unit, the second one its frequency in the training corpus. The third column reports its 3-best ranked target associations (a being a token or a unit, p being the translation probability). The second half of the table reports NP-associations obtained after the filter described in the text.",
385
- "cite_spans": [
386
- {
387
- "start": 190,
388
- "end": 210,
389
- "text": "(Brown et al., 1993)",
390
- "ref_id": "BIBREF1"
391
- }
392
- ],
393
- "ref_spans": [
394
- {
395
- "start": 800,
396
- "end": 807,
397
- "text": "Table 2",
398
- "ref_id": "TABREF2"
399
- },
400
- {
401
- "start": 850,
402
- "end": 857,
403
- "text": "Table 3",
404
- "ref_id": null
405
- }
406
- ],
407
- "eq_spans": [],
408
- "section": "Mapping",
409
- "sec_num": "3.2"
410
- },
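A sketch of the grouping operator G as dynamic programming; the unit lexicon and the quadratic length reward are illustrative stand-ins for the paper's length-based criterion:

```python
UNITS = {("projet", "de", "loi"), ("chambre", "des", "communes")}

def group(tokens, max_len=5):
    best = [0.0] * (len(tokens) + 1)  # best[i]: best score for tokens[:i]
    back = [1] * (len(tokens) + 1)    # length of the last segment chosen
    for i in range(1, len(tokens) + 1):
        for k in range(1, min(max_len, i) + 1):
            seg = tuple(tokens[i - k:i])
            if k == 1 or seg in UNITS:       # single word, or a known unit
                score = best[i - k] + k * k  # length-based reward (assumed)
                if score > best[i]:
                    best[i], back[i] = score, k
    out, i = [], len(tokens)
    while i > 0:                             # recover the winning path
        out.append("_".join(tokens[i - back[i]:i]))
        i -= back[i]
    return out[::-1]

print(group("ce projet de loi est examiné".split()))
# -> ['ce', 'projet_de_loi', 'est', 'examiné']
```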
411
- {
412
- "text": "We investigated three ways of estimating the parameters of the unit model. In the first one, El, the translation parameters are estimated by applying the EM algorithm in a straightforward fashion over all entities (tokens and units) present at least twice in the sequence-based corpus 2. The two next methods filter the probabilities obtained with the Ez method. In E2, all probabilities p(tls ) are set to 0 whenever s is a token (not a unit), thus forcing the model to contain only associations between source units and target entities (tokens or units). In E3 any parameter of the model that involves a token is removed (that is, p(tls ) = 0 if t or s is a token).",
413
- "cite_spans": [],
414
- "ref_spans": [],
415
- "eq_spans": [],
416
- "section": "Mapping",
417
- "sec_num": "3.2"
418
- },
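A sketch of the E2/E3 pruning and renormalisation described above (names are illustrative: p maps each source entity to its target distribution, and is_unit flags multi-word units).

def prune(p, method, is_unit):
    # E2 and E3 both keep only unit sources; E3 also drops token targets.
    out = {}
    for s, dist in p.items():
        if not is_unit(s):
            continue
        kept = {t: pr for t, pr in dist.items()
                if method == "E2" or is_unit(t)}
        z = sum(kept.values())
        if z > 0:  # renormalise the surviving probabilities
            out[s] = {t: pr / z for t, pr in kept.items()}
    return out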
419
- {
420
- "text": "The resulting model will thus contain only unit associations. In both cases, the final probabilities are renormalized. Table 3 shows a few entries from a unit model (Mu) obtained after 15 iterations of the EM-algorithm on a sequence corpus resulting from the application of the length-grouping criterion (dr) over a lexicon of units whose likelihood score is above 5.0. The probabilities have been obtained by application of the method E2.",
421
- "cite_spans": [],
422
- "ref_spans": [
423
- {
424
- "start": 119,
425
- "end": 126,
426
- "text": "Table 3",
427
- "ref_id": null
428
- }
429
- ],
430
- "eq_spans": [],
431
- "section": "Mapping",
432
- "sec_num": "3.2"
433
- },
434
- {
435
- "text": "We found many partially correct associations",
436
- "cite_spans": [],
437
- "ref_spans": [],
438
- "eq_spans": [],
439
- "section": "Mapping",
440
- "sec_num": "3.2"
441
- },
442
- {
443
- "text": "Cover the years/au fils des, we have/nous, etc) that illustrate the weakness of decoupling the unit identification from the mapping problem. In most cas-2The entities seen only once are mapped to a special \"unknown\" word es however, these associations have a lower probability than the good ones. We also found few erratic associations (the first time/e'dtait, some hon. members/t, etc) due to distributional artifacts. It is also interesting to note that the good associations we found are not necessary compositional in nature (we must/il Iaut, people of canada/les canadiens, of eourse/6videmment, etc).",
444
- "cite_spans": [],
445
- "ref_spans": [],
446
- "eq_spans": [],
447
- "section": "Mapping",
448
- "sec_num": "3.2"
449
- },
450
- {
451
- "text": "One way to increase the precision of the mapping process is to impose some linguistic constraints on the sequences such as simple noun-phrase contraints (Ganssier, 1995; Kupiec, 1993; hua Chen and Chen, 94; Fung, 1995; Evans and Zhai, 1996) . It is also possible to focus on non-compositional compounds, a key point in bilingual applications (Su et al., 1994; Melamed, 1997; Lin, 99) . Another interesting approach is to restrict sequences to those that do not cross constituent boundary patterns (Wu, 1995; Furuse and Iida, 96) . In this study, we filtered for potential sequences that are likely to be noun phrases, using simple regular expressions over the associated part-of-speech tags. An excerpt of the association probabilities of a unit model trained considering only the NP-sequences is given in table 3. Applying this filter (referred to as JrNp in the following) to the 39,093 english sequences still surviving after previous filters ~'1 and ~'2 removes 35,939 of them (92%). Table 4 : Completion results of several translation models, spared: theoretical proportion of characters saved; ok: number of target units accepted by the user; good: number of target units that matched the expected whether they were proposed or not; nu: number of sentences for which no target unit was found by the translation model; u: number of sentences for which at least one helpful unit has been found by the model, but not necessarily proposed.",
452
- "cite_spans": [
453
- {
454
- "start": 153,
455
- "end": 169,
456
- "text": "(Ganssier, 1995;",
457
- "ref_id": null
458
- },
459
- {
460
- "start": 170,
461
- "end": 183,
462
- "text": "Kupiec, 1993;",
463
- "ref_id": "BIBREF9"
464
- },
465
- {
466
- "start": 184,
467
- "end": 206,
468
- "text": "hua Chen and Chen, 94;",
469
- "ref_id": null
470
- },
471
- {
472
- "start": 207,
473
- "end": 218,
474
- "text": "Fung, 1995;",
475
- "ref_id": "BIBREF5"
476
- },
477
- {
478
- "start": 219,
479
- "end": 240,
480
- "text": "Evans and Zhai, 1996)",
481
- "ref_id": "BIBREF3"
482
- },
483
- {
484
- "start": 342,
485
- "end": 359,
486
- "text": "(Su et al., 1994;",
487
- "ref_id": "BIBREF16"
488
- },
489
- {
490
- "start": 360,
491
- "end": 374,
492
- "text": "Melamed, 1997;",
493
- "ref_id": "BIBREF11"
494
- },
495
- {
496
- "start": 375,
497
- "end": 383,
498
- "text": "Lin, 99)",
499
- "ref_id": null
500
- },
501
- {
502
- "start": 497,
503
- "end": 507,
504
- "text": "(Wu, 1995;",
505
- "ref_id": "BIBREF19"
506
- },
507
- {
508
- "start": 508,
509
- "end": 528,
510
- "text": "Furuse and Iida, 96)",
511
- "ref_id": null
512
- }
513
- ],
514
- "ref_spans": [
515
- {
516
- "start": 988,
517
- "end": 995,
518
- "text": "Table 4",
519
- "ref_id": null
520
- }
521
- ],
522
- "eq_spans": [],
523
- "section": "Filtering",
524
- "sec_num": "3.3"
525
- },
526
- {
527
- "text": "More than half of the 3,154 remaining NP-sequences contain only two words.",
528
- "cite_spans": [],
529
- "ref_spans": [],
530
- "eq_spans": [],
531
- "section": "Filtering",
532
- "sec_num": "3.3"
533
- },
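The NP filter FNP can be approximated by a regular expression over part-of-speech tags, in the spirit of what the text describes. The actual expressions are not given in this excerpt; the Penn-style pattern below (optional determiner, adjectives, one or more nouns) is an assumed stand-in.

import re

NP_RE = re.compile(r"^(DT_)?(JJ[RS]?_)*(NN[PS]{0,2}_)+$")

def is_np(tags):
    # tags: the part-of-speech tags of one candidate sequence.
    return bool(NP_RE.match("".join(t + "_" for t in tags)))

# is_np(["DT", "JJ", "NN"]) -> True; is_np(["VB", "NN"]) -> False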
534
- {
535
- "text": "We collected completion results on a test corpus of 747 sentences (13,386 english tokens and 14,506 french ones) taken from the Hansard corpus. These sentences have been selected randomly among sentences that have not been used for the training. Around 18% of the source and target words are not known by the translation model. The baseline models (line 1 and 2) are obtained without any unit model (i.e. /~ = 1 in equation 2). The first one is obtained with an IBM-like model 1 while the second is an IBM-like model 2. We observe that for the pair of languages we considered, model 2 improves the amount of saved keystrokes of almost 3% compared to model 1. Therefore we made use of alignment probabilities for the other models.",
536
- "cite_spans": [],
537
- "ref_spans": [],
538
- "eq_spans": [],
539
- "section": "Results",
540
- "sec_num": "4"
541
- },
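The "spared" figure of Table 4 can be read as the outcome of the following simulation (a sketch: complete(prefix, history) is an assumed hook returning the model's ranked proposals). The simulated user accepts the top proposal as soon as it matches the intended token.

def chars_spared(target_tokens, complete):
    typed, total, history = 0, 0, []
    for tok in target_tokens:
        total += len(tok)
        cost = len(tok)  # fall back to typing the whole token
        for i in range(len(tok) + 1):
            proposals = complete(tok[:i], history)
            if proposals and proposals[0] == tok:
                cost = i
                break
        typed += cost
        history.append(tok)
    return 1 - typed / max(total, 1)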
542
- {
543
- "text": "The three next blocks in table 4 show how the parameter estimation method affects performance. Training models under the C1 method gives the worst results. This results from the fact that the wordto-word probabilities trained on the sequence based corpus (predicted by Mu in equation 2) are less accurate than the ones learned from the token based corpus. The reason is simply that there are less occurrences of each token, especially if many units are identified by the grouping operator.",
544
- "cite_spans": [],
545
- "ref_spans": [],
546
- "eq_spans": [],
547
- "section": "Results",
548
- "sec_num": "4"
549
- },
550
- {
551
- "text": "In methods C2 and C3, the unit model of equation 2 only makes predictions pu(tls ) when s is a source unit, thus lowering the noise compared to method \u00a31.",
552
- "cite_spans": [],
553
- "ref_spans": [],
554
- "eq_spans": [],
555
- "section": "Results",
556
- "sec_num": "4"
557
- },
558
- {
559
- "text": "We also observe in these three blocks the influence of sequence filtering: the more we filter, the better the results. This holds true for all estimation methods tried. In the fifth block of table 4 we observe the positive influence of the NP-filtering, especially when using the third estimation method.",
560
- "cite_spans": [],
561
- "ref_spans": [],
562
- "eq_spans": [],
563
- "section": "Results",
564
- "sec_num": "4"
565
- },
566
- {
567
- "text": "The best combination we found is reported in line 15. It outperforms the baseline by around 1.5%. This model has been obtained by retaining all sequences seen at least two times in the training corpus for which the likelihood test value was above 5 and the entropy score above 0.2 (5rl (2, 2, 5, 0.2)). In terms of the coverage of this unit model, it is interesting to note that among the 747 sentences of the test session, there were 228 for which the model did not propose any units at all. For 425 of the remaining sentences, the model proposed at least one helpful (good or partially good) unit. The active vocabulary for these sentences contained an average of around 2.5 good units per sentence, of which only half (495) were proposed during the session. The fact that this model outperforms others despite its relatively poor coverage (compared to the others) may be explained by the fact that it also removes part of the noise introduced by decoupling the identification of the salient units from the training procedure. Furthermore, as we mentionned earlier, the more we filter, the less the grouping scheeme presented in equation 4 remains necessary, thus reducing a possible source of noise.",
568
- "cite_spans": [],
569
- "ref_spans": [],
570
- "eq_spans": [],
571
- "section": "Results",
572
- "sec_num": "4"
573
- },
582
- {
583
- "text": "We have described a prototype system called TRANSTYPE which embodies an innovative approach to interactive machine translation in which the interaction is directly concerned with establishing the target text. We proposed and tested a mechanism to enhance TRANSTYPE by having it predict sequences of words rather than just completions for the current word. The results show a modest improvement in prediction performance which will serve as a baseline for our future investigations. One obvious direction for future research is to revise our current strategy of decoupling the selection of units from their bilingual context.",
584
- "cite_spans": [],
585
- "ref_spans": [],
586
- "eq_spans": [],
587
- "section": "Conclusion",
588
- "sec_num": "5"
589
- },
590
- {
591
- "text": "1We assume the existence of a deterministic procedure for tokenizing the target text.",
592
- "cite_spans": [],
593
- "ref_spans": [],
594
- "eq_spans": [],
595
- "section": "",
596
- "sec_num": null
597
- }
598
- ],
599
- "back_matter": [
600
- {
601
- "text": "TRANSTYPE is a project funded by the Natural Sciences and Engineering Research Council of Canada. We are undebted to Elliott Macklovitch and Pierre Isabelle for the fruitful orientations they gave to this work.",
602
- "cite_spans": [],
603
- "ref_spans": [],
604
- "eq_spans": [],
605
- "section": "Acknowlegments",
606
- "sec_num": null
607
- }
608
- ],
609
- "bib_entries": {
610
- "BIBREF0": {
611
- "ref_id": "b0",
612
- "title": "A maximum entropy approach to natural language processing",
613
- "authors": [
614
- {
615
- "first": "Adam",
616
- "middle": [
617
- "L"
618
- ],
619
- "last": "Berger",
620
- "suffix": ""
621
- },
622
- {
623
- "first": "Stephen",
624
- "middle": [
625
- "A"
626
- ],
627
- "last": "Della Pietra",
628
- "suffix": ""
629
- },
630
- {
631
- "first": "Vincent",
632
- "middle": [
633
- "J Della"
634
- ],
635
- "last": "Pietra",
636
- "suffix": ""
637
- }
638
- ],
639
- "year": 1996,
640
- "venue": "Computational Linguistics",
641
- "volume": "22",
642
- "issue": "1",
643
- "pages": "39--71",
644
- "other_ids": {},
645
- "num": null,
646
- "urls": [],
647
- "raw_text": "Adam L. Berger, Stephen A. Della Pietra, and Vin- cent J. Della Pietra. 1996. A maximum entropy approach to natural language processing. Compu- tational Linguistics, 22(1):39-71.",
648
- "links": null
649
- },
650
- "BIBREF1": {
651
- "ref_id": "b1",
652
- "title": "The mathematics of machine trmaslation: Parameter estimation",
653
- "authors": [
654
- {
655
- "first": "F",
656
- "middle": [],
657
- "last": "Peter",
658
- "suffix": ""
659
- },
660
- {
661
- "first": "Stephen",
662
- "middle": [
663
- "A"
664
- ],
665
- "last": "Brown",
666
- "suffix": ""
667
- },
668
- {
669
- "first": "Vincent",
670
- "middle": [],
671
- "last": "Della Pietra",
672
- "suffix": ""
673
- },
674
- {
675
- "first": "J",
676
- "middle": [],
677
- "last": "Della",
678
- "suffix": ""
679
- },
680
- {
681
- "first": "Robert",
682
- "middle": [
683
- "L"
684
- ],
685
- "last": "Pietra",
686
- "suffix": ""
687
- },
688
- {
689
- "first": "",
690
- "middle": [],
691
- "last": "Mercer",
692
- "suffix": ""
693
- }
694
- ],
695
- "year": 1993,
696
- "venue": "Computational Linguistics",
697
- "volume": "19",
698
- "issue": "2",
699
- "pages": "263--312",
700
- "other_ids": {},
701
- "num": null,
702
- "urls": [],
703
- "raw_text": "Peter F. Brown, Stephen A. Della Pietra, Vincen- t Della J. Pietra, and Robert L. Mercer. 1993. The mathematics of machine trmaslation: Pa- rameter estimation. Computational Linguistics, 19(2):263-312, June.",
704
- "links": null
705
- },
706
- "BIBREF2": {
707
- "ref_id": "b2",
708
- "title": "Accurate methods for the statistics of surprise and coincidence",
709
- "authors": [
710
- {
711
- "first": "Ted",
712
- "middle": [],
713
- "last": "Dunning",
714
- "suffix": ""
715
- }
716
- ],
717
- "year": null,
718
- "venue": "Computational Linguistics",
719
- "volume": "93",
720
- "issue": "1",
721
- "pages": "61--74",
722
- "other_ids": {},
723
- "num": null,
724
- "urls": [],
725
- "raw_text": "Ted Dunning. 93. Accurate methods for the statis- tics of surprise and coincidence. Computational Linguistics, 19(1):61-74.",
726
- "links": null
727
- },
728
- "BIBREF3": {
729
- "ref_id": "b3",
730
- "title": "Nounphrase analysis in unrestricted text for information retrieval",
731
- "authors": [
732
- {
733
- "first": "A",
734
- "middle": [],
735
- "last": "David",
736
- "suffix": ""
737
- },
738
- {
739
- "first": "Chengxiang",
740
- "middle": [],
741
- "last": "Evans",
742
- "suffix": ""
743
- },
744
- {
745
- "first": "",
746
- "middle": [],
747
- "last": "Zhai",
748
- "suffix": ""
749
- }
750
- ],
751
- "year": 1996,
752
- "venue": "Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics",
753
- "volume": "",
754
- "issue": "",
755
- "pages": "17--24",
756
- "other_ids": {},
757
- "num": null,
758
- "urls": [],
759
- "raw_text": "David A. Evans and Chengxiang Zhai. 1996. Noun- phrase analysis in unrestricted text for informa- tion retrieval. In Proceedings of the 34th Annu- al Meeting of the Association for Computational Linguistics, pages 17-24, Santa Cruz, California.",
760
- "links": null
761
- },
762
- "BIBREF4": {
763
- "ref_id": "b4",
764
- "title": "Target-text Mediated Interactive Machine Translation",
765
- "authors": [
766
- {
767
- "first": "George",
768
- "middle": [],
769
- "last": "Foster",
770
- "suffix": ""
771
- },
772
- {
773
- "first": "Pierre",
774
- "middle": [],
775
- "last": "Isabelle",
776
- "suffix": ""
777
- },
778
- {
779
- "first": "Pierre",
780
- "middle": [],
781
- "last": "Plamondon",
782
- "suffix": ""
783
- }
784
- ],
785
- "year": 1997,
786
- "venue": "Machine Translation",
787
- "volume": "12",
788
- "issue": "",
789
- "pages": "175--194",
790
- "other_ids": {},
791
- "num": null,
792
- "urls": [],
793
- "raw_text": "George Foster, Pierre Isabelle, and Pierre Plamon- don. 1997. Target-text Mediated Interactive Ma- chine Translation. Machine Translation, 12:175- 194.",
794
- "links": null
795
- },
796
- "BIBREF5": {
797
- "ref_id": "b5",
798
- "title": "A pattern matching method for finding noun and proper noun translations from noisy parallel corpora",
799
- "authors": [
800
- {
801
- "first": "Pascale",
802
- "middle": [],
803
- "last": "Fung",
804
- "suffix": ""
805
- }
806
- ],
807
- "year": 1995,
808
- "venue": "Proceedings of the 33rd Annual Meeting of the Association for Computational Linguistics",
809
- "volume": "",
810
- "issue": "",
811
- "pages": "236--243",
812
- "other_ids": {},
813
- "num": null,
814
- "urls": [],
815
- "raw_text": "Pascale Fung. 1995. A pattern matching method for finding noun and proper noun translations from noisy parallel corpora. In Proceedings of the 33rd Annual Meeting of the Association for Compu- tational Linguistics, pages 236-243, Cambridge, Massachusetts.",
816
- "links": null
817
- },
818
- "BIBREF6": {
819
- "ref_id": "b6",
820
- "title": "Eric Gaussier. 1995. Modles statistiques et patrons morphosyntaxiques pour l'extraction de lcxiques bilingues",
821
- "authors": [
822
- {
823
- "first": "Osamu",
824
- "middle": [],
825
- "last": "Furuse",
826
- "suffix": ""
827
- },
828
- {
829
- "first": "Hitoshi",
830
- "middle": [],
831
- "last": "Iida",
832
- "suffix": ""
833
- }
834
- ],
835
- "year": null,
836
- "venue": "Proceedings of the 16th International Conference On Computational Linguistics",
837
- "volume": "96",
838
- "issue": "",
839
- "pages": "jan-- vier",
840
- "other_ids": {},
841
- "num": null,
842
- "urls": [],
843
- "raw_text": "Osamu Furuse and Hitoshi Iida. 96. Incremen- tal translation utilizing constituent boundray pat- terns. In Proceedings of the 16th International Conference On Computational Linguistics, pages 412-417, Copenhagen, Denmark. Eric Gaussier. 1995. Modles statistiques et patron- s morphosyntaxiques pour l'extraction de lcxiques bilingues. Ph.D. thesis, Universit de Paris 7, jan- vier.",
844
- "links": null
845
- },
846
- "BIBREF7": {
847
- "ref_id": "b7",
848
- "title": "Kuang hua Chen and Hsin-Hsi Chen. 94. Extracting noun phrases from large-scale texts: A hybrid approach and its automatic evaluation",
849
- "authors": [
850
- {
851
- "first": "Masahiko",
852
- "middle": [],
853
- "last": "Haruno",
854
- "suffix": ""
855
- },
856
- {
857
- "first": "Satoru",
858
- "middle": [],
859
- "last": "Ikehara",
860
- "suffix": ""
861
- },
862
- {
863
- "first": "Takefumi",
864
- "middle": [],
865
- "last": "Yamazaki",
866
- "suffix": ""
867
- }
868
- ],
869
- "year": null,
870
- "venue": "Proceedings of the 32nd Annual Meeting of the Association for Computational Linguistics",
871
- "volume": "96",
872
- "issue": "",
873
- "pages": "234--241",
874
- "other_ids": {},
875
- "num": null,
876
- "urls": [],
877
- "raw_text": "Masahiko Haruno, Satoru Ikehara, and Takefumi Yamazaki. 96. Learning bilingual collocations by word-level sorting. In Proceedings of the 16th In- ternational Conference On Computational Lin- guistics, pages 525-530, Copenhagen, Denmark. Kuang hua Chen and Hsin-Hsi Chen. 94. Extract- ing noun phrases from large-scale texts: A hybrid approach and its automatic evaluation. In Pro- ceedings of the 32nd Annual Meeting of the Asso- ciation for Computational Linguistics, pages 234- 241, Las Cruces, New Mexico.",
878
- "links": null
879
- },
880
- "BIBREF8": {
881
- "ref_id": "b8",
882
- "title": "96. A statistical method for extracting uinterupted and interrupted collocations from very large corpora",
883
- "authors": [
884
- {
885
- "first": "Satoru",
886
- "middle": [],
887
- "last": "Ikehara",
888
- "suffix": ""
889
- },
890
- {
891
- "first": "Satoshi",
892
- "middle": [],
893
- "last": "Shirai",
894
- "suffix": ""
895
- },
896
- {
897
- "first": "Hajine",
898
- "middle": [],
899
- "last": "Uchino",
900
- "suffix": ""
901
- }
902
- ],
903
- "year": null,
904
- "venue": "Proceedings of the 16th International Conference On Computational Linguistics",
905
- "volume": "",
906
- "issue": "",
907
- "pages": "574--579",
908
- "other_ids": {},
909
- "num": null,
910
- "urls": [],
911
- "raw_text": "Satoru Ikehara, Satoshi Shirai, and Hajine Uchino. 96. A statistical method for extracting uinterupt- ed and interrupted collocations from very large corpora. In Proceedings of the 16th International Conference On Computational Linguistics, pages 574-579, Copenhagen, Denmark.",
912
- "links": null
913
- },
914
- "BIBREF9": {
915
- "ref_id": "b9",
916
- "title": "An algorithm for finding noun phrase correspondences in bilingual corpora",
917
- "authors": [
918
- {
919
- "first": "Julian",
920
- "middle": [],
921
- "last": "Kupiec",
922
- "suffix": ""
923
- }
924
- ],
925
- "year": 1993,
926
- "venue": "Proceedings of the 31st Annual Meeting of the Association for Computational Linguistics",
927
- "volume": "",
928
- "issue": "",
929
- "pages": "17--22",
930
- "other_ids": {},
931
- "num": null,
932
- "urls": [],
933
- "raw_text": "Julian Kupiec. 1993. An algorithm for finding noun phrase correspondences in bilingual corpora. In Proceedings of the 31st Annual Meeting of the Association for Computational Linguistics, pages 17-22, Colombus, Ohio.",
934
- "links": null
935
- },
936
- "BIBREF10": {
937
- "ref_id": "b10",
938
- "title": "Automatic identification of noncompositional phrases",
939
- "authors": [],
940
- "year": null,
941
- "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics",
942
- "volume": "",
943
- "issue": "",
944
- "pages": "317--324",
945
- "other_ids": {},
946
- "num": null,
947
- "urls": [],
948
- "raw_text": "Dekang Lin. 99. Automatic identification of non- compositional phrases. In Proceedings of the 37th Annual Meeting of the Association for Computa- tional Linguistics, pages 317-324, College Park, Maryland.",
949
- "links": null
950
- },
951
- "BIBREF11": {
952
- "ref_id": "b11",
953
- "title": "Automatic discovery of noncompositional coumpounds in parallel data",
954
- "authors": [
955
- {
956
- "first": "Dan",
957
- "middle": [],
958
- "last": "Melamed",
959
- "suffix": ""
960
- }
961
- ],
962
- "year": 1997,
963
- "venue": "Proceedings of the 2nd Conference on Empirical Methods in Natural Language Processing",
964
- "volume": "",
965
- "issue": "",
966
- "pages": "97--108",
967
- "other_ids": {},
968
- "num": null,
969
- "urls": [],
970
- "raw_text": "Dan Melamed. 1997. Automatic discovery of non- compositional coumpounds in parallel data. In Proceedings of the 2nd Conference on Empirical Methods in Natural Language Processing, pages 97-108, Providence, RI, August, lst-2nd.",
971
- "links": null
972
- },
973
- "BIBREF12": {
974
- "ref_id": "b12",
975
- "title": "A new method of n-gram statistics for large number of n and automatic extraction of words and phrases from large text data of japanese",
976
- "authors": [
977
- {
978
- "first": "Makoto",
979
- "middle": [],
980
- "last": "Nagao",
981
- "suffix": ""
982
- },
983
- {
984
- "first": "Shinsuke",
985
- "middle": [],
986
- "last": "Mori",
987
- "suffix": ""
988
- }
989
- ],
990
- "year": null,
991
- "venue": "Proceedings of the 16th International Conference On Computational Linguistics",
992
- "volume": "94",
993
- "issue": "",
994
- "pages": "611--615",
995
- "other_ids": {},
996
- "num": null,
997
- "urls": [],
998
- "raw_text": "Makoto Nagao and Shinsuke Mori. 94. A new method of n-gram statistics for large number of n and automatic extraction of words and phrases from large text data of japanese. In Proceedings of the 16th International Conference On Com- putational Linguistics, volume 1, pages 611-615, Copenhagen, Denmark.",
999
- "links": null
1000
- },
1001
- "BIBREF13": {
1002
- "ref_id": "b13",
1003
- "title": "Improving statistical natural language translation with categories and rules",
1004
- "authors": [
1005
- {
1006
- "first": "Josef",
1007
- "middle": [],
1008
- "last": "Franz",
1009
- "suffix": ""
1010
- },
1011
- {
1012
- "first": "Hans",
1013
- "middle": [],
1014
- "last": "Och",
1015
- "suffix": ""
1016
- },
1017
- {
1018
- "first": "",
1019
- "middle": [],
1020
- "last": "Weber",
1021
- "suffix": ""
1022
- }
1023
- ],
1024
- "year": null,
1025
- "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics",
1026
- "volume": "98",
1027
- "issue": "",
1028
- "pages": "985--989",
1029
- "other_ids": {},
1030
- "num": null,
1031
- "urls": [],
1032
- "raw_text": "Franz Josef Och and Hans Weber. 98. Improving statistical natural language translation with cate- gories and rules. In Proceedings of the 36th Annu- al Meeting of the Association for Computational Linguistics, pages 985-989, Montreal, Canada.",
1033
- "links": null
1034
- },
1035
- "BIBREF14": {
1036
- "ref_id": "b14",
1037
- "title": "Identification of salient token sequences",
1038
- "authors": [
1039
- {
1040
- "first": "Graham",
1041
- "middle": [],
1042
- "last": "Russell",
1043
- "suffix": ""
1044
- }
1045
- ],
1046
- "year": 1998,
1047
- "venue": "",
1048
- "volume": "",
1049
- "issue": "",
1050
- "pages": "",
1051
- "other_ids": {},
1052
- "num": null,
1053
- "urls": [],
1054
- "raw_text": "Graham Russell. 1998. Identification of salient to- ken sequences. Internal report, RALI, University of Montreal, Canada.",
1055
- "links": null
1056
- },
1057
- "BIBREF15": {
1058
- "ref_id": "b15",
1059
- "title": "Retrieving collocations by cooccurrences and word order constraints",
1060
- "authors": [
1061
- {
1062
- "first": "Sayori",
1063
- "middle": [],
1064
- "last": "Shimohata",
1065
- "suffix": ""
1066
- },
1067
- {
1068
- "first": "Toshiyuki",
1069
- "middle": [],
1070
- "last": "Sugio",
1071
- "suffix": ""
1072
- },
1073
- {
1074
- "first": "Junji",
1075
- "middle": [],
1076
- "last": "Nagata",
1077
- "suffix": ""
1078
- }
1079
- ],
1080
- "year": 1997,
1081
- "venue": "Proceedings of the 35th Annual Meeting of the Association for Computational Linguistics",
1082
- "volume": "",
1083
- "issue": "",
1084
- "pages": "476--481",
1085
- "other_ids": {},
1086
- "num": null,
1087
- "urls": [],
1088
- "raw_text": "Sayori Shimohata, Toshiyuki Sugio, and Junji Nagata. 1997. Retrieving collocations by co- occurrences and word order constraints. In Pro- ceedings of the 35th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 476- 481, Madrid Spain.",
1089
- "links": null
1090
- },
1091
- "BIBREF16": {
1092
- "ref_id": "b16",
1093
- "title": "A corpus-based approach to automatic compound extraction",
1094
- "authors": [
1095
- {
1096
- "first": "Keh-Yih",
1097
- "middle": [],
1098
- "last": "Su",
1099
- "suffix": ""
1100
- },
1101
- {
1102
- "first": "Ming-Wen",
1103
- "middle": [],
1104
- "last": "Wu",
1105
- "suffix": ""
1106
- },
1107
- {
1108
- "first": "Jing-Shin",
1109
- "middle": [],
1110
- "last": "Chang",
1111
- "suffix": ""
1112
- }
1113
- ],
1114
- "year": 1994,
1115
- "venue": "Proceedings of the 32nd Annual Meeting of the Association for Computational Linguistics",
1116
- "volume": "",
1117
- "issue": "",
1118
- "pages": "242--247",
1119
- "other_ids": {},
1120
- "num": null,
1121
- "urls": [],
1122
- "raw_text": "Keh-Yih Su, Ming-Wen Wu, and Jing-Shin Chang. 1994. A corpus-based approach to automatic com- pound extraction. In Proceedings of the 32nd An- nual Meeting of the Association for Computation- al Linguistics, pages 242-247, Las Cruces, New Mexico.",
1123
- "links": null
1124
- },
1125
- "BIBREF17": {
1126
- "ref_id": "b17",
1127
- "title": "Modeling with structures in statistical machine translation",
1128
- "authors": [
1129
- {
1130
- "first": "Ye-",
1131
- "middle": [],
1132
- "last": "",
1133
- "suffix": ""
1134
- },
1135
- {
1136
- "first": "Yi",
1137
- "middle": [],
1138
- "last": "Wang",
1139
- "suffix": ""
1140
- },
1141
- {
1142
- "first": "Alex",
1143
- "middle": [],
1144
- "last": "Waibel",
1145
- "suffix": ""
1146
- }
1147
- ],
1148
- "year": null,
1149
- "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics",
1150
- "volume": "98",
1151
- "issue": "",
1152
- "pages": "1357--1363",
1153
- "other_ids": {},
1154
- "num": null,
1155
- "urls": [],
1156
- "raw_text": "Ye-Yi Wang and Alex Waibel. 98. Modeling with structures in statistical machine translation. In Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics, vol- ume 2, pages 1357-1363, Montreal, Canada.",
1157
- "links": null
1158
- },
1159
- "BIBREF18": {
1160
- "ref_id": "b18",
1161
- "title": "Machine translation with a stochastic grammatical channel",
1162
- "authors": [
1163
- {
1164
- "first": "Dekai",
1165
- "middle": [],
1166
- "last": "Wu",
1167
- "suffix": ""
1168
- },
1169
- {
1170
- "first": "Hongsing",
1171
- "middle": [],
1172
- "last": "Wong",
1173
- "suffix": ""
1174
- }
1175
- ],
1176
- "year": null,
1177
- "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics",
1178
- "volume": "98",
1179
- "issue": "",
1180
- "pages": "1408--1414",
1181
- "other_ids": {},
1182
- "num": null,
1183
- "urls": [],
1184
- "raw_text": "Dekai Wu and Hongsing Wong. 98. Machine trans- lation with a stochastic grammatical channel. In Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics, pages 1408-1414, Montreal, Canada.",
1185
- "links": null
1186
- },
1187
- "BIBREF19": {
1188
- "ref_id": "b19",
1189
- "title": "Stochastic inversion transduction grammars, with application to segmentation, bracketing, and alignment of parallel corpora",
1190
- "authors": [
1191
- {
1192
- "first": "Dekai",
1193
- "middle": [],
1194
- "last": "Wu",
1195
- "suffix": ""
1196
- }
1197
- ],
1198
- "year": 1995,
1199
- "venue": "Proceedings of the International Joint Conference on Artificial Intelligence",
1200
- "volume": "2",
1201
- "issue": "",
1202
- "pages": "1328--1335",
1203
- "other_ids": {},
1204
- "num": null,
1205
- "urls": [],
1206
- "raw_text": "Dekai Wu. 1995. Stochastic inversion transduc- tion grammars, with application to segmentation, bracketing, and alignment of parallel corpora. In Proceedings of the International Joint Conference on Artificial Intelligence, volume 2, pages 1328- 1335, Montreal, Canada.",
1207
- "links": null
1208
- }
1209
- },
1210
- "ref_entries": {
1211
- "FIGREF2": {
1212
- "text": "Example of an interaction in TRANSTYPE with the source text in the top half of the screen. The target text is typed in the bottom half with suggestions given by the menu at the insertion point.",
1213
- "uris": null,
1214
- "type_str": "figure",
1215
- "num": null
1216
- },
1217
- "TABREF0": {
1218
- "text": "",
1219
- "num": null,
1220
- "content": "<table><tr><td/><td/><td/><td colspan=\"3\">This bill is examined in the house of commons</td></tr><tr><td/><td/><td colspan=\"3\">word-completion task</td><td>unit-completion task</td></tr><tr><td/><td colspan=\"3\">preL completions</td><td colspan=\"2\">pref. completions</td></tr><tr><td>ce</td><td>ce+</td><td colspan=\"2\">/loi \u2022 C/'</td><td>c-l-</td><td>/loJ. \u2022 c</td></tr><tr><td>projet</td><td>p+</td><td>/est\u2022</td><td>p/rojet</td><td/></tr><tr><td>de</td><td>d+</td><td colspan=\"2\">/trbs \u2022 d/e</td><td/></tr><tr><td>Ioi</td><td>I+</td><td colspan=\"2\">/t=~s \u2022 I/oi</td><td/></tr><tr><td>est</td><td>e+</td><td colspan=\"2\">/de \u2022 e/st</td><td/></tr><tr><td>examin~</td><td>e+</td><td colspan=\"2\">/en \u2022 e/xamin6</td><td/></tr><tr><td/><td>~+</td><td colspan=\"2\">/par \u2022 ~/ 1~</td><td/></tr><tr><td>chambre</td><td>+</td><td colspan=\"2\">/chambre</td><td/></tr><tr><td>des</td><td>de+</td><td colspan=\"2\">/co,~unes \u2022 d/e</td><td>\u2022 de/s</td></tr><tr><td>communes</td><td>+</td><td colspan=\"2\">/communes</td><td/></tr></table>",
1221
- "html": null,
1222
- "type_str": "table"
1223
- },
1224
- "TABREF1": {
1225
- "text": "",
1226
- "num": null,
1227
- "content": "<table/>",
1228
- "html": null,
1229
- "type_str": "table"
1230
- },
1231
- "TABREF2": {
1232
- "text": "shows the 10 most likely tokens and units in the active vocabulary for an example source sentence.that. is \u2022 what. the . prime, minister . said \u2022 and. i \u2022 have. outlined\u2022 what. has. happened . since\u2022 then.. c' -est. ce-que, le-premier -ministre, adit.,.et.j',",
1233
- "num": null,
1234
- "content": "<table><tr><td/><td colspan=\"3\">ai. r4sum4-ce. qui.s'-est-</td></tr><tr><td/><td>produit -depuis \u2022 .</td><td/><td/></tr><tr><td colspan=\"4\">g(s) that is what \u2022 the prime minister said \u2022 , and i</td></tr><tr><td/><td colspan=\"3\">\u2022 have . outlined \u2022 what has happened \u2022 since</td></tr><tr><td/><td>then \u2022 .</td><td/><td/></tr><tr><td>As</td><td>\u2022 \u2022 \u2022 est \u2022 ce \u2022 ministre</td><td>\u2022 que.</td><td>et \u2022 a \u2022</td></tr><tr><td>A~</td><td/><td/><td/></tr></table>",
1235
- "html": null,
1236
- "type_str": "table"
1237
- }
1238
- }
1239
- }
1240
- }
 
Full_text_JSON/prefixA/json/A00/A00-1020.json DELETED
@@ -1,1300 +0,0 @@
1
- {
2
- "paper_id": "A00-1020",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:11:59.447619Z"
6
- },
7
- "title": "Multilingual Coreference Resolution",
8
- "authors": [
9
- {
10
- "first": "Sanda",
11
- "middle": [
12
- "M"
13
- ],
14
- "last": "Harabagiu",
15
- "suffix": "",
16
- "affiliation": {
17
- "laboratory": "",
18
- "institution": "Southern Methodist University Dallas",
19
- "location": {
20
- "postCode": "75275-0122",
21
- "region": "TX"
22
- }
23
- },
24
- "email": ""
25
- },
26
- {
27
- "first": "Steven",
28
- "middle": [
29
- "J"
30
- ],
31
- "last": "Maiorano",
32
- "suffix": "",
33
- "affiliation": {
34
- "laboratory": "",
35
- "institution": "Southern Methodist University Dallas",
36
- "location": {
37
- "postCode": "75275-0122",
38
- "region": "TX"
39
- }
40
- },
41
- "email": ""
42
- }
58
- ],
59
- "year": "",
60
- "venue": null,
61
- "identifiers": {},
62
- "abstract": "In this paper we present a new, multilingual data-driven method for coreference resolution as implemented in the SWIZZLE system. The results obtained after training this system on a bilingual corpus of English and Romanian tagged texts, outperformed coreference resolution in each of the individual languages.",
63
- "pdf_parse": {
64
- "paper_id": "A00-1020",
65
- "_pdf_hash": "",
66
- "abstract": [
67
- {
68
- "text": "In this paper we present a new, multilingual data-driven method for coreference resolution as implemented in the SWIZZLE system. The results obtained after training this system on a bilingual corpus of English and Romanian tagged texts, outperformed coreference resolution in each of the individual languages.",
69
- "cite_spans": [],
70
- "ref_spans": [],
71
- "eq_spans": [],
72
- "section": "Abstract",
73
- "sec_num": null
74
- }
75
- ],
76
- "body_text": [
77
- {
78
- "text": "The recent availability of large bilingual corpora has spawned interest in several areas of multilingual text processing. Most of the research has focused on bilingual terminology identification, either as parallel multiwords forms (e.g. the ChampoUion system (Smadja et a1.1996) ), technical terminology (e.g.",
79
- "cite_spans": [
80
- {
81
- "start": 260,
82
- "end": 279,
83
- "text": "(Smadja et a1.1996)",
84
- "ref_id": null
85
- }
86
- ],
87
- "ref_spans": [],
88
- "eq_spans": [],
89
- "section": "Introduction",
90
- "sec_num": "1"
91
- },
92
- {
93
- "text": "the Termight system (Dagan and Church, 1994) or broad-coverage translation lexicons (e.g. the SABLE system (Resnik and Melamed, 1997) ). In addition, the Multilingual Entity Task (MET) from the TIP-STER program 1 (http://www-nlpir.nist.gov/relatedprojeets/tipster/met.htm) challenged the participants in the Message Understanding Conference (MUC) to extract named entities across several foreign language corpora, such as Chinese, Japanese and Spanish.",
94
- "cite_spans": [
95
- {
96
- "start": 20,
97
- "end": 44,
98
- "text": "(Dagan and Church, 1994)",
99
- "ref_id": "BIBREF9"
100
- },
101
- {
102
- "start": 107,
103
- "end": 133,
104
- "text": "(Resnik and Melamed, 1997)",
105
- "ref_id": "BIBREF22"
106
- }
107
- ],
108
- "ref_spans": [],
109
- "eq_spans": [],
110
- "section": "Introduction",
111
- "sec_num": "1"
112
- },
113
- {
114
- "text": "In this paper we present a new application of aligned multilinguai texts. Since coreference resolution is a pervasive discourse phenomenon causing performance impediments in current IE systems, we considered a corpus of aligned English and Romanian texts to identify coreferring expressions. Our task focused on the same kind of coreference as considered in the past MUC competitions, namely 1The TIPSTER Text Program was a DARPA-Ied government effort to advance the state of the art in text processing technologies. the identity coreference. Identity coreference links nouns, pronouns and noun phrases (including proper names) to their corresponding antecedents.",
115
- "cite_spans": [],
116
- "ref_spans": [],
117
- "eq_spans": [],
118
- "section": "Introduction",
119
- "sec_num": "1"
120
- },
121
- {
122
- "text": "We created our bilingual collection by translating the MUC-6 and MUC-7 coreference training texts into Romanian using native speakers. The training data set for Romanian coreference used, wherever possible, the same coreference identifiers as the English data and incorporated additional tags as needed. Our claim is that by adding the wealth of coreferential features provided by multilingual data, new powerful heuristics for coreference resolution can be developed that outperform monolingual coreference resolution systems.",
123
- "cite_spans": [],
124
- "ref_spans": [],
125
- "eq_spans": [],
126
- "section": "Introduction",
127
- "sec_num": "1"
128
- },
129
- {
130
- "text": "For both languages, we resolved coreference by using SWIZZLE, our implementation of a bilingual coreference resolver. SWIZZLE is a multilingual enhancement of COCKTAIL (Harabagiu and Maiorano, 1999) , a coreference resolution system that operates on a mixture of heuristics that combine semantic and textual cohesive information 2. When COCKTAIL was applied separately on the English and the Romanian texts, coreferring links were identified for each English and Romanian document respectively. When aligned referential expressions corefer with non-aligned anaphors, SWIZZLE derived new heuristics for coreference. Our experiments show that SWIZZLE outperformed COCKTAIL on both English and Romanian test documents.",
131
- "cite_spans": [
132
- {
133
- "start": 168,
134
- "end": 198,
135
- "text": "(Harabagiu and Maiorano, 1999)",
136
- "ref_id": "BIBREF11"
137
- }
138
- ],
139
- "ref_spans": [],
140
- "eq_spans": [],
141
- "section": "Introduction",
142
- "sec_num": "1"
143
- },
144
- {
145
- "text": "The rest of the paper is organized as follows. Section 2 presents COCKTAIL, a monolingnai coreference resolution system used separately on both the English and Romanian texts. Section 3 details the data-driven approach used in SWIZZLE and presents some of its resources. Section 4 reports and discusses the experimental results. Section 5 summarizes the 2The name of COCKTAIL is a pun on CogNIAC because COCKTAIL combines a larger number of heuristics than those reported in (Baldwin, 1997) . SWIZZLE, moreover, adds new heuristics, discovered from the bilingual aligned corpus.",
146
- "cite_spans": [
147
- {
148
- "start": 475,
149
- "end": 490,
150
- "text": "(Baldwin, 1997)",
151
- "ref_id": "BIBREF1"
152
- }
153
- ],
154
- "ref_spans": [],
155
- "eq_spans": [],
156
- "section": "Introduction",
157
- "sec_num": "1"
158
- },
167
- {
168
- "text": "Currently, some of the best-performing and most robust coreference resolution systems employ knowledge-based techniques. Traditionally, these techniques have combined extensive syntactic, semantic, and discourse knowledge. The acquisition of such knowledge is time-consuming, difficult, and error-prone. Nevertheless, recent results show that knowledge-poor methods perform with amazing accuracy (cf. (Mitkov, 1998) , (Kennedy and Boguraev, 1996) (Kameyama, 1997) ). For example, CogNIAC (Baldwin, 1997) , a system based on seven ordered heuristics, generates high-precision resolution (over 90%) for some cases of pronominal reference. For this research, we used a coreference resolution system ( (Harabagiu and Malorano, 1999) ) that implements different sets of heuristics corresponding to various forms of coreference. This system, called COCKTAIL, resolves coreference by exploiting several textual cohesion constraints (e.g. term repetition) combined with lexical and textual coherence cues (e.g. subjects of communication verbs are more likely to refer to the last person mentioned in the text). These constraints are implemented as a set of heuristics ordered by their priority. Moreover, the COCKTAIL framework uniformly addresses the problem of interaction between different forms of coreference, thus making the extension to multilingual coreference very natural.",
169
- "cite_spans": [
170
- {
171
- "start": 401,
172
- "end": 415,
173
- "text": "(Mitkov, 1998)",
174
- "ref_id": "BIBREF19"
175
- },
176
- {
177
- "start": 418,
178
- "end": 446,
179
- "text": "(Kennedy and Boguraev, 1996)",
180
- "ref_id": null
181
- },
182
- {
183
- "start": 447,
184
- "end": 463,
185
- "text": "(Kameyama, 1997)",
186
- "ref_id": "BIBREF16"
187
- },
188
- {
189
- "start": 488,
190
- "end": 503,
191
- "text": "(Baldwin, 1997)",
192
- "ref_id": "BIBREF1"
193
- },
194
- {
195
- "start": 698,
196
- "end": 728,
197
- "text": "(Harabagiu and Malorano, 1999)",
198
- "ref_id": null
199
- }
200
- ],
201
- "ref_spans": [],
202
- "eq_spans": [],
203
- "section": "C O C K T A I L",
204
- "sec_num": "2"
205
- },
206
- {
207
- "text": "In general, we define a data-driven methodology as a sequence of actions that captures the data patterns capable of resolving a problem with both a high degree of precision and recall. Our data-driven methodology reported here generated sets of heuristics for the coreference resolution problem. Precision is the number of correct references out of the total number of coreferences resolved, whereas the recall measures the number of resolved references out of the total number of keys, i.e., the annotated coreference data.",
208
- "cite_spans": [],
209
- "ref_spans": [],
210
- "eq_spans": [],
211
- "section": "D a t a -D r i v e n C o r e f e r e n c e R e s o l u t i o n",
212
- "sec_num": "2.1"
213
- },
214
- {
215
- "text": "The data-driven methodology used in COCKTAIL is centered around the notion of a coreference chain. Given a corpus annotated with coreference data, the data-driven methodology first generates all coreference chains in the data set and then considers all possible combinations of coreference relations that would generate the same coreference chains. For a coreference chain of length l with nodes nl, n2, ... nt+l, each node nk ( l < k~/ ) can be connected to any of the l -k nodes preceding it. From this observation, we find that a number of 1 x 2 x ... x (l -k)... x I = l! coreference structures can generate the same coreference chain. This result is very important, since it allows for the automatic generation of coreference data. For each coreference relation T~ from an annotated corpus we created a median of (l -1)! new coreference relations, where l is the length of the coreference chain containing relation 7~. This observation gave us the possibility of expanding the test data provided by the coreference keys available in the MUC-6 and MUC-7 competitions (MUC-6 1996) , (MUC-7 1998) . The MUC-6 coreference annotated corpus contains 1626 coreference relations, while the MUC-7 corpus has 2245 relations. The average length of a coreference chain is 7.21 for the MUC-6 data, and 8.57 for the MUC-7 data. We were able to expand the number of annotated coreference relations to 6,095,142 for the MUC-6 corpus and to 8,269,403 relations for the MUC-7 corpus; this represents an expansion factor of 3,710. We are not aware of any other automated way of creating coreference annotated data, and we believe that much of the COCKTAIL's impressive performance is due to the plethora of data provided by this method. ",
216
- "cite_spans": [
217
- {
218
- "start": 1071,
219
- "end": 1083,
220
- "text": "(MUC-6 1996)",
221
- "ref_id": null
222
- },
223
- {
224
- "start": 1086,
225
- "end": 1098,
226
- "text": "(MUC-7 1998)",
227
- "ref_id": null
228
- }
229
- ],
230
- "ref_spans": [],
231
- "eq_spans": [],
232
- "section": "D a t a -D r i v e n C o r e f e r e n c e R e s o l u t i o n",
233
- "sec_num": "2.1"
234
- },
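The l! expansion is easy to reproduce (a sketch; node identifiers are arbitrary): every node of a chain may take any earlier node as antecedent, so the number of structures is the product 1 x 2 x ... x l.

from itertools import product

def expand_chain(chain):
    # chain: ordered mentions [n1, n2, ..., nl+1].
    # Node k+1 may point to any of its k predecessors.
    choices = [range(k) for k in range(1, len(chain))]
    return [[(chain[k + 1], chain[a]) for k, a in enumerate(pick)]
            for pick in product(*choices)]

# expand_chain(["A", "B", "C"]) yields 2! = 2 structures:
# [("B", "A"), ("C", "A")] and [("B", "A"), ("C", "B")]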
235
- {
236
- "text": "The result of our data-driven methodology is the set of heuristics implemented in COCKTAIL which cover both nominal and pronoun coreference. Each heuristic represents a pattern of coreference that was mined from the large set of coreference data. COCKTAIL uses knowledge-poor methods because (a) it is based only on a limited number of heuristics and (b) text processing is limited to part-of-speech tagging, named-entity recognition, and approximate phrasal parsing. The heuristics from COCKTAIL can be classified along two directions. First of all, they can be grouped according to the type of coreference they resolve, e.g., heuristics that resolve the anaphors of reflexive pronouns operate differently than those resolving bare nominals. Currently, in COCKTAIL there are heuristics that resolve five types of pronouns (personal, possessive, reflexive, demonstrative and relative) and three forms of nominals (definite, bare and indefinite).",
237
- "cite_spans": [],
238
- "ref_spans": [],
239
- "eq_spans": [],
240
- "section": "Knowledge-Poor Coreference Resolution",
241
- "sec_num": "2.2"
242
- },
243
- {
244
- "text": "Secondly, for each type of coreference, there are three classes of heuristics categorized according to their suitability to resolve coreference. The first class is comprised of strong indicators of coreference. This class resulted from the analysis of the distribution of the antecedents in the MUC annotated data. For example, repetitions of named entities and appositives account for the majority of the nominal coreferences, and, therefore, represent anchors for the first class of heuristics.",
245
- "cite_spans": [],
246
- "ref_spans": [],
247
- "eq_spans": [],
248
- "section": "Knowledge-Poor Coreference Resolution",
249
- "sec_num": "2.2"
250
- },
251
- {
252
- "text": "The second class of coreference covers cases in which the arguments are recognized to be semantically consistent. COCKTAIL's test of semantic consistency blends together information available from WordNet and statistics gathered from Treebank. Different consistency checks are modeled for each of the heuristics.",
253
- "cite_spans": [],
254
- "ref_spans": [],
255
- "eq_spans": [],
256
- "section": "Knowledge-Poor Coreference Resolution",
257
- "sec_num": "2.2"
258
- },
259
- {
260
- "text": "Mr. Adams1, 69 years old, is the retired chairman of Canadian-based Emco Ltd., a maker of plumbing and petroleum equipment; he1 has served on the Woolworth board since 1981. Example of the application of heuristic H3Pron \"We have got to stop pointing our fingers at these kids2 who have no future,\" he said, \"and reach our hands out to them2.",
261
- "cite_spans": [],
262
- "ref_spans": [],
263
- "eq_spans": [],
264
- "section": "Example of the application of heuristic H2Pron",
265
- "sec_num": null
266
- },
267
- {
268
- "text": "The chairman and the chief executive officer3 of Woolworth Corp. have temporarily relinquished their posts while the retailer conducts its investigation into alleged accounting irregularities4.",
269
- "cite_spans": [],
270
- "ref_spans": [],
271
- "eq_spans": [],
272
- "section": "Example of the application of heuristic H2Nom",
273
- "sec_num": null
274
- },
275
- {
276
- "text": "Woolworth's board named John W. Adams, an outsider, to serve as interim chairman and executive officer3, while a special committee, appointed by the board last week and led by Mr. Adams, investigates the alleged irregularities4. The third class of heuristics resolves coreference by coercing nominals. Sometimes coercions involve only derivational morphology -linking verbs with their nominalizations. On other occasions, coercions are obtained as paths of meronyms (e.g. is-part relations) and hypernyms (e.g. is-a relations). Con-sistency checks implemented for this class of coreference are conservative: either the adjuncts must be identical or the adjunct of the referent must be less specific than the antecedent. Table 1 lists the top performing heuristics of COCKTAIL for pronominal and nominal coreference. Examples of the heuristics operation on the MUC data are presented presented in Table 2 . Details of the top performing heuristics of COCKTAIL were reported in (Harabagiu and Maiorano, 1999) .",
277
- "cite_spans": [
278
- {
279
- "start": 976,
280
- "end": 1006,
281
- "text": "(Harabagiu and Maiorano, 1999)",
282
- "ref_id": "BIBREF11"
283
- }
284
- ],
285
- "ref_spans": [
286
- {
287
- "start": 720,
288
- "end": 727,
289
- "text": "Table 1",
290
- "ref_id": "TABREF2"
291
- },
292
- {
293
- "start": 896,
294
- "end": 903,
295
- "text": "Table 2",
296
- "ref_id": "TABREF3"
297
- }
298
- ],
299
- "eq_spans": [],
300
- "section": "Example of the application of heuristic H2Nom",
301
- "sec_num": null
302
- },
303
- {
304
- "text": "One of the major drawbacks of existing coreference resolution systems is their inability to recognize many forms of coreference displayed by many real-world texts. Recall measures of current systems range between 36% and 59% for both knowledgebased and statistical techniques. Knowledge basedsystems would perform better if more coreference constraints were available whereas statistical methods would be improved if more annotated data were available. Since knowledge-based techniques outperform inductive methods, we used high-precision coreference heuristics as knowledge seeds for machine learning techniques that operate on large amounts of unlabeled data. One such technique is bootstrapping, which was recently presented in , (Jones et a1.1999) as an ideal framework for text learning tasks that have knowledge seeds. The method does not require large training sets. We extended COCKTAIL by using metabootstrapping of both new heuristics and clusters of nouns that display semantic consistency for coreference. The coreference heuristics are the seeds of our bootstrapping framework for coreference resolution. When applied to large collections of texts, the heuristics determine classes of coreferring expressions. By generating coreference chains out of all these coreferring expressions, often new heuristics are uncovered. For example, Figure 2 illustrates the application of three heuristics and the generation of data for a new heuristic rule. In COCKTAIL, after a heuristic is applied, a new coreference chain is calculated. For the example illustrated in Figure 2 , if the reference of expression A is sought, heuristic H1 indicates expression B to be the antecedent. When the coreference chain is built, expression A is directly linked to expression D, thus uncovering a new heuristic H0.",
305
- "cite_spans": [
306
- {
307
- "start": 733,
308
- "end": 751,
309
- "text": "(Jones et a1.1999)",
310
- "ref_id": null
311
- }
312
- ],
313
- "ref_spans": [
314
- {
315
- "start": 1347,
316
- "end": 1355,
317
- "text": "Figure 2",
318
- "ref_id": "FIGREF1"
319
- },
320
- {
321
- "start": 1570,
322
- "end": 1578,
323
- "text": "Figure 2",
324
- "ref_id": "FIGREF1"
325
- }
326
- ],
327
- "eq_spans": [],
328
- "section": "Bootstrapping for Coreferenee Resolution",
329
- "sec_num": "2.3"
330
- },
331
- {
332
- "text": "As a rule of thumb, we do not consider a new heuristic unless there is massive evidence of its coverage in the data. To measure the coverage we use the FOIL_Gain measure, as introduced by the FOIL inductive algorithm (Cameron- Jones and Quinlan 1993) . Let Ho be the new heuristic and/-/1 a heuristic that is already in the seed set. Let P0 be the number of positive coreference examples of Hn~w (i.e. the number of coreference relations produced by the heuristic that can be found in the test data) and no the number of negative examples of/-/new (i.e. the number of relations generated by the heuristic which cannot be found in the test data). Similarly, Pl and nl are the positive and negative examples of Ha. The new heuristics are scored by their FOIL_Gain distance to the existing set of heuristics, and the best scoring one is added to the COCKTAIL system. The FOIL_Gain formula is:",
333
- "cite_spans": [
334
- {
335
- "start": 227,
336
- "end": 250,
337
- "text": "Jones and Quinlan 1993)",
338
- "ref_id": "BIBREF5"
339
- }
340
- ],
341
- "ref_spans": [],
342
- "eq_spans": [],
343
- "section": "Bootstrapping for Coreferenee Resolution",
344
- "sec_num": "2.3"
345
- },
346
- {
347
- "text": "log2---~ ) FOIL_Gain(H1, Ho) = k(log2 Pl nl Po -k no",
348
- "cite_spans": [],
349
- "ref_spans": [],
350
- "eq_spans": [],
351
- "section": "Bootstrapping for Coreferenee Resolution",
352
- "sec_num": "2.3"
353
- },
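A direct transcription of the reconstructed formula (a sketch; it assumes all counts are positive so the logarithms are defined):

import math

def foil_gain(p1, n1, p0, n0, k):
    # k: positive examples covered by both H1 and H0.
    info = lambda p, n: math.log2(p / (p + n))
    return k * (info(p1, n1) - info(p0, n0))

# e.g. foil_gain(40, 10, 30, 30, 25) -> about 16.95 bits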
354
- {
355
- "text": "where k is the number of positive examples covered by both//1 and Ho. Heuristic Ho is added to the seed set if there is no other heuristic providing larger FOIL_Gain to any of the seed heuristics. Since in COCKTAIL, semantic consistency of coreferring expressions is checked by comparing the similarity of noun classes, each new heuristic determines the adjustment of the similarity threshold of all known coreferring noun classes. The steps of the bootstrapping algorithm that learns both new heuristics and adjusts the similarity threshold of coreferential expressions is: note that the bootstrapping algorithm works well but its performance can deteriorate rapidly when non-coreferring data enter as candidate heuristics. To make the algorithm more robust, a second level of bootstrapping can be introduced. The outer bootstrapping mechanism, called recta-bootstrapping compiles the results of the inner (mutual) bootstrapping process and identifies the k most reliable heuristics, where k is a number determined experimentally. These k heuristics are retained and the rest of them are discarded.",
356
- "cite_spans": [],
357
- "ref_spans": [],
358
- "eq_spans": [],
359
- "section": "Bootstrapping for Coreferenee Resolution",
360
- "sec_num": "2.3"
361
- },
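The meta-bootstrapping loop described above can be sketched as follows (hypothetical hooks: propose generates candidate heuristics from the current coreference chains, and score is the FOIL_Gain-based reliability).

def meta_bootstrap(seed_heuristics, corpus, propose, score, k=5, rounds=10):
    heuristics = list(seed_heuristics)
    for _ in range(rounds):
        # inner (mutual) bootstrapping step over the whole corpus
        candidates = propose(heuristics, corpus)
        ranked = sorted(candidates,
                        key=lambda h: score(h, heuristics, corpus),
                        reverse=True)
        best = ranked[:k]  # retain only the k most reliable heuristics
        if not best:
            break
        heuristics.extend(best)
    return heuristics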
362
- {
363
- "text": "MUTUAL",
364
- "cite_spans": [],
365
- "ref_spans": [],
366
- "eq_spans": [],
367
- "section": "H3.j...~IB B [~ HO -New Heuristic",
368
- "sec_num": null
369
- },
370
- {
371
- "text": "To study the performance of a data-driven multilingual coreference resolution system, we prepared a corpus of Romanian texts by translating the MUC-6 and MUC-7 coreference training texts. The translations were performed by a group of four Romanian native speakers, and were checked for style by a certified translator from Romania. In addition, the Romanian texts were annotated with coreference keys. Two rules were followed when the annotations were done: o 1: Whenever an expression ER represents a translation of an expression EE from the corresponding English text, if Es is tagged as a coreference key with identification number ID, then the Romanian expression ER is also tagged with the same ID number. This rule allows for translations in which the textual position of the referent and the antecedent have been swapped. ",
372
- "cite_spans": [],
373
- "ref_spans": [],
374
- "eq_spans": [],
375
- "section": "MultiHngual Coreference Data",
376
- "sec_num": null
377
- },
378
- {
379
- "text": "The multilingual coreference resolution method implemented in SWIZZLE incorporates the heuristics derived from COKCTAIL's monolingual coreference resolution processing in both languages. To this end, COCKTAIL required both sets of texts to be tagged for part-of-speech and to recognize the noun phrases. The English texts were parsed with Brill's part-ofspeech tagger (Brill 1992 ) and the noun phrases were identified by the grammar rules implemented in the phrasal parser of FASTUS (Appelt et al., 1993) . Corresponding resources are not available in Romanian.",
380
- "cite_spans": [
381
- {
382
- "start": 368,
383
- "end": 379,
384
- "text": "(Brill 1992",
385
- "ref_id": "BIBREF4"
386
- },
387
- {
388
- "start": 484,
389
- "end": 505,
390
- "text": "(Appelt et al., 1993)",
391
- "ref_id": "BIBREF0"
392
- }
393
- ],
394
- "ref_spans": [],
395
- "eq_spans": [],
396
- "section": "Lexical Resources",
397
- "sec_num": "3.2"
398
- },
399
- {
400
- "text": "To minimize COCKTAIL's configuration for processing Romanian texts, we implemented a Romanian part-of-speech rule-based tagger that used the same Economic adviser Gene Sperling described <COREF ID=\"29\" TYPE='IDENT\" REF-\"30\"> it</COREF> as \"a true full-court press\" to pass <COREF ID=\"31\" TYPE=\"IDENT\" REF=\"26\" MIN='bilr' >the <COREF ID=\"32\" TYPE=\"IDENT\" REF-----\"10\" MIN=\"reduction\"> <COREF ID=\"33\" TYPE=\"IDENT\" REF=\"12\"> deficit</COREF>-reduction</COREF> bill, the final version of which is now being hammered out by <COREF ID=\" 43\" >House </COREF> and <COREF ID=\"41\" >Senate </COREF>negotiators</COREF>. <COREF ID=\" 34\" TYPE=\" IDENT\" REF-\" 2\" > The executives</COREF>' backing -however tepid -gives the administration a way to counter <COREF ID=\"35\" TYPE=\"IDENT\" REF=\"36\"> business</COREF:> critics of <COREF ID=\"500\" TYPE=\"IDENT\" REF=\"31\" MIN=\"package\" STATUS=\" OPT\" >the overall package </COREF>,... Consilierul cu probleme economice Gene Sperling a descris-<COREF ID=\" 29\" TYPE=\"IDENT\" REF=\"30\" >o</COREF> ca pe un efort de avengur~ menit s~ promoveze <COREF ID=\" 1125\" TYPE=\"IDENT\" REF=\"26\" MIN=\"legea\">legea </COREF> pentru <COREF TYPE=\"IDENT\" REF=\" 10\" MIN=\"reducerea\" > reducerea </COREF> <COREF ID=\" 33\" TYPE=\" IDENT\" REF=\" 12\"> deficitului in bugetul SUA</COREF>. Versiunea finals a acestei <COREF ID=\"1126\" TYPE=\"IDENT\" REF=\"l125\" MIN=\"legi\">legi </COI~EF> este desfiin~at~ chiax in aceste zile in cadrul dezbaterilor ce au loc in <COREF ID=\"43\" >Camera Reprezentatlvilor </CORJ~F> \u00a7i in <COREF ID=\"41\"> Senat</COREF></COREF>. Sprijinirea <COREF ID=\"127\" TYPE=\"IDENT\" REF=\" 1126\" MIN=\"legii\" >legii>/COREF> de c~tre speciali~ti in economic -de \u00a7i in manier~ moderat~ -ofer~ administra~iei o modalitate de a contrabalansa criticile aduse <COREF ID=\"500\" TYPE=\"IDENT\" REF=\"31\" MIN=\" legii\" STATUS=\" OPT\" >legii</COREF> de c~tre companiile americane,... Table 3 : Example of parallel English and Romanian text annotated for coreference. The elements from a coreference chain in the respective texts are underlined. The English text has only two elements in the coreference chain, whereas the Romanian text contains four different elements. The two additional elements of the Romanian coreference chain are derived due to (1) the need to translate the relative clause from the English fragment into a separate sentence in Romanian; and (2) the reordering of words in the second sentence. tags as generated by the Brill tagger. In addition, we implemented rules that identify noun phrases in Romanian.",
401
- "cite_spans": [],
402
- "ref_spans": [
403
- {
404
- "start": 1863,
405
- "end": 1870,
406
- "text": "Table 3",
407
- "ref_id": null
408
- }
409
- ],
410
- "eq_spans": [],
411
- "section": "Lexical Resources",
412
- "sec_num": "3.2"
413
- },
414
- {
415
- "text": "To take advantage of the aligned corpus, SWIZZLE also relied on bilingual lexical resources that help translate the referential expressions.",
416
- "cite_spans": [],
417
- "ref_spans": [],
418
- "eq_spans": [],
419
- "section": "Lexical Resources",
420
- "sec_num": "3.2"
421
- },
422
- {
423
- "text": "For this purpose, we used a core Romanian WordNet (Harabagiu, 1999) which encoded, wherever possible, links between the English synsets and their Romanian counterparts. This resource also incorporated knowledge derived from several bilingual dictionaries (e.g. (Bantam, 1969) ).",
424
- "cite_spans": [
425
- {
426
- "start": 50,
427
- "end": 67,
428
- "text": "(Harabagiu, 1999)",
429
- "ref_id": "BIBREF10"
430
- },
431
- {
432
- "start": 261,
433
- "end": 275,
434
- "text": "(Bantam, 1969)",
435
- "ref_id": null
436
- }
437
- ],
438
- "ref_spans": [],
439
- "eq_spans": [],
440
- "section": "Lexical Resources",
441
- "sec_num": "3.2"
442
- },
443
- {
444
- "text": "Having the parallel coreference annotations, we can easily identify their translations because they have the same identification coreference key. Looking at the example given in Table 3 , the expression \"legii', with ID=500 is the translation of the expression \"package\", having the same ID in the English text. However, in the test set, the REF fields are intentionally voided, entrusting COCKTAIL to identify the antecedents. The bilingual coreference resolution performed in SWIZZLE, however, requires the translations of the English and Romanian antecedents. The principles guiding the translations of the English and Romanian antecedents (A E-R and A R-E, respectively) are:",
445
- "cite_spans": [],
446
- "ref_spans": [
447
- {
448
- "start": 178,
449
- "end": 185,
450
- "text": "Table 3",
451
- "ref_id": null
452
- }
453
- ],
454
- "eq_spans": [],
455
- "section": "Lexical Resources",
456
- "sec_num": "3.2"
457
- },
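Because translated referential expressions carry the same ID key, pairing them is mechanical. The toy sketch below recovers ID-to-span tables from two annotated texts and intersects them; it assumes flat (non-nested) COREF tags, so the nested annotations of Table 3 would need a real SGML parser instead.

    import re

    TAG = re.compile(r'<COREF[^>]*\bID="\s*(\d+)"[^>]*>(.*?)</COREF>', re.S)

    def spans_by_id(sgml):
        # Map each coreference ID to the text of its annotated span.
        return {m.group(1): m.group(2).strip() for m in TAG.finditer(sgml)}

    def translation_pairs(en_sgml, ro_sgml):
        # IDs present in both texts identify expression/translation pairs.
        en, ro = spans_by_id(en_sgml), spans_by_id(ro_sgml)
        return {i: (en[i], ro[i]) for i in en.keys() & ro.keys()}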
458
- {
459
- "text": "\u2022 Circularity: Given an English antecedent, due to semantic ambiguity, it can belong to several English WordNet sysnsets. For each such sysnset S/~ we consider the Romanian corresponding sysnet(s) Sff. We filter out all Sff that do not contain A E-R. If only one Romanian sysnset is left, then we identified a translation. Otherwise, we start from the Romanian antecedent, find all synsets S R to which it belongs, and obtain the corresponding English sysnets S F. Similarly, all English synsets not containing the English antecedent are filtered out. If only one synset remains, we have again identified a translation. Finally, in the last case, the intersection of the multiple synsets in either language generates a legal translation. For example, the English synset S E ={bill, measure} translates into the Romanian synset S R ={lege}. First, none of the dictionary translations of bill into Romanian (e.g. politE, bac-notE, afi~) translate back into any of the elements of S E. However the translation of measure into the Romanian lege translates back into bill, its synonym. \u2022 Semantic density: Given an English and a Romanian antecedent, to establish whether they are translations of one another, we disambiguate them by first collapsing all sysnsets that have common elements. Then we apply the circularity principle, relying on the semantic alignment encoded in the Romanian WordNet. When this core lexical database was first implemented, several other principles were applied. In our experiment, we were satisfied with the quality of the translations recognized by following only these two principles.",
460
- "cite_spans": [],
461
- "ref_spans": [],
462
- "eq_spans": [],
463
- "section": "Lexical Resources",
464
- "sec_num": "3.2"
465
- },
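A minimal sketch of the circularity test follows. The two dictionaries stand in for the aligned English and Romanian WordNets; their contents here are illustrative only, built around the bill/measure/lege example above.

    # English word -> its English synsets; English synset -> aligned Romanian synsets.
    EN_SYNSETS = {
        "bill": [frozenset({"bill", "measure"}), frozenset({"bill", "invoice"})],
        "measure": [frozenset({"bill", "measure"})],
    }
    RO_OF_EN = {
        frozenset({"bill", "measure"}): [frozenset({"lege"})],
        frozenset({"bill", "invoice"}): [frozenset({"factura"})],
    }

    def circular_translations(en_antecedent, ro_antecedent):
        # Keep only the Romanian synsets reachable from the English antecedent
        # that actually contain the Romanian antecedent.
        return [s_r
                for s_e in EN_SYNSETS.get(en_antecedent, [])
                for s_r in RO_OF_EN.get(s_e, [])
                if ro_antecedent in s_r]

    # circular_translations("bill", "lege") -> [frozenset({'lege'})]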
466
- {
467
- "text": "The SWIZZLE system was run on a corpus of 2335 referential expressions in English (927 from MUC-6 and 1408 from MUC-7) and 2851 Romanian expressions (1219 from MUC-6 and 1632 from MUC-7). Initially, the heuristics implemented in COCKTAIL were applied separately to the two textual collections. Several special cases arose. ",
468
- "cite_spans": [],
469
- "ref_spans": [],
470
- "eq_spans": [],
471
- "section": "Multilingual Coreference Resolution",
472
- "sec_num": "3.3"
473
- },
474
- {
475
- "text": "\"~eference Case 1, which is the ideal case, is shown in Figure 3. It occurs when two referential expressions have antecedents that are translations of one another. This situation occurred in 63.3% of the referential expressions from MUC-6 and in 58.7% of the MUC-7 references. Over 50% of these are pronouns or named entities. However, all the non-ideal cases are more interesting for SWIZZLE, since they port knowledge that enhances system performance. Case 2 occurs when the antecedents are not translations, but belong to or corefer with elements of some coreference chains that were already established. Moreover, one of the antecedents is textually closer to its referent. Figure 4 illustrates the case when the English antecedent is closer to the referent than the Romanian one.",
476
- "cite_spans": [],
477
- "ref_spans": [
478
- {
479
- "start": 56,
480
- "end": 62,
481
- "text": "Figure",
482
- "ref_id": null
483
- },
484
- {
485
- "start": 678,
486
- "end": 686,
487
- "text": "Figure 4",
488
- "ref_id": "FIGREF3"
489
- }
490
- ],
491
- "eq_spans": [],
492
- "section": "Romani an Text",
493
- "sec_num": null
494
- },
495
- {
496
- "text": "(1) If the heuristic H(E) used to resolve the reference in the English text has higher priority than H(R), which was used to resolve the reference from the Romanian text, then we first search for RT, the Romanian translation of EA, the English antecedent. In the next step, we add heuristic H1 that resolves RR into RT, and give it a higher priority than H(R). Finally, we also add heuristic H2 that links RTto RA when there is at least one translation between the elements of the coreference chains containing EA and ET respectively.",
497
- "cite_spans": [],
498
- "ref_spans": [],
499
- "eq_spans": [],
500
- "section": "SWIZZLE Solutions:",
501
- "sec_num": null
502
- },
503
- {
504
- "text": "(2) If H(R) has higher priority than H(E), heuristic H3 is added while H(E) is removed. We also add //4 that relates ER to ET, the English translation of RA. Case 3 occurs when at least one of the antecedents starts a new coreference chain (i.e., no coreferring antecedent can be found in the current chains).",
505
- "cite_spans": [],
506
- "ref_spans": [],
507
- "eq_spans": [],
508
- "section": "SWIZZLE Solutions:",
509
- "sec_num": null
510
- },
511
- {
512
- "text": "If one of the antecedents corefers with an element from a coreference chain, then the antecedent in the opposite language is its translation. Otherwise, SNIZZLE chooses the antecedent returned by the heuristic with highest priority.",
513
- "cite_spans": [],
514
- "ref_spans": [],
515
- "eq_spans": [],
516
- "section": "SWIZZLE Solution:",
517
- "sec_num": null
518
- },
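The three cases can be condensed into the following schematic decision procedure. It is a compressed reading of the solutions above: the bookkeeping that adds or removes heuristics (H1-H4) is folded into substituting the translated antecedent of the more trusted side, and all predicates are assumed helpers rather than real COCKTAIL/SWIZZLE APIs.

    def reconcile_antecedents(en, ro, translated, in_chain,
                              prio_en, prio_ro, translate):
        if translated(en, ro):                    # Case 1: ideal agreement
            return en, ro
        if in_chain(en) and in_chain(ro):         # Case 2: trust the side whose
            if prio_en >= prio_ro:                # heuristic has higher priority
                return en, translate(en, "ro")
            return translate(ro, "en"), ro
        if in_chain(en):                          # Case 3: follow the antecedent
            return en, translate(en, "ro")        # that corefers with a chain
        if in_chain(ro):
            return translate(ro, "en"), ro
        if prio_en >= prio_ro:                    # otherwise, highest priority wins
            return en, translate(en, "ro")
        return translate(ro, "en"), ro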
519
- {
520
- "text": "The foremost contribution of SWIZZLE was that it improved coreference resolution over both English and Romanian texts when compared to monolingual coreference resolution performance in terms of precision and recall. Also relevant was the contribution of SNIZZLE to the process of understanding the cultural differences expressed in language and the way these differences influence coreference resolution. Because we do not have sufficient space to discuss this issue in detail here, let us state, in short, that English is more economical than Romanian in terms of referential expressions. However the referential expressions in Romanian contribute to the resolution of some of the most difficult forms of coreference in English. Table 4 summarizes the precision results for both English and Romanian coreference. The results indicate that the English coreference is more precise than the Romanian coreference, but SNIZZLE improves coreference resolution in both languages. There were 64% cases when the English coreference was resolved by a heuristic with higher priority than the corresponding heuristic for the Romanian coun-terpart. This result explains why there is better precision enhancement for the English coreference. Table 5 also illustrates the recall results. The advantage of the data-driven coreference resolution over other methods is based on its better recall performance. This is explained by the fact that this method captures a larger variety of coreference patterns. Even though other coreference resolution systems perform better for some specific forms of reference, their recall results are surpassed by the datadriven approach. Multilingual coreference in turn improves more the precision than the recall of the monolingual data-driven coreference systems.",
521
- "cite_spans": [],
522
- "ref_spans": [
523
- {
524
- "start": 730,
525
- "end": 737,
526
- "text": "Table 4",
527
- "ref_id": "TABREF9"
528
- },
529
- {
530
- "start": 1229,
531
- "end": 1236,
532
- "text": "Table 5",
533
- "ref_id": "TABREF10"
534
- }
535
- ],
536
- "eq_spans": [],
537
- "section": "Results",
538
- "sec_num": "4"
539
- },
540
- {
541
- "text": "In addition, Table 5 shows that the English coreference results in better recall than Romanian coreference. However, the recall shows a decrease for both languages for SNIZZLE because imprecise coreference links are deleted. As is usually the case, deleting data lowers the recall. All results were obtained by using the automatic scorer program developed for the MUC evaluations.",
542
- "cite_spans": [],
543
- "ref_spans": [
544
- {
545
- "start": 13,
546
- "end": 20,
547
- "text": "Table 5",
548
- "ref_id": "TABREF10"
549
- }
550
- ],
551
- "eq_spans": [],
552
- "section": "English",
553
- "sec_num": null
554
- },
555
- {
556
- "text": "We have introduced a new data-driven method for multilingual coreference resolution, implemented in the SWIZZLE system. The results of this method are encouraging since they show clear improvements over monolingual coreference resolution. Currently, we are also considering the effects of a bootstrapping algorithm for multilingual coreference resolution. Through this procedure we would learn concurrently semantic consistency knowledge and better performing heuristic rules. To be able to develop such a learning approach, we must first develop a method for automatic recognition of multilingual referential expressions.",
557
- "cite_spans": [],
558
- "ref_spans": [],
559
- "eq_spans": [],
560
- "section": "Conclusions",
561
- "sec_num": "5"
562
- },
563
- {
564
- "text": "We also believe that a better performance evaluation of SidIZZLE can be achieved by measuring its impact on several complex applications. We intend to analyze the performance of SIdIZZLE when it is used as a module in an IE system, and separately in a Question/Answering system.",
565
- "cite_spans": [],
566
- "ref_spans": [],
567
- "eq_spans": [],
568
- "section": "Conclusions",
569
- "sec_num": "5"
570
- }
571
- ],
572
- "back_matter": [
573
- {
574
- "text": "Acknowledgements This paper is dedicated to the memory of our friend Megumi Kameyama, who inspired this work.",
575
- "cite_spans": [],
576
- "ref_spans": [],
577
- "eq_spans": [],
578
- "section": "acknowledgement",
579
- "sec_num": null
580
- }
581
- ],
582
- "bib_entries": {
583
- "BIBREF0": {
584
- "ref_id": "b0",
585
- "title": "The SRI MUC-5 JV-FASTUS Information Extraction System",
586
- "authors": [
587
- {
588
- "first": "Douglas",
589
- "middle": [
590
- "E"
591
- ],
592
- "last": "Appelt",
593
- "suffix": ""
594
- },
595
- {
596
- "first": "Jerry",
597
- "middle": [
598
- "R"
599
- ],
600
- "last": "Hobbs",
601
- "suffix": ""
602
- },
603
- {
604
- "first": "John",
605
- "middle": [],
606
- "last": "Bear",
607
- "suffix": ""
608
- },
609
- {
610
- "first": "David",
611
- "middle": [],
612
- "last": "Israel",
613
- "suffix": ""
614
- },
615
- {
616
- "first": "Megumi",
617
- "middle": [],
618
- "last": "Kameyama",
619
- "suffix": ""
620
- },
621
- {
622
- "first": "Mabry",
623
- "middle": [],
624
- "last": "Tyson",
625
- "suffix": ""
626
- }
627
- ],
628
- "year": 1993,
629
- "venue": "Proceedings of the Fifth Message Understanding Conference (MUC-5)",
630
- "volume": "",
631
- "issue": "",
632
- "pages": "",
633
- "other_ids": {},
634
- "num": null,
635
- "urls": [],
636
- "raw_text": "Douglas E. Appelt, Jerry R. Hobbs, John Bear, David Israel, Megumi Kameyama and Mabry Tyson. 1993. The SRI MUC-5 JV-FASTUS Information Extraction System. In Proceedings of the Fifth Message Under- standing Conference (MUC-5).",
637
- "links": null
638
- },
639
- "BIBREF1": {
640
- "ref_id": "b1",
641
- "title": "CogNIAC: high precision coreference with limited knowledge and linguistic resources",
642
- "authors": [
643
- {
644
- "first": "Brack",
645
- "middle": [],
646
- "last": "Baldwin",
647
- "suffix": ""
648
- }
649
- ],
650
- "year": 1997,
651
- "venue": "Proceedings of the ACL '97/EACL '97 Workshop on Operational factors in practical, robust anaphora resolution",
652
- "volume": "",
653
- "issue": "",
654
- "pages": "38--45",
655
- "other_ids": {},
656
- "num": null,
657
- "urls": [],
658
- "raw_text": "Brack Baldwin. 1997. CogNIAC: high precision corefer- ence with limited knowledge and linguistic resources. In Proceedings of the ACL '97/EACL '97 Workshop on Operational factors in practical, robust anaphora res- olution, pages 38-45, Madrid, Spain.",
659
- "links": null
660
- },
661
- "BIBREF3": {
662
- "ref_id": "b3",
663
- "title": "Corpus-Based Identification of Non-Anaphoric Noun Phrases",
664
- "authors": [
665
- {
666
- "first": "David",
667
- "middle": [],
668
- "last": "Bean",
669
- "suffix": ""
670
- },
671
- {
672
- "first": "Ellen",
673
- "middle": [],
674
- "last": "Riloff",
675
- "suffix": ""
676
- }
677
- ],
678
- "year": 1999,
679
- "venue": "Proceedings of the 37th Conference of the Assosiation for Computatioanl Linguistics (A CL-99)",
680
- "volume": "",
681
- "issue": "",
682
- "pages": "373--380",
683
- "other_ids": {},
684
- "num": null,
685
- "urls": [],
686
- "raw_text": "David Bean and Ellen Riloff. 1999. Corpus-Based Iden- tification of Non-Anaphoric Noun Phrases. In Pro- ceedings of the 37th Conference of the Assosiation for Computatioanl Linguistics (A CL-99), pages 373-380.",
687
- "links": null
688
- },
689
- "BIBREF4": {
690
- "ref_id": "b4",
691
- "title": "A simple rule-based part of speech tagger",
692
- "authors": [
693
- {
694
- "first": "Eric",
695
- "middle": [],
696
- "last": "Brill",
697
- "suffix": ""
698
- }
699
- ],
700
- "year": 1992,
701
- "venue": "Proceedings of the Third Conference on Applied Natural Language Processing",
702
- "volume": "",
703
- "issue": "",
704
- "pages": "152--155",
705
- "other_ids": {},
706
- "num": null,
707
- "urls": [],
708
- "raw_text": "Eric Brill. A simple rule-based part of speech tagger. In Proceedings of the Third Conference on Applied Nat- ural Language Processing, pages 152-155, 1992.",
709
- "links": null
710
- },
711
- "BIBREF5": {
712
- "ref_id": "b5",
713
- "title": "Avoiding Pitfalls When Learning Recursive Theories",
714
- "authors": [
715
- {
716
- "first": "F",
717
- "middle": [],
718
- "last": "Joseph",
719
- "suffix": ""
720
- },
721
- {
722
- "first": "Ross",
723
- "middle": [],
724
- "last": "Cameron-Jones",
725
- "suffix": ""
726
- },
727
- {
728
- "first": "",
729
- "middle": [],
730
- "last": "Quinlan",
731
- "suffix": ""
732
- }
733
- ],
734
- "year": 1993,
735
- "venue": "Proceedings of the 13th International Joint Conference on Artificial Intelligence (IJCAI-93)",
736
- "volume": "",
737
- "issue": "",
738
- "pages": "1050--1055",
739
- "other_ids": {},
740
- "num": null,
741
- "urls": [],
742
- "raw_text": "Joseph F. Cameron-Jones and Ross Quinlan. 1993. Avoiding Pitfalls When Learning Recursive Theories. In Proceedings of the 13th International Joint Confer- ence on Artificial Intelligence (IJCAI-93), pages 1050- 1055.",
743
- "links": null
744
- },
745
- "BIBREF6": {
746
- "ref_id": "b6",
747
- "title": "Noun phrase coreference as clustering",
748
- "authors": [
749
- {
750
- "first": "Claire",
751
- "middle": [],
752
- "last": "Cardie",
753
- "suffix": ""
754
- },
755
- {
756
- "first": "Kiri",
757
- "middle": [],
758
- "last": "Wagstaff",
759
- "suffix": ""
760
- }
761
- ],
762
- "year": 1999,
763
- "venue": "Proceedings of the Joint Conference on Empirical Methods in NLP and Very Large Corpora",
764
- "volume": "",
765
- "issue": "",
766
- "pages": "82--89",
767
- "other_ids": {},
768
- "num": null,
769
- "urls": [],
770
- "raw_text": "Claire Cardie and Kiri Wagstaff. 1999. Noun phrase coreference as clustering. In Proceedings of the Joint Conference on Empirical Methods in NLP and Very Large Corpora, pages 82-89.",
771
- "links": null
772
- },
773
- "BIBREF8": {
774
- "ref_id": "b8",
775
- "title": "Anaphora Resolution: A Multi-Strategy Approach",
776
- "authors": [],
777
- "year": null,
778
- "venue": "Proceedings of the 6th Workshop on Very Large Corpora, (COLING/A CL '98)",
779
- "volume": "",
780
- "issue": "",
781
- "pages": "",
782
- "other_ids": {},
783
- "num": null,
784
- "urls": [],
785
- "raw_text": "Anaphora Resolution: A Multi-Strategy Approach. In Proceedings of the 6th Workshop on Very Large Cor- pora, (COLING/A CL '98).",
786
- "links": null
787
- },
788
- "BIBREF9": {
789
- "ref_id": "b9",
790
- "title": "TERMIGHT: Identifying and translating technical terminology",
791
- "authors": [
792
- {
793
- "first": "Ido",
794
- "middle": [],
795
- "last": "Dagan",
796
- "suffix": ""
797
- },
798
- {
799
- "first": "Ken",
800
- "middle": [
801
- "W"
802
- ],
803
- "last": "Church",
804
- "suffix": ""
805
- }
806
- ],
807
- "year": 1994,
808
- "venue": "Proceedings of the ~th ACL Conference on Applied Natural Language Processing",
809
- "volume": "",
810
- "issue": "",
811
- "pages": "",
812
- "other_ids": {},
813
- "num": null,
814
- "urls": [],
815
- "raw_text": "Ido Dagan and Ken W. Church. 1994. TERMIGHT: Identifying and translating technical terminology. In Proceedings of the ~th ACL Conference on Applied Natural Language Processing (ANLP-94).",
816
- "links": null
817
- },
818
- "BIBREF10": {
819
- "ref_id": "b10",
820
- "title": "Lexical Acquisition for a Romanian WordNet",
821
- "authors": [
822
- {
823
- "first": "Sanda",
824
- "middle": [
825
- "M"
826
- ],
827
- "last": "Harabagiu",
828
- "suffix": ""
829
- }
830
- ],
831
- "year": 1999,
832
- "venue": "Proceeding of the 3rd European Summer School on Computational Linguistics",
833
- "volume": "",
834
- "issue": "",
835
- "pages": "",
836
- "other_ids": {},
837
- "num": null,
838
- "urls": [],
839
- "raw_text": "Sanda M. Harabagiu. 1999. Lexical Acquisition for a Romanian WordNet. Proceeding of the 3rd European Summer School on Computational Linguistics.",
840
- "links": null
841
- },
842
- "BIBREF11": {
843
- "ref_id": "b11",
844
- "title": "Knowledge-Lean Coreference Resolution and its Relation to Textual Cohesion and Coherence",
845
- "authors": [
846
- {
847
- "first": "M",
848
- "middle": [],
849
- "last": "Sanda",
850
- "suffix": ""
851
- },
852
- {
853
- "first": "Steve",
854
- "middle": [
855
- "J"
856
- ],
857
- "last": "Harabagiu",
858
- "suffix": ""
859
- },
860
- {
861
- "first": "",
862
- "middle": [],
863
- "last": "Maiorano",
864
- "suffix": ""
865
- }
866
- ],
867
- "year": 1999,
868
- "venue": "Proceedings of the Workshop on the Relation of Discourse/Dialogue Structure and Reference, ACL'98",
869
- "volume": "",
870
- "issue": "",
871
- "pages": "29--38",
872
- "other_ids": {},
873
- "num": null,
874
- "urls": [],
875
- "raw_text": "Sanda M. Harabagiu and Steve J. Maiorano. 1999. Knowledge-Lean Coreference Resolution and its Re- lation to Textual Cohesion and Coherence. In Pro- ceedings of the Workshop on the Relation of Dis- course/Dialogue Structure and Reference, ACL'98, pages 29-38.",
876
- "links": null
877
- },
878
- "BIBREF12": {
879
- "ref_id": "b12",
880
- "title": "Resolving pronoun references",
881
- "authors": [
882
- {
883
- "first": "Jerry",
884
- "middle": [
885
- "R"
886
- ],
887
- "last": "Hobbs",
888
- "suffix": ""
889
- }
890
- ],
891
- "year": null,
892
- "venue": "Lingua",
893
- "volume": "44",
894
- "issue": "",
895
- "pages": "311--338",
896
- "other_ids": {},
897
- "num": null,
898
- "urls": [],
899
- "raw_text": "Jerry R. Hobbs. Resolving pronoun references. Lingua, 44:311-338.",
900
- "links": null
901
- },
902
- "BIBREF13": {
903
- "ref_id": "b13",
904
- "title": "Probabilistic Coreference in Information Extraction",
905
- "authors": [
906
- {
907
- "first": "Andrew",
908
- "middle": [],
909
- "last": "Kehler",
910
- "suffix": ""
911
- }
912
- ],
913
- "year": 1997,
914
- "venue": "Proceedings of the Second Conference on Empirical Methods in Natural Language Processing (SIGDAT)",
915
- "volume": "",
916
- "issue": "",
917
- "pages": "163--173",
918
- "other_ids": {},
919
- "num": null,
920
- "urls": [],
921
- "raw_text": "Andrew Kehler. 1997. Probabilistic Coreference in In- formation Extraction. In Proceedings of the Second Conference on Empirical Methods in Natural Lan- guage Processing (SIGDAT), pages 163-173.",
922
- "links": null
923
- },
924
- "BIBREF14": {
925
- "ref_id": "b14",
926
- "title": "An algorithm for pronominal anaphora resolution",
927
- "authors": [
928
- {
929
- "first": "Shalom",
930
- "middle": [],
931
- "last": "Lappin",
932
- "suffix": ""
933
- },
934
- {
935
- "first": "Herbert",
936
- "middle": [],
937
- "last": "Leass",
938
- "suffix": ""
939
- }
940
- ],
941
- "year": 1994,
942
- "venue": "Computational Linguistics",
943
- "volume": "20",
944
- "issue": "4",
945
- "pages": "535--562",
946
- "other_ids": {},
947
- "num": null,
948
- "urls": [],
949
- "raw_text": "Shalom Lappin and Herbert Leass. 1994. An algorithm for pronominal anaphora resolution. Computational Linguistics, 20(4):535-562.",
950
- "links": null
951
- },
952
- "BIBREF15": {
953
- "ref_id": "b15",
954
- "title": "Bootstrapping for Text Learning Tasks",
955
- "authors": [
956
- {
957
- "first": "Rosie",
958
- "middle": [],
959
- "last": "Jones",
960
- "suffix": ""
961
- },
962
- {
963
- "first": "Andrew",
964
- "middle": [],
965
- "last": "Mccallum",
966
- "suffix": ""
967
- },
968
- {
969
- "first": "Kevin",
970
- "middle": [],
971
- "last": "Nigam",
972
- "suffix": ""
973
- },
974
- {
975
- "first": "Ellen",
976
- "middle": [],
977
- "last": "Riloff",
978
- "suffix": ""
979
- }
980
- ],
981
- "year": 1999,
982
- "venue": "Proceedings of the IJCAI-99 Workshop on Text Mining: Foundations, Techniques, and Applications",
983
- "volume": "",
984
- "issue": "",
985
- "pages": "",
986
- "other_ids": {},
987
- "num": null,
988
- "urls": [],
989
- "raw_text": "Rosie Jones, Andrew McCallum, Kevin Nigam and Ellen Riloff. 1999. Bootstrapping for Text Learning Tasks. In Proceedings of the IJCAI-99 Workshop on Text Mining: Foundations, Techniques, and Applications.",
990
- "links": null
991
- },
992
- "BIBREF16": {
993
- "ref_id": "b16",
994
- "title": "Recognizing Referential Links: An Information Extraction Perspective",
995
- "authors": [
996
- {
997
- "first": "Megumi",
998
- "middle": [],
999
- "last": "Kameyama",
1000
- "suffix": ""
1001
- }
1002
- ],
1003
- "year": 1997,
1004
- "venue": "Proceedings of the Workshop on Operational Factors in Practical, Robust Anaphora Resolution for Unrestricted Texts, (ACL-97/EACL-97)",
1005
- "volume": "",
1006
- "issue": "",
1007
- "pages": "46--53",
1008
- "other_ids": {},
1009
- "num": null,
1010
- "urls": [],
1011
- "raw_text": "Megumi Kameyama. 1997. Recognizing Referential Links: An Information Extraction Perspective. In Proceedings of the Workshop on Operational Factors in Practical, Robust Anaphora Resolution for Un- restricted Texts, (ACL-97/EACL-97), pages 46-53, Madrid, Spain.",
1012
- "links": null
1013
- },
1014
- "BIBREF17": {
1015
- "ref_id": "b17",
1016
- "title": "Anaphora for everyone: Pronominal anaphora resolution without a parser",
1017
- "authors": [
1018
- {
1019
- "first": "Christopher",
1020
- "middle": [],
1021
- "last": "Kennedy",
1022
- "suffix": ""
1023
- },
1024
- {
1025
- "first": "Branimir",
1026
- "middle": [],
1027
- "last": "Bogureav",
1028
- "suffix": ""
1029
- }
1030
- ],
1031
- "year": 1996,
1032
- "venue": "Proceedings of the 16th International Conference on Computational Linguistics (COLING-96)",
1033
- "volume": "",
1034
- "issue": "",
1035
- "pages": "",
1036
- "other_ids": {},
1037
- "num": null,
1038
- "urls": [],
1039
- "raw_text": "Christopher Kennedy and Branimir Bogureav. 1996. Anaphora for everyone: Pronominal anaphora reso- lution without a parser. In Proceedings of the 16th International Conference on Computational Linguis- tics (COLING-96).",
1040
- "links": null
1041
- },
1042
- "BIBREF18": {
1043
- "ref_id": "b18",
1044
- "title": "WordNet: A Lexical Database. Communication of the A CM",
1045
- "authors": [
1046
- {
1047
- "first": "A",
1048
- "middle": [],
1049
- "last": "George",
1050
- "suffix": ""
1051
- },
1052
- {
1053
- "first": "",
1054
- "middle": [],
1055
- "last": "Miller",
1056
- "suffix": ""
1057
- }
1058
- ],
1059
- "year": 1995,
1060
- "venue": "",
1061
- "volume": "38",
1062
- "issue": "",
1063
- "pages": "39--41",
1064
- "other_ids": {},
1065
- "num": null,
1066
- "urls": [],
1067
- "raw_text": "George A. Miller. 1995. WordNet: A Lexical Database. Communication of the A CM, 38(11):39-41.",
1068
- "links": null
1069
- },
1070
- "BIBREF19": {
1071
- "ref_id": "b19",
1072
- "title": "Robust pronoun resolution with limited knowledge",
1073
- "authors": [
1074
- {
1075
- "first": "Ruslan",
1076
- "middle": [],
1077
- "last": "Mitkov",
1078
- "suffix": ""
1079
- }
1080
- ],
1081
- "year": 1998,
1082
- "venue": "Proceedings of COLING-ACL'98",
1083
- "volume": "",
1084
- "issue": "",
1085
- "pages": "869--875",
1086
- "other_ids": {},
1087
- "num": null,
1088
- "urls": [],
1089
- "raw_text": "Ruslan Mitkov. 1998. Robust pronoun resolution with limited knowledge. In Proceedings of COLING- ACL'98, pages 869-875.",
1090
- "links": null
1091
- },
1092
- "BIBREF20": {
1093
- "ref_id": "b20",
1094
- "title": "Proceedings of the Sixth Message Understanding Conference (MUC-6)",
1095
- "authors": [],
1096
- "year": 1996,
1097
- "venue": "",
1098
- "volume": "",
1099
- "issue": "",
1100
- "pages": "",
1101
- "other_ids": {},
1102
- "num": null,
1103
- "urls": [],
1104
- "raw_text": "1996. Proceedings of the Sixth Message Understanding Conference (MUC-6),Morgan Kaufmann, San Mateo, CA.",
1105
- "links": null
1106
- },
1107
- "BIBREF21": {
1108
- "ref_id": "b21",
1109
- "title": "Proceedings of the Seventh Message Understanding Conference (MUC-7)",
1110
- "authors": [],
1111
- "year": null,
1112
- "venue": "",
1113
- "volume": "",
1114
- "issue": "",
1115
- "pages": "",
1116
- "other_ids": {},
1117
- "num": null,
1118
- "urls": [],
1119
- "raw_text": "Proceedings of the Seventh Message Understand- ing Conference (MUC-7) ,Morgan Kaufmann, San Mateo, CA.",
1120
- "links": null
1121
- },
1122
- "BIBREF22": {
1123
- "ref_id": "b22",
1124
- "title": "Semi-Automatic Acquisition of Domain-Specific Translation Lexicons",
1125
- "authors": [
1126
- {
1127
- "first": "Philip",
1128
- "middle": [],
1129
- "last": "Resnik",
1130
- "suffix": ""
1131
- },
1132
- {
1133
- "first": "I",
1134
- "middle": [
1135
- "Dan"
1136
- ],
1137
- "last": "Melamed",
1138
- "suffix": ""
1139
- }
1140
- ],
1141
- "year": 1997,
1142
- "venue": "Proceedings of the 5th ACL Conference on Applied Natural Language Processing",
1143
- "volume": "",
1144
- "issue": "",
1145
- "pages": "",
1146
- "other_ids": {},
1147
- "num": null,
1148
- "urls": [],
1149
- "raw_text": "Philip Resnik and I. Dan Melamed. 1997. Semi- Automatic Acquisition of Domain-Specific Translation Lexicons. In Proceedings of the 5th ACL Conference on Applied Natural Language Processing (ANLP-97).",
1150
- "links": null
1151
- },
1152
- "BIBREF23": {
1153
- "ref_id": "b23",
1154
- "title": "Learning Dictionaries for Information Extraction by Multi-Level Bootstrapping",
1155
- "authors": [
1156
- {
1157
- "first": "Ellen",
1158
- "middle": [],
1159
- "last": "Riloff",
1160
- "suffix": ""
1161
- },
1162
- {
1163
- "first": "Rosie",
1164
- "middle": [],
1165
- "last": "Jones",
1166
- "suffix": ""
1167
- }
1168
- ],
1169
- "year": 1999,
1170
- "venue": "Proceedings of the Sixteenth National Conference on Artificial Intelligence (AAAI-99)",
1171
- "volume": "",
1172
- "issue": "",
1173
- "pages": "",
1174
- "other_ids": {},
1175
- "num": null,
1176
- "urls": [],
1177
- "raw_text": "Ellen Riloff and Rosie Jones. 1999. Learning Dictionar- ies for Information Extraction by Multi-Level Boot- strapping. In Proceedings of the Sixteenth National Conference on Artificial Intelligence (AAAI-99).",
1178
- "links": null
1179
- },
1180
- "BIBREF24": {
1181
- "ref_id": "b24",
1182
- "title": "Translating collocations for bilingual lexicons: A statistical approach",
1183
- "authors": [
1184
- {
1185
- "first": "Frank",
1186
- "middle": [],
1187
- "last": "Smadja",
1188
- "suffix": ""
1189
- },
1190
- {
1191
- "first": "Katheleen",
1192
- "middle": [
1193
- "R"
1194
- ],
1195
- "last": "Mckeown",
1196
- "suffix": ""
1197
- },
1198
- {
1199
- "first": "Vasileios",
1200
- "middle": [],
1201
- "last": "Hatzivassiloglou",
1202
- "suffix": ""
1203
- }
1204
- ],
1205
- "year": 1996,
1206
- "venue": "Computational Linguistics",
1207
- "volume": "21",
1208
- "issue": "1",
1209
- "pages": "1--38",
1210
- "other_ids": {},
1211
- "num": null,
1212
- "urls": [],
1213
- "raw_text": "Frank Smadja, Katheleen R. McKeown and Vasileios Hatzivassiloglou. 1996. Translating collocations for bilingual lexicons: A statistical approach. Computa- tional Linguistics , 21(1):1-38.",
1214
- "links": null
1215
- }
1216
- },
1217
- "ref_entries": {
1218
- "FIGREF0": {
1219
- "type_str": "figure",
1220
- "num": null,
1221
- "uris": null,
1222
- "text": "Three coreference structures."
1223
- },
1224
- "FIGREF1": {
1225
- "type_str": "figure",
1226
- "num": null,
1227
- "uris": null,
1228
- "text": "Bootstrapping new heuristics."
1229
- },
1230
- "FIGREF2": {
1231
- "type_str": "figure",
1232
- "num": null,
1233
- "uris": null,
1234
- "text": "Case 1 of multilingual coreference"
1235
- },
1236
- "FIGREF3": {
1237
- "type_str": "figure",
1238
- "num": null,
1239
- "uris": null,
1240
- "text": "Case 2 of multilingual coreference"
1241
- },
1242
- "TABREF1": {
1243
- "type_str": "table",
1244
- "num": null,
1245
- "html": null,
1246
- "text": "Heuristics for 3rd person pronouns oHeuristie 1-Pronoun(H1Pron) Search in the same sentence for the same 3rd person pronoun Pros' if (Pron' belongs to coreference chain CC) and there is an element from CC which is closest to Pron in Text, Pick that element.",
1247
- "content": "<table><tr><td/><td>Heuristics for nominal reference</td></tr><tr><td/><td>o Heuristic 1-Nominal(HINom )</td></tr><tr><td/><td>if (Noun is the head of an appositive)</td></tr><tr><td/><td>then Pick the preceding NP.</td></tr><tr><td/><td>oHeuristic 2-Nominal(H2Nom)</td></tr><tr><td/><td>if (Noun belongs to an NP, Search for NP'</td></tr><tr><td/><td>such that Noun'=same_name(head(NP),head(NP'))</td></tr><tr><td>else Pick Pron'.</td><td>or</td></tr><tr><td>oHeuristic 2-Pronoun(H2Pron)</td><td>Noun'--same_name(adjunct(NP), adjunct(NP')))</td></tr><tr><td>Search for PN, the closest proper name from Pron</td><td>then if (Noun' belongs to coreference chain CC)</td></tr><tr><td>if (PN agrees in number and gender with Pros)</td><td>then Pick the element from CC which is</td></tr><tr><td>if (PN belongs to coreference chain CC)</td><td>closest to Noun in Text.</td></tr><tr><td>then Pick the element from CC which is</td><td>else Pick Noun'.</td></tr><tr><td>closest to Pros in Text.</td><td>oHeuristic 3-Nominal(H3Nom)</td></tr><tr><td>else Pick PN.</td><td>if Noun is the head of an NP</td></tr><tr><td>o Heuristic 3-Pronoun( H3Pron )</td><td>then Search for proper name PN</td></tr><tr><td>Search for Noun, the closest noun from Pros</td><td>such that head(PN)=Noun</td></tr><tr><td>if (Noun agrees in number and gender with Pros)</td><td>if (PN belongs to coreference chain CC)</td></tr><tr><td>if (Noun belongs to coreference chain CC)</td><td/></tr><tr><td>else Pick Noun</td><td/></tr></table>"
1248
- },
1249
- "TABREF2": {
1250
- "type_str": "table",
1251
- "num": null,
1252
- "html": null,
1253
- "text": "Best performing heuristics implemented in COCKTAIL",
1254
- "content": "<table/>"
1255
- },
1256
- "TABREF3": {
1257
- "type_str": "table",
1258
- "num": null,
1259
- "html": null,
1260
- "text": "Examples of coreference resolution. The same annotated index indicates coreference.",
1261
- "content": "<table/>"
1262
- },
1263
- "TABREF5": {
1264
- "type_str": "table",
1265
- "num": null,
1266
- "html": null,
1267
- "text": "Since the translations often introduce new coreferring expressions in the same chain, the new expressions are given new, unused ID numbers. Whenever present, the REF tag indicates the ID of the antecedent, whereas the MIN tag indicates the minimal reference expression.",
1268
- "content": "<table><tr><td>For example, Table 3 lists corresponding English</td></tr><tr><td>and Romanian fragments of coreference chains from</td></tr><tr><td>the original MUC-6 Wall Street Journal document</td></tr><tr><td>DOCNO: 930729-0143.</td></tr><tr><td>Table 3 also shows the original MUC coreference</td></tr><tr><td>SGML annotations.</td></tr></table>"
1269
- },
1270
- "TABREF6": {
1271
- "type_str": "table",
1272
- "num": null,
1273
- "html": null,
1274
- "text": "Engl i sh Text.... :rr-Z, la ,on .......",
1275
- "content": "<table/>"
1276
- },
1277
- "TABREF7": {
1278
- "type_str": "table",
1279
- "num": null,
1280
- "html": null,
1281
- "text": "Text chains E .................. H4~ ................. Translation ER ~ ............................ Translation",
1282
- "content": "<table><tr><td/><td colspan=\"2\">Romani an Text</td></tr><tr><td/><td>\u2022 ~ .........</td><td>RA</td></tr><tr><td/><td>'~</td><td>\u00a5( R) RR</td></tr><tr><td>ER: English reference</td><td colspan=\"2\">RR: Romanian reference</td></tr><tr><td>EA: English antecedent</td><td colspan=\"2\">RA: Romanian antecedent</td></tr><tr><td>ET: English translation</td><td colspan=\"2\">RT: Romanian translation</td></tr><tr><td>of Romanian antecedent</td><td colspan=\"2\">of English antecedent</td></tr></table>"
1283
- },
1284
- "TABREF9": {
1285
- "type_str": "table",
1286
- "num": null,
1287
- "html": null,
1288
- "text": "",
1289
- "content": "<table><tr><td/><td/><td/><td>Total</td></tr><tr><td/><td/><td/><td>84%</td></tr><tr><td/><td/><td/><td>72%</td></tr><tr><td/><td/><td/><td>87%</td></tr><tr><td/><td/><td/><td>76%</td></tr><tr><td/><td colspan=\"2\">: Coreference precision</td><td/></tr><tr><td/><td>Nominal</td><td colspan=\"2\">Pronominal Total</td></tr><tr><td>English</td><td>69%</td><td>89%</td><td>78%</td></tr><tr><td>Romanian</td><td>63%</td><td>83%</td><td>72%</td></tr><tr><td>SWIZZLE on</td><td>66%</td><td>87%</td><td>77%</td></tr><tr><td>English</td><td/><td/><td/></tr><tr><td>SWIZZLE on</td><td>61%</td><td>80%</td><td>70%</td></tr><tr><td>Romanian</td><td/><td/><td/></tr></table>"
1290
- },
1291
- "TABREF10": {
1292
- "type_str": "table",
1293
- "num": null,
1294
- "html": null,
1295
- "text": "",
1296
- "content": "<table/>"
1297
- }
1298
- }
1299
- }
1300
- }

Full_text_JSON/prefixA/json/A00/A00-1021.json DELETED
@@ -1,1151 +0,0 @@
1
- {
2
- "paper_id": "A00-1021",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:26.506117Z"
6
- },
7
- "title": "Ranking suspected answers to natural language questions using predictive annotation",
8
- "authors": [
9
- {
10
- "first": "Dragomir",
11
- "middle": [
12
- "R"
13
- ],
14
- "last": "Radev",
15
- "suffix": "",
16
- "affiliation": {},
17
- "email": "radev@umich@edu"
18
- },
19
- {
20
- "first": "John",
21
- "middle": [],
22
- "last": "Prager",
23
- "suffix": "",
24
- "affiliation": {},
25
- "email": "[email protected]"
26
- },
27
- {
28
- "first": "Valerie",
29
- "middle": [],
30
- "last": "Samn",
31
- "suffix": "",
32
- "affiliation": {},
33
- "email": ""
34
- }
35
- ],
36
- "year": "",
37
- "venue": null,
38
- "identifiers": {},
39
- "abstract": "In this paper, we describe a system to rank suspected answers to natural language questions. We process both corpus and query using a new technique, predictive annotation, which augments phrases in texts with labels anticipating their being targets of certain kinds of questions. Given a natural language question, our IR system returns a set of matching passages, which we then rank using a linear function of seven predictor variables. We provide an evaluation of the techniques based on results from the TREC Q&A evaluation in which our system participated.",
40
- "pdf_parse": {
41
- "paper_id": "A00-1021",
42
- "_pdf_hash": "",
43
- "abstract": [
44
- {
45
- "text": "In this paper, we describe a system to rank suspected answers to natural language questions. We process both corpus and query using a new technique, predictive annotation, which augments phrases in texts with labels anticipating their being targets of certain kinds of questions. Given a natural language question, our IR system returns a set of matching passages, which we then rank using a linear function of seven predictor variables. We provide an evaluation of the techniques based on results from the TREC Q&A evaluation in which our system participated.",
46
- "cite_spans": [],
47
- "ref_spans": [],
48
- "eq_spans": [],
49
- "section": "Abstract",
50
- "sec_num": null
51
- }
52
- ],
53
- "body_text": [
54
- {
55
- "text": "Question Answering is a task that calls for a combination of techniques from Information Retrieval and Natural Language Processing. The former has the advantage of years of development of efficient techniques for indexing and searching large collections of data, but lacks of any meaningful treatment of the semantics of the query or the texts indexed. NLP tackles the semantics, but tends to be computationally expensive.",
56
- "cite_spans": [],
57
- "ref_spans": [],
58
- "eq_spans": [],
59
- "section": "Introduction",
60
- "sec_num": "1"
61
- },
62
- {
63
- "text": "We have attempted to carve out a middle ground, whereby we use a modified IR system augmented by shallow NL parsing. Our approach was motivated by the following problem with traditional IR systems. Suppose the user asks \"Where did <some event> happen?\". If the system does no pre-processing of the query, then \"where\" will be included in the bag of words submitted to the search engine, but this will not be helpful since the target text will be unlikely to contain the word \"where\". If the word is stripped out as a stop-word, then * The work presented in this paper was performed while the first and third authors were at 1BM Research.",
64
- "cite_spans": [],
65
- "ref_spans": [],
66
- "eq_spans": [],
67
- "section": "Introduction",
68
- "sec_num": "1"
69
- },
70
- {
71
- "text": "the search engine will have no idea that a location is sought. Our approach, called predictive annotation, is to augment the query with semantic category markers (which we call QA-Tokens) , in this case with the PLACES token, and also to label with QA-Tokens all occurrences in text that are recognized entities, (for example, places). Then traditional bag-ofwords matching proceeds successfully, and will return matching passages. The answer-selection process then looks for and ranks in these passages occurrences of phrases containing the particular QA-Token(s) from the augmented query. This classification of questions is conceptually similar to the query expansion in (Voorhees, 1994) but is expected to achieve much better performance since potentially matching phrases in text are classified in a similar and synergistic way.",
72
- "cite_spans": [
73
- {
74
- "start": 177,
75
- "end": 187,
76
- "text": "QA-Tokens)",
77
- "ref_id": null
78
- },
79
- {
80
- "start": 674,
81
- "end": 690,
82
- "text": "(Voorhees, 1994)",
83
- "ref_id": "BIBREF7"
84
- }
85
- ],
86
- "ref_spans": [],
87
- "eq_spans": [],
88
- "section": "Introduction",
89
- "sec_num": "1"
90
- },
91
- {
92
- "text": "Our system participated in the official TREC Q&A evaluation. For 200 questions in the evaluation set, we were asked to provide a list of 50-byte and 250-byte extracts from a 2-GB corpus. The results are shown in Section 7.",
93
- "cite_spans": [],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "Introduction",
97
- "sec_num": "1"
98
- },
99
- {
100
- "text": "Some techniques used by other participants in the TREC evaluation are paragraph indexing, followed by abductive inference (Harabagiu and Maiorano, 1999) and knowledge-representation combined with information retrieval (Breck et al., 1999) . Some earlier systems related to our work are FaqFinder (Kulyukin et al., 1998) , MURAX (Kupiec, 1993) , which uses an encyclopedia as a knowledge base from which to extract answers, and PROFILE (Radev and McKeown, 1997) which identifies named entities and noun phrases that describe them in text.",
101
- "cite_spans": [
102
- {
103
- "start": 122,
104
- "end": 152,
105
- "text": "(Harabagiu and Maiorano, 1999)",
106
- "ref_id": "BIBREF2"
107
- },
108
- {
109
- "start": 218,
110
- "end": 238,
111
- "text": "(Breck et al., 1999)",
112
- "ref_id": "BIBREF0"
113
- },
114
- {
115
- "start": 296,
116
- "end": 319,
117
- "text": "(Kulyukin et al., 1998)",
118
- "ref_id": "BIBREF3"
119
- },
120
- {
121
- "start": 328,
122
- "end": 342,
123
- "text": "(Kupiec, 1993)",
124
- "ref_id": "BIBREF4"
125
- },
126
- {
127
- "start": 435,
128
- "end": 460,
129
- "text": "(Radev and McKeown, 1997)",
130
- "ref_id": "BIBREF6"
131
- }
132
- ],
133
- "ref_spans": [],
134
- "eq_spans": [],
135
- "section": "Introduction",
136
- "sec_num": "1"
137
- },
138
- {
139
- "text": "Our system ( Figure 1 ) consists of two pieces: an IR component (GuruQA) that which returns matching texts, and an answer selection compo-neat (AnSel/Werlect) that extracts and ranks potential answers from these texts. This paper focuses on the process of ranking potential answers selected by the IR engine, which is itself described in (Prager et al., 1999) . ",
140
- "cite_spans": [
141
- {
142
- "start": 338,
143
- "end": 359,
144
- "text": "(Prager et al., 1999)",
145
- "ref_id": "BIBREF5"
146
- }
147
- ],
148
- "ref_spans": [
149
- {
150
- "start": 13,
151
- "end": 21,
152
- "text": "Figure 1",
153
- "ref_id": null
154
- }
155
- ],
156
- "eq_spans": [],
157
- "section": "System description",
158
- "sec_num": "2"
159
- },
160
- {
161
- "text": "In the context of fact-seeking questions, we made the following observations:",
162
- "cite_spans": [],
163
- "ref_spans": [],
164
- "eq_spans": [],
165
- "section": "The Information Retrieval component",
166
- "sec_num": "2.1"
167
- },
168
- {
169
- "text": "\u2022 In documents that contain the answers, the query terms tend to occur in close proximity to each other.",
170
- "cite_spans": [],
171
- "ref_spans": [],
172
- "eq_spans": [],
173
- "section": "The Information Retrieval component",
174
- "sec_num": "2.1"
175
- },
176
- {
177
- "text": "\u2022 The answers to fact-seeking questions are usually phrases: \"President Clinton\", \"in the Rocky Mountains\", and \"today\").",
178
- "cite_spans": [],
179
- "ref_spans": [],
180
- "eq_spans": [],
181
- "section": "The Information Retrieval component",
182
- "sec_num": "2.1"
183
- },
184
- {
185
- "text": "\u2022 These phrases can be categorized by a set of a dozen or so labels ( Figure 2 ) corresponding to question types.",
186
- "cite_spans": [],
187
- "ref_spans": [
188
- {
189
- "start": 70,
190
- "end": 78,
191
- "text": "Figure 2",
192
- "ref_id": "FIGREF1"
193
- }
194
- ],
195
- "eq_spans": [],
196
- "section": "The Information Retrieval component",
197
- "sec_num": "2.1"
198
- },
199
- {
200
- "text": "\u2022 The phrases can be identified in text by pattern matching techniques (without full NLP).",
201
- "cite_spans": [],
202
- "ref_spans": [],
203
- "eq_spans": [],
204
- "section": "The Information Retrieval component",
205
- "sec_num": "2.1"
206
- },
207
- {
208
- "text": "As a result, we defined a set of about 20 categories, each labeled with its own QA-Token, and built an IR system which deviates from the traditional model in three important aspects.",
209
- "cite_spans": [],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "The Information Retrieval component",
213
- "sec_num": "2.1"
214
- },
215
- {
216
- "text": "\u2022 We process the query against a set of approximately 200 question templates which, may replace some of the query words with a set of QA-Tokens, called a SYNclass. Thus \"Where\" gets mapped to \"PLACES\", but \"How long \" goes to \"@SYN(LENGTH$, DURATIONS)\". Some templates do not cause complete replacement of the matched string. For example, the pattern \"What is the population\" gets replaced by \"NUMBERS population'.",
217
- "cite_spans": [],
218
- "ref_spans": [],
219
- "eq_spans": [],
220
- "section": "The Information Retrieval component",
221
- "sec_num": "2.1"
222
- },
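As an illustration of this template step, the sketch below rewrites a question with a few patterns in the spirit of the roughly 200 real templates; the patterns shown are invented examples, not the actual set.

    import re

    TEMPLATES = [
        (re.compile(r"^where\b", re.I), "PLACE$"),
        (re.compile(r"^how long\b", re.I), "@SYN(LENGTH$, DURATION$)"),
        (re.compile(r"what is the population", re.I), "NUMBER$ population"),
    ]

    def rewrite_query(question):
        # Apply the first matching template; partial replacement is allowed,
        # as in the "What is the population" -> "NUMBER$ population" case.
        for pattern, replacement in TEMPLATES:
            if pattern.search(question):
                return pattern.sub(replacement, question)
        return question

    # rewrite_query("Where did the eruption happen?")
    # -> 'PLACE$ did the eruption happen?'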
223
- {
224
- "text": "\u2022 Before indexing the text, we process it with Textract (Byrd and Ravin, 1998; Wacholder et al., 1997) , which performs lemmatization, and discovers proper names and technical terms. We added a new module (Resporator) which annotates text segments with QA-Tokens using pattern matching. Thus the text \"for 5 centuries\" matches the DURATIONS pattern \"for :CARDINAL _timeperiod\", where :CAR-DINAL is the label for cardinal numbers, and _timeperiod marks a time expression.",
225
- "cite_spans": [
226
- {
227
- "start": 56,
228
- "end": 78,
229
- "text": "(Byrd and Ravin, 1998;",
230
- "ref_id": "BIBREF1"
231
- },
232
- {
233
- "start": 79,
234
- "end": 102,
235
- "text": "Wacholder et al., 1997)",
236
- "ref_id": "BIBREF8"
237
- }
238
- ],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "The Information Retrieval component",
242
- "sec_num": "2.1"
243
- },
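A toy rendering of this annotation step is sketched below: it marks "for <number> <time unit>" spans as DURATION$ directly with a regular expression, whereas the real Resporator matches over Textract's token classes (:CARDINAL, _timeperiod) rather than raw strings.

    import re

    DURATION = re.compile(
        r"\bfor\s+(\d+|a|an)\s+(century|centuries|year|years|month|months|"
        r"week|weeks|day|days|hour|hours)\b", re.I)

    def annotate_durations(text):
        # Wrap each match in a span carrying the QA-Token, mimicking the
        # annotated-corpus format used at indexing time.
        return DURATION.sub(
            lambda m: '<span class="DURATION$">%s</span>' % m.group(0), text)

    # annotate_durations("The empire lasted for 5 centuries.")
    # -> 'The empire lasted <span class="DURATION$">for 5 centuries</span>.'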
244
- {
245
- "text": "\u2022 GuruQA scores text passages instead of documents. We use a simple documentand collection-independent weighting scheme: QA-Tokens get a weight of 400, proper nouns get 200 and any other word -100 (stop words are removed in query processing after the pattern template matching operation).",
246
- "cite_spans": [],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "The Information Retrieval component",
250
- "sec_num": "2.1"
251
- },
252
- {
253
- "text": "The density of matching query tokens within a passage is contributes a score of 1 to 99 (the highest scores occur when all matched terms are consecutive).",
254
- "cite_spans": [],
255
- "ref_spans": [],
256
- "eq_spans": [],
257
- "section": "The Information Retrieval component",
258
- "sec_num": "2.1"
259
- },
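Putting the two preceding paragraphs together, a simplified scorer might look as follows. The term weights (400/200/100) are as stated; the density bonus is a linear proxy, since the exact 1-99 formula is not given, and it only mirrors the stated property that consecutive matches score highest.

    def passage_score(matched_terms, window_size):
        # matched_terms: labels ("qa_token" | "proper_noun" | "word") for the
        # query terms found in the passage window; window_size: its token span.
        weights = {"qa_token": 400, "proper_noun": 200, "word": 100}
        base = sum(weights[t] for t in matched_terms)
        density = max(1, min(99, round(99 * len(matched_terms)
                                       / max(window_size, 1))))
        return base + density

    # passage_score(["qa_token", "proper_noun", "word"], 3) -> 700 + 99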
260
- {
261
- "text": "Predictive Annotation works best for Where, When, What, Which and How+adjective questions than for How+verb and Why questions, since the latter are typically not answered by phrases. However, we observed that \"by\" + the present participle would usually indicate the description of a procedure, so we instantiate a METHODS QA-Token for such occurrences. We have no such QA-Token for Why questions, but we do replace the word \"why\" with \"@SYN(result, cause, because)\", since the occurrence of any of these words usually betokens an explanation.",
262
- "cite_spans": [],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "The Information Retrieval component",
266
- "sec_num": "2.1"
267
- },
268
- {
269
- "text": "3 Answer selection So far, we have described how we retrieve relevant passages that may contain the answer to a query. The output of GuruQA is a list of 10 short passages containing altogether a large number (often more than 30 or 40) of potential answers in the form of phrases annotated with QA-Tokens.",
270
- "cite_spans": [],
271
- "ref_spans": [],
272
- "eq_spans": [],
273
- "section": "The Information Retrieval component",
274
- "sec_num": "2.1"
275
- },
276
- {
277
- "text": "We now describe two algorithms, AnSel and Werlect, which rank the spans returned by Gu-ruQA. AnSel and Werlect 1 use different approaches, which we describe, evaluate and compare and contrast. The output of either system consists of five text extracts per question that contain the likeliest answers to the questions.",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "Answer ranking",
282
- "sec_num": "3.1"
283
- },
284
- {
285
- "text": "The role of answer selection is to decide which among the spans extracted by GuruQA are most likely to contain the precise answer to the questions. Figure 3 contains an example of the data structure passed from GuruQA to our answer selection module. The input consists of four items:",
286
- "cite_spans": [],
287
- "ref_spans": [
288
- {
289
- "start": 148,
290
- "end": 156,
291
- "text": "Figure 3",
292
- "ref_id": "FIGREF2"
293
- }
294
- ],
295
- "eq_spans": [],
296
- "section": "Sample Input to AnSel/Werlect",
297
- "sec_num": "3.2"
298
- },
299
- {
300
- "text": "\u2022 a query (marked with <QUERY> tokens in the example),",
301
- "cite_spans": [],
302
- "ref_spans": [],
303
- "eq_spans": [],
304
- "section": "Sample Input to AnSel/Werlect",
305
- "sec_num": "3.2"
306
- },
307
- {
308
- "text": "\u2022 a list of 10 passages (one of which is shown above),",
309
- "cite_spans": [],
310
- "ref_spans": [],
311
- "eq_spans": [],
312
- "section": "Sample Input to AnSel/Werlect",
313
- "sec_num": "3.2"
314
- },
315
- {
316
- "text": "\u2022 a list of annotated text spans within the passages, annotated with QA-Tokens, and 1 from ANswer SELect and ansWER seLECT, respectively",
317
- "cite_spans": [],
318
- "ref_spans": [],
319
- "eq_spans": [],
320
- "section": "Sample Input to AnSel/Werlect",
321
- "sec_num": "3.2"
322
- },
323
- {
324
- "text": "\u2022 the SYN-class corresponding to the type of question (e.g., \"PERSONS NAMES\").",
325
- "cite_spans": [],
326
- "ref_spans": [],
327
- "eq_spans": [],
328
- "section": "Sample Input to AnSel/Werlect",
329
- "sec_num": "3.2"
330
- },
331
- {
332
- "text": "The text in Figure 3 contains five spans (potential answers), of which three (\"Biography of Margaret Thatcher\", \"Hugo Young\", and \"Margaret Thatcher\") are of types included in the SYN-class for the question (PERSON NAME). The full output of GuruQA for this question includes a total of 14 potential spans (5 PERSONs and 9 NAMEs).",
333
- "cite_spans": [],
334
- "ref_spans": [
335
- {
336
- "start": 12,
337
- "end": 20,
338
- "text": "Figure 3",
339
- "ref_id": "FIGREF2"
340
- }
341
- ],
342
- "eq_spans": [],
343
- "section": "Sample Input to AnSel/Werlect",
344
- "sec_num": "3.2"
345
- },
346
- {
347
- "text": "The answer selection module has two outputs: internal (phrase) and external (text passage).",
348
- "cite_spans": [],
349
- "ref_spans": [],
350
- "eq_spans": [],
351
- "section": "Sample Output of AnSel/Werlect",
352
- "sec_num": "3.3"
353
- },
354
- {
355
- "text": "The internal output is a ranked list of spans as shown in Table 1 . It represents a ranked list of the spans (potential answers) sent by GuruQA.",
356
- "cite_spans": [],
357
- "ref_spans": [
358
- {
359
- "start": 58,
360
- "end": 65,
361
- "text": "Table 1",
362
- "ref_id": "TABREF2"
363
- }
364
- ],
365
- "eq_spans": [],
366
- "section": "Internal output:",
367
- "sec_num": null
368
- },
369
- {
370
- "text": "External output: The external output is a ranked list of 50-byte and 250-byte extracts. These extracts are selected in a way to cover the highest-ranked spans in the list of potential answers. Examples are given later in the paper.",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Internal output:",
375
- "sec_num": null
376
- },
377
- {
378
- "text": "The external output was required for the TREC evaluation while system's internal output can be used in a variety of applications, e.g., to highlight the actual span that we believe is the answer to the question within the context of the passage in which it appears. <p> <NUMBER> 1 </NUMBER> </p> <p><QUERY>Who is the author of the book, \"The Iron Lady: A Biography of Margaret Thatcher\"? </QUERY></p> <p> <PROCESSED_QUERY> @excwin(*dynamic* @weight (200 * Iron_Lady) @weight (200 Biography_of_Margaret_Thatcher) @weight(200 Margaret) @weight(100 author) @weight(100 book) @weight(100 iron) @weight(100 lady) @weight(100 :) @weight(100 biography) @weight(100 thatcher) @weight(400 @syn(PERSON$ NAME$)))</PROCESSED_QUERY></p> <p> <DOC>LA090290-0118</DOC> </p> <p> <SCORE> 1020.8114</SCORE> d/p> <TEXT><p>THE IRON LADY; A <span class=\"NAME\">Biography of Margaret Thatcher </span> by <span class--\"PERSON\">Hugo Young</span> (<span class='ORG\">Farrar , Straus & Giroux</span>) The central riddle revealed here is why, as a woman <span class--'PLACEDEF'>in a man</span>'s world, <span class--'PERSON'>Margaret Thatcher</span> evinces such an exclusionary attitude toward women.</p></TEXT> ",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Internal output:",
383
- "sec_num": null
384
- },
385
- {
386
- "text": "In this section we describe the corpora used for training and evaluation as well as the questions contained in the training and evaluation question sets.",
387
- "cite_spans": [],
388
- "ref_spans": [],
389
- "eq_spans": [],
390
- "section": "Analysis of corpus and question sets",
391
- "sec_num": "4"
392
- },
393
- {
394
- "text": "For both training and evaluation, we used the TREC corpus, consisting of approximately 2 GB of articles from four news agencies.",
395
- "cite_spans": [],
396
- "ref_spans": [],
397
- "eq_spans": [],
398
- "section": "Corpus analysis",
399
- "sec_num": null
400
- },
401
- {
402
- "text": "To train our system, we used 38 questions (see Figure 4 ) for which the answers were provided by NIST.",
403
- "cite_spans": [],
404
- "ref_spans": [
405
- {
406
- "start": 47,
407
- "end": 55,
408
- "text": "Figure 4",
409
- "ref_id": "FIGREF3"
410
- }
411
- ],
412
- "eq_spans": [],
413
- "section": "Training set TR38",
414
- "sec_num": "4.2"
415
- },
416
- {
417
- "text": "The majority of the 200 questions (see Figure 5 ) in the evaluation set (T200) were not substan-tially different from these in TR38, although the introduction of \"why\" and \"how\" questions as well as the wording of questions in the format \"Name X\" made the task slightly harder.",
418
- "cite_spans": [],
419
- "ref_spans": [
420
- {
421
- "start": 39,
422
- "end": 47,
423
- "text": "Figure 5",
424
- "ref_id": "FIGREF4"
425
- }
426
- ],
427
- "eq_spans": [],
428
- "section": "Test set T200",
429
- "sec_num": "4.3"
430
- },
431
- {
432
- "text": "Q: Why did David Koresh ask the FBI for a word processor? A: to record his revelations. Q: How tall is the Matterhorn? A: 14,776 feet 9 inches Q: How tall is the replica of the Matterhorn at Disneyland? A: 147-foot Figure 6 . Q: Why did David Koresh ask the FBI for a word processor? Q: Name the first private citizen to fly in space. Q: What is considered the costliest disaster the insurance industry has ever faced? Q: What did John Hinckley do to impress Jodie Foster? Q: How did Socrates die? Figure 6 : Sample harder questions from T200.",
433
- "cite_spans": [],
434
- "ref_spans": [
435
- {
436
- "start": 215,
437
- "end": 223,
438
- "text": "Figure 6",
439
- "ref_id": null
440
- },
441
- {
442
- "start": 498,
443
- "end": 506,
444
- "text": "Figure 6",
445
- "ref_id": null
446
- }
447
- ],
448
- "eq_spans": [],
449
- "section": "Questlon/Answer (T200)",
450
- "sec_num": null
451
- },
452
- {
453
- "text": "AnSel uses an optimization algorithm with 7 predictive variables to describe how likely a given span is to be the correct answer to a question. The variables are illustrated with examples related to the sample question number 10001 from TR38 \"Who was Johnny Mathis' high school track coach?\". The potential answers (extracted by GuruQA) are shown in Table 2.",
454
- "cite_spans": [],
455
- "ref_spans": [],
456
- "eq_spans": [],
457
- "section": "AnSel",
458
- "sec_num": "5"
459
- },
460
- {
461
- "text": "The seven span features described below were found to correlate with the correct answers.",
462
- "cite_spans": [],
463
- "ref_spans": [],
464
- "eq_spans": [],
465
- "section": "Feature selection",
466
- "sec_num": "5.1"
467
- },
468
- {
469
- "text": "Number: position of the span among M1 spans returned from the hit-list.",
470
- "cite_spans": [],
471
- "ref_spans": [],
472
- "eq_spans": [],
473
- "section": "Feature selection",
474
- "sec_num": "5.1"
475
- },
476
- {
477
- "text": "Rspanno: position of the span among all spans returned within the current passage.",
478
- "cite_spans": [],
479
- "ref_spans": [],
480
- "eq_spans": [],
481
- "section": "Feature selection",
482
- "sec_num": "5.1"
483
- },
484
- {
485
- "text": "Count: number of spans of any span class retrieved within the current passage.",
486
- "cite_spans": [],
487
- "ref_spans": [],
488
- "eq_spans": [],
489
- "section": "Feature selection",
490
- "sec_num": "5.1"
491
- },
492
- {
493
- "text": "Notinq: the number of words in the span that do not appear in the query.",
494
- "cite_spans": [],
495
- "ref_spans": [],
496
- "eq_spans": [],
497
- "section": "Feature selection",
498
- "sec_num": "5.1"
499
- },
500
- {
501
- "text": "Type: the position of the span type in the list of potential span types. Example: Type (\"Lou Vasquez\") = 1, because the span type of \"Lou Vasquez\", namely \"PER-SON\" appears first in the SYN-class \"PER-SON ORG NAME ROLE\".",
502
- "cite_spans": [],
503
- "ref_spans": [],
504
- "eq_spans": [],
505
- "section": "Feature selection",
506
- "sec_num": "5.1"
507
- },
508
- {
509
- "text": "Avgdst: the average distance in words between the beginning of the span and query words that also appear in the passage. Example: given the passage \"Tim O'Donohue, Woodbridge High School's varsity baseball coach, resigned Monday and will be replaced by assistant Johnny Ceballos, Athletic Director Dave Cowen said.\" and the span \"Tim O'Donohue\", the value of avgdst is equal to 8.",
510
- "cite_spans": [],
511
- "ref_spans": [],
512
- "eq_spans": [],
513
- "section": "Feature selection",
514
- "sec_num": "5.1"
515
- },
516
- {
517
- "text": "Sscore: passage relevance as computed by Gu-ruQA.",
518
- "cite_spans": [],
519
- "ref_spans": [],
520
- "eq_spans": [],
521
- "section": "Feature selection",
522
- "sec_num": "5.1"
523
- },
524
- {
533
- "text": "The TOTAL score for a given potential answer is computed as a linear combination of the features described in the previous subsection:",
534
- "cite_spans": [],
535
- "ref_spans": [],
536
- "eq_spans": [],
537
- "section": "AnSel algorithm",
538
- "sec_num": "5.2"
539
- },
540
- {
541
- "text": "TOTAL = ~ w~ , fi i",
542
- "cite_spans": [],
543
- "ref_spans": [],
544
- "eq_spans": [],
545
- "section": "AnSel algorithm",
546
- "sec_num": "5.2"
547
- },
548
- {
549
- "text": "The Mgorithm that the training component of AnSel uses to learn the weights used in the formula is shown in Figure 7 .",
550
- "cite_spans": [],
551
- "ref_spans": [
552
- {
553
- "start": 108,
554
- "end": 116,
555
- "text": "Figure 7",
556
- "ref_id": "FIGREF6"
557
- }
558
- ],
559
- "eq_spans": [],
560
- "section": "AnSel algorithm",
561
- "sec_num": "5.2"
562
- },
563
- {
564
- "text": "For each <question,span> tuple in training set : i. Compute features for each span 2. Compute TOTAL score for each span using current set of weights Figure 8 . For lack of space, we are omitting the 250-byte extracts.",
565
- "cite_spans": [],
566
- "ref_spans": [
567
- {
568
- "start": 149,
569
- "end": 157,
570
- "text": "Figure 8",
571
- "ref_id": null
572
- }
573
- ],
574
- "eq_spans": [],
575
- "section": "AnSel algorithm",
576
- "sec_num": "5.2"
577
- },
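As a concrete illustration of the ranking step just described, here is a minimal Python sketch (not the authors' implementation). The seven feature names and weights follow Figure 7; reading the garbled avgdst weight as -1.0 is an assumption, and the span dictionaries are hypothetical stand-ins for GuruQA's annotated spans.

```python
# Sketch of AnSel's linear ranking: TOTAL = sum_i w_i * f_i.
# Weights as reported in Figure 7 (w_avgdst = -1.0 is an assumed reading).
WEIGHTS = {
    "number": -0.3, "rspanno": -0.5, "count": 3.0, "notinq": 2.0,
    "type": 15.0, "avgdst": -1.0, "sscore": 1.5,
}

def total_score(span):
    """Linear combination of the seven span features."""
    return sum(w * span[name] for name, w in WEIGHTS.items())

def rank_spans(spans, top_n=5):
    """Rank potential answers by descending TOTAL score."""
    return sorted(spans, key=total_score, reverse=True)[:top_n]
```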
578
- {
579
- "text": "The Werlect algorithm used many of the same features of phrases used by AnSel, but employed a different ranking scheme.",
580
- "cite_spans": [],
581
- "ref_spans": [],
582
- "eq_spans": [],
583
- "section": "Werlect",
584
- "sec_num": "6"
585
- },
586
- {
587
- "text": "Unlike AnSel, Werlect is based on a two-step, rule-based process approximating a function with interaction between variables. In the first stage of this algorithm, we assign a rank to -7.53 -9.93 -12.57 -15.87 -19.07 -19.36 -25.22 -25.37 -25.47 -28.37 -29.57 -30,87 -37.40 -40.06 -49.80 -52.52 -56.27 -59.42 -62.77 -71.17 -211.33 -254.16 -259.67 every relevant phrase within each sentence according to how likely it is to be the target answer. Next, we generate and rank each N-byte fragment based on the sentence score given by GuruQA, measures of the fragment's relevance, and the ranks of its component phrases. Unlike AnSel, Werlect was optimized through manual trial-and-error using the TR38 questions.",
588
- "cite_spans": [
589
- {
590
- "start": 184,
591
- "end": 345,
592
- "text": "-7.53 -9.93 -12.57 -15.87 -19.07 -19.36 -25.22 -25.37 -25.47 -28.37 -29.57 -30,87 -37.40 -40.06 -49.80 -52.52 -56.27 -59.42 -62.77 -71.17 -211.33 -254.16 -259.67",
593
- "ref_id": null
594
- }
595
- ],
596
- "ref_spans": [],
597
- "eq_spans": [],
598
- "section": "Approach",
599
- "sec_num": "6.1"
600
- },
601
- {
602
- "text": "Step One: Feature Selection The features considered in Werlect that were also used by AnSel, were Type, Avgdst and Sscore. Two additional features were also taken into account:",
603
- "cite_spans": [],
604
- "ref_spans": [],
605
- "eq_spans": [],
606
- "section": "6.2",
607
- "sec_num": null
608
- },
609
- {
610
- "text": "NotinqW: a modified version of Notinq. As in AnSel, spans that are contained in the query are given a rank of 0. However, partial matches are weighted favorably in some cases. For example, if the question asks, \"Who was Lincoln's Secretary of State?\" a noun phrase that contains \"Secretary of State\" is more likely to be the answer than one that does not. In this example, the phrase, \"Secretary of State William Seward\" is the most likely candidate. This criterion also seems to play a role in the event that Resporator fails to identify rel-evant phrase types. For example, in the training question, \"What shape is a porpoise's tooth?\" the phrase \"spade-shaped\" is correctly selected from among all nouns and adjectives of the sentences returned by Guru-QA.",
611
- "cite_spans": [],
612
- "ref_spans": [],
613
- "eq_spans": [],
614
- "section": "6.2",
615
- "sec_num": null
616
- },
617
- {
618
- "text": "Frequency: how often the span occurs across different passages. For example, the test question, \"How many lives were lost in the Pan Am crash in Lockerbie, Scotland?\" resulted in four potential answers in the first two sentences returned by Guru-QA. Table 3 shows the frequencies of each term, and their eventual influence on the span rank. The repeated occurrence of \"270\", helps promote it to first place.",
619
- "cite_spans": [],
620
- "ref_spans": [],
621
- "eq_spans": [],
622
- "section": "6.2",
623
- "sec_num": null
624
- },
625
- {
626
- "text": "Step two: ranking the sentence spans",
627
- "cite_spans": [],
628
- "ref_spans": [],
629
- "eq_spans": [],
630
- "section": "6.3",
631
- "sec_num": null
632
- },
633
- {
634
- "text": "After each relevant span is assigned a rank, we rank all possible text segments of 50 (or 250) bytes from the hit list based on the sum of the phrase ranks plus additional points for other words in the segment that match the query. The algorithm used by Werlect is shown in Figure 9 . 2 270 7 1 (ranked highest) noted that on the 14 questions we were unable to classify with a QA-Token, Werlect (runs W50 and W250) achieved an MRAR of 3.5 to Ansel's 2.0.",
635
- "cite_spans": [],
636
- "ref_spans": [
637
- {
638
- "start": 274,
639
- "end": 282,
640
- "text": "Figure 9",
641
- "ref_id": "FIGREF8"
642
- }
643
- ],
644
- "eq_spans": [],
645
- "section": "6.3",
646
- "sec_num": null
647
- },
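The selection loop sketched in Figure 9 can be written compactly as follows; this is an illustrative Python rendering under the assumption that each segment is given as a (score, covered_candidates, text) triple, not the system's actual code.

```python
def select_answers(candidates, segments, max_answers=5):
    """Greedy loop from Figure 9: take the highest-ranked segment that
    covers the best remaining candidate, then drop every candidate that
    the chosen segment already covers."""
    segments = sorted(segments, key=lambda s: s[0], reverse=True)
    answers, remaining = [], list(candidates)  # candidates ranked and sorted
    while remaining and len(answers) < max_answers:
        best = remaining[0]
        span = next((s for s in segments if best in s[1]), None)
        if span is None:            # no 50/250-byte segment covers it
            remaining.pop(0)
            continue
        answers.append(span[2])
        remaining = [c for c in remaining if c not in span[1]]
    return answers
```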
648
- {
649
- "text": "The cumulative RAR of A50 on T200 (Table 4) is 63.22 (i.e., we got 49 questions among the 198 right from our first try and 39 others within the first five answers).",
650
- "cite_spans": [],
651
- "ref_spans": [],
652
- "eq_spans": [],
653
- "section": "6.3",
654
- "sec_num": null
655
- },
656
- {
657
- "text": "The performance of A250 on T200 is shown in Table 5 . We were able to answer 71 questions with our first answer and 38 others within our first five answers (cumulative RAR = 85.17).",
658
- "cite_spans": [],
659
- "ref_spans": [
660
- {
661
- "start": 44,
662
- "end": 51,
663
- "text": "Table 5",
664
- "ref_id": null
665
- }
666
- ],
667
- "eq_spans": [],
668
- "section": "6.3",
669
- "sec_num": null
670
- },
671
- {
672
- "text": "To better characterize the performance of our system, we split the 198 questions into 20 groups of 10 questions. Our performance on groups of questions ranged from 0.87 to 5.50 MRAR for A50 and from 1.98 to 7.5 MRAR for A250 (Table 6 ). ",
673
- "cite_spans": [],
674
- "ref_spans": [
675
- {
676
- "start": 225,
677
- "end": 233,
678
- "text": "(Table 6",
679
- "ref_id": null
680
- }
681
- ],
682
- "eq_spans": [],
683
- "section": "6.3",
684
- "sec_num": null
685
- },
686
- {
687
- "text": "In this section, we describe the performance of our system using results from our four official runs.",
688
- "cite_spans": [],
689
- "ref_spans": [],
690
- "eq_spans": [],
691
- "section": "Evaluation",
692
- "sec_num": "7"
693
- },
694
- {
695
- "text": "For each question, the performance is computed as the reciprocal value of the rank (RAR) of the highest-ranked correct answer given by the system. For example, if the system has given the correct answer in three positions: second, third, and fifth, RAR for that question is ! 2\"",
696
- "cite_spans": [],
697
- "ref_spans": [],
698
- "eq_spans": [],
699
- "section": "Evaluation scheme",
700
- "sec_num": "7.1"
701
- },
702
- {
703
- "text": "The Mean Reciprocal Answer Rank (MRAR) is used to compute the overall performance of systems participating in the TREC evaluation: Table 6 : Performance on groups of ten questions Finally, Table 7 shows how our official runs compare to the rest of the 25 official submissions. Our performance using AnSel and 50byte output was 0.430. The performance of Werlect was 0.395. On 250 bytes, AnSel scored 0.319 and Werlect -0.280.",
704
- "cite_spans": [],
705
- "ref_spans": [
706
- {
707
- "start": 131,
708
- "end": 138,
709
- "text": "Table 6",
710
- "ref_id": null
711
- },
712
- {
713
- "start": 189,
714
- "end": 196,
715
- "text": "Table 7",
716
- "ref_id": null
717
- }
718
- ],
719
- "eq_spans": [],
720
- "section": "Evaluation scheme",
721
- "sec_num": "7.1"
722
- },
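For clarity, a small Python sketch of the RAR/MRAR computation defined above, assuming each question is represented by the list of positions (1-5) at which correct answers appeared, empty if none did:

```python
def rar(correct_positions):
    """Reciprocal rank of the highest-ranked correct answer; e.g. correct
    answers at positions 2, 3 and 5 give RAR = 1/2.  No correct answer
    among the five yields 0."""
    return 1.0 / min(correct_positions) if correct_positions else 0.0

def mrar(per_question_positions):
    """Mean Reciprocal Answer Rank over all questions."""
    return sum(rar(p) for p in per_question_positions) / len(per_question_positions)
```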
723
- {
724
- "text": "We presented a new technique, predictive annotation, for finding answers to natural language questions in text corpora. We showed that a system based on predictive annotation can deliver very good results compared to other competing systems.",
725
- "cite_spans": [],
726
- "ref_spans": [],
727
- "eq_spans": [],
728
- "section": "Conclusion",
729
- "sec_num": "8"
730
- },
731
- {
732
- "text": "We described a set of features that correlate with the plausibility of a given text span being a good answer to a question. We experi- Table 7 : Comparison of our system with the other participants mented with two algorithms for ranking potential answers based on these features. We discovered that a linear combination of these features performs better overall, while a non-linear algorithm performs better on unclassified questions.",
733
- "cite_spans": [],
734
- "ref_spans": [
735
- {
736
- "start": 135,
737
- "end": 142,
738
- "text": "Table 7",
739
- "ref_id": null
740
- }
741
- ],
742
- "eq_spans": [],
743
- "section": "Conclusion",
744
- "sec_num": "8"
745
- }
746
- ],
747
- "back_matter": [
748
- {
749
- "text": "We would like to thank Eric Brown, Anni Coden, and Wlodek Zadrozny from IBM Research for useful comments and collaboration. We would also like to thank the organizers of the TREC Q~zA evaluation for initiating such a wonderful research initiative.",
750
- "cite_spans": [],
751
- "ref_spans": [],
752
- "eq_spans": [],
753
- "section": "Acknowledgments",
754
- "sec_num": null
755
- }
756
- ],
757
- "bib_entries": {
758
- "BIBREF0": {
759
- "ref_id": "b0",
760
- "title": "Question answering from large document collections",
761
- "authors": [
762
- {
763
- "first": "Eric",
764
- "middle": [],
765
- "last": "Breck",
766
- "suffix": ""
767
- },
768
- {
769
- "first": "John",
770
- "middle": [],
771
- "last": "Burger",
772
- "suffix": ""
773
- },
774
- {
775
- "first": "David",
776
- "middle": [],
777
- "last": "House",
778
- "suffix": ""
779
- },
780
- {
781
- "first": "Marc",
782
- "middle": [],
783
- "last": "Light",
784
- "suffix": ""
785
- },
786
- {
787
- "first": "Inderjeet",
788
- "middle": [],
789
- "last": "Mani",
790
- "suffix": ""
791
- }
792
- ],
793
- "year": 1999,
794
- "venue": "Proceedings of AAAI Fall Symposium on Question Answering Systems",
795
- "volume": "",
796
- "issue": "",
797
- "pages": "",
798
- "other_ids": {},
799
- "num": null,
800
- "urls": [],
801
- "raw_text": "Eric Breck, John Burger, David House, Marc Light, and Inderjeet Mani. 1999. Ques- tion answering from large document collec- tions. In Proceedings of AAAI Fall Sympo- sium on Question Answering Systems, North Falmouth, Massachusetts.",
802
- "links": null
803
- },
804
- "BIBREF1": {
805
- "ref_id": "b1",
806
- "title": "Identifying and extracting relations in text",
807
- "authors": [
808
- {
809
- "first": "Roy",
810
- "middle": [],
811
- "last": "Byrd",
812
- "suffix": ""
813
- },
814
- {
815
- "first": "Yael",
816
- "middle": [],
817
- "last": "Ravin",
818
- "suffix": ""
819
- }
820
- ],
821
- "year": 1998,
822
- "venue": "Proceedings of NLDB",
823
- "volume": "",
824
- "issue": "",
825
- "pages": "",
826
- "other_ids": {},
827
- "num": null,
828
- "urls": [],
829
- "raw_text": "Roy Byrd and Yael Ravin. 1998. Identifying and extracting relations in text. In Proceed- ings of NLDB, Klagenfurt, Austria.",
830
- "links": null
831
- },
832
- "BIBREF2": {
833
- "ref_id": "b2",
834
- "title": "Finding answers in large collections of texts : Paragraph indexing + abductive inference",
835
- "authors": [
836
- {
837
- "first": "Sanda",
838
- "middle": [],
839
- "last": "Harabagiu",
840
- "suffix": ""
841
- },
842
- {
843
- "first": "Steven",
844
- "middle": [
845
- "J"
846
- ],
847
- "last": "Maiorano",
848
- "suffix": ""
849
- }
850
- ],
851
- "year": 1999,
852
- "venue": "Proceedings of AAAI Fall Symposium on Question Answering Systems",
853
- "volume": "",
854
- "issue": "",
855
- "pages": "",
856
- "other_ids": {},
857
- "num": null,
858
- "urls": [],
859
- "raw_text": "Sanda Harabagiu and Steven J. Maiorano. 1999. Finding answers in large collections of texts : Paragraph indexing + abductive in- ference. In Proceedings of AAAI Fall Sympo- sium on Question Answering Systems, North Falmouth, Massachusetts.",
860
- "links": null
861
- },
862
- "BIBREF3": {
863
- "ref_id": "b3",
864
- "title": "Answering questions for an organization online",
865
- "authors": [
866
- {
867
- "first": "Vladimir",
868
- "middle": [],
869
- "last": "Kulyukin",
870
- "suffix": ""
871
- },
872
- {
873
- "first": "Kristian",
874
- "middle": [],
875
- "last": "Hammond",
876
- "suffix": ""
877
- },
878
- {
879
- "first": "Robin",
880
- "middle": [],
881
- "last": "Burke",
882
- "suffix": ""
883
- }
884
- ],
885
- "year": 1998,
886
- "venue": "Proceedings of AAAI",
887
- "volume": "",
888
- "issue": "",
889
- "pages": "",
890
- "other_ids": {},
891
- "num": null,
892
- "urls": [],
893
- "raw_text": "Vladimir Kulyukin, Kristian Hammond, and Robin Burke. 1998. Answering questions for an organization online. In Proceedings of AAAI, Madison, Wisconsin.",
894
- "links": null
895
- },
896
- "BIBREF4": {
897
- "ref_id": "b4",
898
- "title": "MURAX: A robust linguistic approach for question answering using an ondine encyclopedia",
899
- "authors": [
900
- {
901
- "first": "Julian",
902
- "middle": [
903
- "M"
904
- ],
905
- "last": "Kupiec",
906
- "suffix": ""
907
- }
908
- ],
909
- "year": 1993,
910
- "venue": "Proceedings, 16th Annual International A CM SIGIR Conference on Research and Development in Information Retrieval",
911
- "volume": "",
912
- "issue": "",
913
- "pages": "",
914
- "other_ids": {},
915
- "num": null,
916
- "urls": [],
917
- "raw_text": "Julian M. Kupiec. 1993. MURAX: A robust linguistic approach for question answering us- ing an ondine encyclopedia. In Proceedings, 16th Annual International A CM SIGIR Con- ference on Research and Development in In- formation Retrieval.",
918
- "links": null
919
- },
920
- "BIBREF5": {
921
- "ref_id": "b5",
922
- "title": "The use of predictive annotation for question answering in TREC8",
923
- "authors": [
924
- {
925
- "first": "John",
926
- "middle": [],
927
- "last": "Prager",
928
- "suffix": ""
929
- },
930
- {
931
- "first": "R",
932
- "middle": [],
933
- "last": "Dragomir",
934
- "suffix": ""
935
- },
936
- {
937
- "first": "Eric",
938
- "middle": [],
939
- "last": "Radev",
940
- "suffix": ""
941
- },
942
- {
943
- "first": "Anni",
944
- "middle": [],
945
- "last": "Brown",
946
- "suffix": ""
947
- },
948
- {
949
- "first": "Valerie",
950
- "middle": [],
951
- "last": "Coden",
952
- "suffix": ""
953
- },
954
- {
955
- "first": "",
956
- "middle": [],
957
- "last": "Samn",
958
- "suffix": ""
959
- }
960
- ],
961
- "year": 1999,
962
- "venue": "Proceedings o/TREC-8",
963
- "volume": "",
964
- "issue": "",
965
- "pages": "",
966
- "other_ids": {},
967
- "num": null,
968
- "urls": [],
969
- "raw_text": "John Prager, Dragomir R. Radev, Eric Brown, Anni Coden, and Valerie Samn. 1999. The use of predictive annotation for question an- swering in TREC8. In Proceedings o/TREC- 8, Gaithersburg, Maryland.",
970
- "links": null
971
- },
972
- "BIBREF6": {
973
- "ref_id": "b6",
974
- "title": "Building a generation knowledge source using internet-accessible newswire",
975
- "authors": [
976
- {
977
- "first": "R",
978
- "middle": [],
979
- "last": "Dragomir",
980
- "suffix": ""
981
- },
982
- {
983
- "first": "Kathleen",
984
- "middle": [
985
- "R"
986
- ],
987
- "last": "Radev",
988
- "suffix": ""
989
- },
990
- {
991
- "first": "",
992
- "middle": [],
993
- "last": "Mckeown",
994
- "suffix": ""
995
- }
996
- ],
997
- "year": 1997,
998
- "venue": "Proceedings of the 5th Conference on Applied Natural Language Processing",
999
- "volume": "",
1000
- "issue": "",
1001
- "pages": "221--228",
1002
- "other_ids": {},
1003
- "num": null,
1004
- "urls": [],
1005
- "raw_text": "Dragomir R. Radev and Kathleen R. McKe- own. 1997. Building a generation knowledge source using internet-accessible newswire. In Proceedings of the 5th Conference on Applied Natural Language Processing, pages 221-228, Washington, DC, April.",
1006
- "links": null
1007
- },
1008
- "BIBREF7": {
1009
- "ref_id": "b7",
1010
- "title": "Query expansion using lexical-semantic relations",
1011
- "authors": [
1012
- {
1013
- "first": "Ellen",
1014
- "middle": [],
1015
- "last": "Voorhees",
1016
- "suffix": ""
1017
- }
1018
- ],
1019
- "year": 1994,
1020
- "venue": "Proceedings of A CM SIGIR",
1021
- "volume": "",
1022
- "issue": "",
1023
- "pages": "",
1024
- "other_ids": {},
1025
- "num": null,
1026
- "urls": [],
1027
- "raw_text": "Ellen Voorhees. 1994. Query expansion using lexical-semantic relations. In Proceedings of A CM SIGIR, Dublin, Ireland.",
1028
- "links": null
1029
- },
1030
- "BIBREF8": {
1031
- "ref_id": "b8",
1032
- "title": "Disambiguation of proper names in text",
1033
- "authors": [
1034
- {
1035
- "first": "Nina",
1036
- "middle": [],
1037
- "last": "Wacholder",
1038
- "suffix": ""
1039
- },
1040
- {
1041
- "first": "Yael",
1042
- "middle": [],
1043
- "last": "Ravin",
1044
- "suffix": ""
1045
- },
1046
- {
1047
- "first": "Misook",
1048
- "middle": [],
1049
- "last": "Choi",
1050
- "suffix": ""
1051
- }
1052
- ],
1053
- "year": 1997,
1054
- "venue": "Proceedings of the Fifth Applied Natural Language Processing Conference",
1055
- "volume": "",
1056
- "issue": "",
1057
- "pages": "",
1058
- "other_ids": {},
1059
- "num": null,
1060
- "urls": [],
1061
- "raw_text": "Nina Wacholder, Yael Ravin, and Misook Choi. 1997. Disambiguation of proper names in text. In Proceedings of the Fifth Applied Nat- ural Language Processing Conference, Wash- ington, D.C. Association for Computational Linguistics.",
1062
- "links": null
1063
- }
1064
- },
1065
- "ref_entries": {
1066
- "FIGREF0": {
1067
- "num": null,
1068
- "uris": null,
1069
- "text": "Figure 1: System Architecture.",
1070
- "type_str": "figure"
1071
- },
1072
- "FIGREF1": {
1073
- "num": null,
1074
- "uris": null,
1075
- "text": "Sample QA-Tokens.",
1076
- "type_str": "figure"
1077
- },
1078
- "FIGREF2": {
1079
- "num": null,
1080
- "uris": null,
1081
- "text": "Input sent from GuruQA to AnSel/Werlect.",
1082
- "type_str": "figure"
1083
- },
1084
- "FIGREF3": {
1085
- "num": null,
1086
- "uris": null,
1087
- "text": "Sample questions from TR38.",
1088
- "type_str": "figure"
1089
- },
1090
- "FIGREF4": {
1091
- "num": null,
1092
- "uris": null,
1093
- "text": "Sample questions from T200. Some examples of problematic questions are shown in",
1094
- "type_str": "figure"
1095
- },
1096
- "FIGREF6": {
1097
- "num": null,
1098
- "uris": null,
1099
- "text": "Training algorithm used by AnSel. Training discovered the following weights: Wnurnbe r -~ --0.3; Wrspann o -~ --0.5; Wcount : 3.0; Wnotinq = 2.0; Wtypes = 15.0; Wavgdst -----1.0; W~score = 1.5 At runtime, the weights are used to rank potential answers. Each span is assigned a TO-TAL score and the top 5 distinct extracts of 50 (or 250) bytes centered around the span are output. The 50-byte extracts for question 10001 are shown in",
1100
- "type_str": "figure"
1101
- },
1102
- "FIGREF7": {
1103
- "num": null,
1104
- "uris": null,
1105
- "text": "Figure 8: Fifty-byte extracts.",
1106
- "type_str": "figure"
1107
- },
1108
- "FIGREF8": {
1109
- "num": null,
1110
- "uris": null,
1111
- "text": "Algorithm used by Werlect.",
1112
- "type_str": "figure"
1113
- },
1114
- "TABREF0": {
1115
- "num": null,
1116
- "html": null,
1117
- "type_str": "table",
1118
- "text": "",
1119
- "content": "<table><tr><td>PLACES</td><td>Where</td><td>In the Rocky Mountains</td></tr><tr><td>COUNTRY$</td><td>Where/What country</td><td>United Kingdom</td></tr><tr><td>STATES</td><td>Where/What state</td><td>Massachusetts</td></tr><tr><td>PERSONS</td><td>Who</td><td>Albert Einstein</td></tr><tr><td>ROLES</td><td>Who</td><td>Doctor</td></tr><tr><td>NAMES</td><td>Who/What/Which</td><td>The Shakespeare Festival</td></tr><tr><td>ORG$</td><td>Who/What</td><td>The US Post Office</td></tr><tr><td>DURATIONS</td><td>How long</td><td>For 5 centuries</td></tr><tr><td>AGES</td><td>How old</td><td>30 years old</td></tr><tr><td>YEARS</td><td>When/What year</td><td>1999</td></tr><tr><td>TIMES</td><td>When</td><td>In the afternoon</td></tr><tr><td>DATES</td><td>When/What date</td><td>July 4th, 1776</td></tr><tr><td>VOLUMES</td><td>How big</td><td>3 gallons</td></tr><tr><td>AREAS</td><td>How big</td><td>4 square inches</td></tr><tr><td>LENGTHS</td><td>How big/long/high</td><td>3 miles</td></tr><tr><td>WEIGHTS</td><td>How big/heavy</td><td>25 tons</td></tr><tr><td>NUMBERS</td><td>How many</td><td>1,234.5</td></tr><tr><td>METHODS</td><td>How</td><td>By rubbing</td></tr><tr><td>RATES</td><td>How much</td><td>50 per cent</td></tr><tr><td>MONEYS</td><td>How much</td><td>4 million dollars</td></tr></table>"
1120
- },
1121
- "TABREF2": {
1122
- "num": null,
1123
- "html": null,
1124
- "type_str": "table",
1125
- "text": "Ranked potential answers to Quest. 1.",
1126
- "content": "<table/>"
1127
- },
1128
- "TABREF3": {
1129
- "num": null,
1130
- "html": null,
1131
- "type_str": "table",
1132
- "text": "",
1133
- "content": "<table><tr><td/><td>Type</td><td>Nunlber</td><td>Rspanno</td><td>Count</td><td>Notlnq</td><td>Type</td><td>Avgdst</td><td>Sscore</td></tr><tr><td/><td>PERSON</td><td>3</td><td>3</td><td>6</td><td>2</td><td>I</td><td>12</td><td>0.02507</td></tr><tr><td/><td>PERSON</td><td>1</td><td>1</td><td>6</td><td>2</td><td>1</td><td>16</td><td>0.02507</td></tr><tr><td/><td>PERSON</td><td>17</td><td>1</td><td>4</td><td>2</td><td>I</td><td>8</td><td>0.02257</td></tr><tr><td/><td>PERSON</td><td>23</td><td>6</td><td>4</td><td>4</td><td>1</td><td>II</td><td>0.02257</td></tr><tr><td/><td>PERSON</td><td>22</td><td>5</td><td>4</td><td>I</td><td>I</td><td>9</td><td>0.02257</td></tr><tr><td/><td>PERSON</td><td>13</td><td>I</td><td>2</td><td>5</td><td>1</td><td>16</td><td>0.02505</td></tr><tr><td/><td>PERSON</td><td>25</td><td>2</td><td>4</td><td>I</td><td>I</td><td>15</td><td>0.02256</td></tr><tr><td/><td>PERSON</td><td>33</td><td>4</td><td>4</td><td>2</td><td>l</td><td>14</td><td>0.02256</td></tr><tr><td/><td>PERSON</td><td>3O</td><td>1</td><td>4</td><td>2</td><td>1</td><td>17</td><td>0.02256</td></tr><tr><td/><td>ORG</td><td>18</td><td>2</td><td>4</td><td>1</td><td>2</td><td>6</td><td>0.02257</td></tr><tr><td/><td>PERSON</td><td>37</td><td>6</td><td>4</td><td>1</td><td>1</td><td>14</td><td>0.02256</td></tr><tr><td/><td>PERSON</td><td>38</td><td>7</td><td>4</td><td>2</td><td>1</td><td>17</td><td>0.02256</td></tr><tr><td>O.J. Simpson</td><td>NAME</td><td>2</td><td>2</td><td>6</td><td>2</td><td>3</td><td>12</td><td>0.02507</td></tr><tr><td>South Lake Tahoe</td><td>NAME</td><td>7</td><td>5</td><td>6</td><td>3</td><td>3</td><td>14</td><td>0.02507</td></tr><tr><td>Washington High</td><td>NAME</td><td>10</td><td>6</td><td>6</td><td>1</td><td>3</td><td>18</td><td>0.02507</td></tr><tr><td>Morgan</td><td>NAME</td><td>26</td><td>3</td><td>4</td><td>1</td><td>3</td><td>12</td><td>0.02256</td></tr><tr><td>Tennessee football</td><td>NAME</td><td>31</td><td>2</td><td>4</td><td>1</td><td>3</td><td>15</td><td>0.02256</td></tr><tr><td>Ellington</td><td>NAME</td><td>24</td><td>1</td><td>4</td><td>1</td><td>3</td><td>20</td><td>0.02256</td></tr><tr><td>assistant</td><td>ROLE</td><td>21</td><td>4</td><td>4</td><td>1</td><td>4</td><td>8</td><td>0.02257</td></tr><tr><td>the Volunteers</td><td>ROLE</td><td>34</td><td>5</td><td>4</td><td>2</td><td>4</td><td>14</td><td>0.02256</td></tr><tr><td>Johnny Mathis</td><td>PERSON</td><td>4</td><td>4</td><td>6</td><td>-I00</td><td>I</td><td>II</td><td>0.02507</td></tr><tr><td>Mathis</td><td>NAME</td><td>14</td><td>2</td><td>2</td><td>-100</td><td>3</td><td>I0</td><td>0.02505</td></tr><tr><td>coach</td><td>ROLE</td><td>19</td><td>3</td><td>4</td><td>-100</td><td>4</td><td>4</td><td>0.02257</td></tr></table>"
1134
- },
1135
- "TABREF4": {
1136
- "num": null,
1137
- "html": null,
1138
- "type_str": "table",
1139
- "text": "",
1140
- "content": "<table/>"
1141
- },
1142
- "TABREF6": {
1143
- "num": null,
1144
- "html": null,
1145
- "type_str": "table",
1146
- "text": "Influence of frequency on span rank.",
1147
- "content": "<table><tr><td colspan=\"2\">i. Let candidate_set = all potential</td></tr><tr><td colspan=\"2\">answers, ranked and sorted.</td></tr><tr><td colspan=\"2\">2. For each hit-list passage, extract</td></tr><tr><td colspan=\"2\">ali spans of 50 (or 250) bytes, on</td></tr><tr><td colspan=\"2\">word boundaries.</td></tr><tr><td colspan=\"2\">3. Rank and sort all segments based</td></tr><tr><td colspan=\"2\">on phrase ranks, matching terms,</td></tr><tr><td colspan=\"2\">and sentence ranks.</td></tr><tr><td colspan=\"2\">4. For each candidate in sorted</td></tr><tr><td colspan=\"2\">candidate_set</td></tr><tr><td colspan=\"2\">-Let highest_ranked_span</td></tr><tr><td>=</td><td>highest-ranked span</td></tr><tr><td colspan=\"2\">containing candidate</td></tr><tr><td colspan=\"2\">-Let answer_set[i++] =</td></tr><tr><td colspan=\"2\">highest_rankedspan</td></tr><tr><td colspan=\"2\">-Remove every candidate from</td></tr><tr><td colspan=\"2\">candidate_set that is found in</td></tr><tr><td colspan=\"2\">highest_rankedspan</td></tr><tr><td colspan=\"2\">-Exit if i &gt; 5</td></tr><tr><td colspan=\"2\">5. Output answer_set</td></tr></table>"
1148
- }
1149
- }
1150
- }
1151
- }
 
Full_text_JSON/prefixA/json/A00/A00-1022.json DELETED
@@ -1,1359 +0,0 @@
1
- {
2
- "paper_id": "A00-1022",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:40.516367Z"
6
- },
7
- "title": "Message Classification in the Call Center",
8
- "authors": [
9
- {
10
- "first": "Stephan",
11
- "middle": [],
12
- "last": "Busemann",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Seen Schmeier~ Roman G. Arens DFKI GmbH",
17
- "location": {
18
- "addrLine": "Stuhlsatzenhausweg 3",
19
- "postCode": "D-66123",
20
- "settlement": "Saarbriicken",
21
- "country": "Germany"
22
- }
23
- },
24
- "email": "[email protected]"
25
- }
26
- ],
27
- "year": "",
28
- "venue": null,
29
- "identifiers": {},
30
- "abstract": "Customer care in technical domains is increasingly based on e-mail communication, allowing for the reproduction of approved solutions. Identifying the customer's problem is often time-consuming, as the problem space changes if new products are launched. This paper describes a new approach to the classification of e-mail requests based on shallow text processing and machine learning techniques. It is implemented within an assistance system for call center agents that is used in a commercial setting.",
31
- "pdf_parse": {
32
- "paper_id": "A00-1022",
33
- "_pdf_hash": "",
34
- "abstract": [
35
- {
36
- "text": "Customer care in technical domains is increasingly based on e-mail communication, allowing for the reproduction of approved solutions. Identifying the customer's problem is often time-consuming, as the problem space changes if new products are launched. This paper describes a new approach to the classification of e-mail requests based on shallow text processing and machine learning techniques. It is implemented within an assistance system for call center agents that is used in a commercial setting.",
37
- "cite_spans": [],
38
- "ref_spans": [],
39
- "eq_spans": [],
40
- "section": "Abstract",
41
- "sec_num": null
42
- }
43
- ],
44
- "body_text": [
45
- {
46
- "text": "Customer care in technical domains is increasingly based on e-mail communication, allowing for the reproduction of approved solutions. For a call center agent, identifying the customer's problem is often time-consuming, as the problem space changes if new products are launched or existing regulations are modified. The typical task of a call center agent processing e-mail requests consists of the following steps:",
47
- "cite_spans": [],
48
- "ref_spans": [],
49
- "eq_spans": [],
50
- "section": "Introduction",
51
- "sec_num": "1"
52
- },
53
- {
54
- "text": "Recognize the problem(s): read and understand the e-mail request;",
55
- "cite_spans": [],
56
- "ref_spans": [],
57
- "eq_spans": [],
58
- "section": "Introduction",
59
- "sec_num": "1"
60
- },
61
- {
62
- "text": "Search a solution: identify and select predefined text blocks;",
63
- "cite_spans": [],
64
- "ref_spans": [],
65
- "eq_spans": [],
66
- "section": "Introduction",
67
- "sec_num": "1"
68
- },
69
- {
70
- "text": "Provide the solution: if necessary, customize text blocks to meet the current request, and send the text.",
71
- "cite_spans": [],
72
- "ref_spans": [],
73
- "eq_spans": [],
74
- "section": "Introduction",
75
- "sec_num": "1"
76
- },
77
- {
78
- "text": "This task can partly be automated by a system suggesting relevant solutions for an incoming e-mail. This would cover the first two steps. The last step can be delicate, as its primary goal is to keep the customer satisfied. Thus human intervention seems mandatory to allow for individual, customized answers. Such a system will \u2022 reduce the training effort required since agents don't have to know every possible solution for every possible problem;",
79
- "cite_spans": [],
80
- "ref_spans": [],
81
- "eq_spans": [],
82
- "section": "Introduction",
83
- "sec_num": "1"
84
- },
85
- {
86
- "text": "\u2022 increase the agents' performance since agents can more quickly select a solution among several offered than searching one;",
87
- "cite_spans": [],
88
- "ref_spans": [],
89
- "eq_spans": [],
90
- "section": "Introduction",
91
- "sec_num": "1"
92
- },
93
- {
94
- "text": "\u2022 improve the quality of responses since agents will behave more homogeneously -both as a group and over time -and commit fewer errors.",
95
- "cite_spans": [],
96
- "ref_spans": [],
97
- "eq_spans": [],
98
- "section": "Introduction",
99
- "sec_num": "1"
100
- },
101
- {
102
- "text": "Given that free text about arbitrary topics must be processed, in-depth approaches to language understanding are not feasible. Given further that the topics may change over time, a top-down approach to knowledge modeling is out of the question. Rather a combination of shallow text processing (STP) with statistics-based machine learning techniques (SML) is called for. STP gathers partial information about text such as part of speech, word stems, negations, or sentence type. These types of information can be used to identify the linguistic properties of a large training set of categorized e-mails. SML techniques are used to build a classifier that is used for new, incoming messages. Obviously, the change of topics can be accommodated by adding new categories and e-mails and producing a new classifier on the basis of old and new data. We call this replacement of a classifier \"relearning\". This paper describes a new approach to the classification of e-mail requests along these lines. It is implemented within the ICe-MAIL system, which is an assistance system for call center agents that is currently used in a commercial setting. Section 2 describes important properties of the input data, i.e. the e-mail texts on the one hand, and the categories on the other. These properties influenced the system architecture, which is presented in Section 3. Various publicly available SML systems have been tested with different methods of STP-based preprocessing. Section 4 describes the results. The implementation and usage of the system including the graphical user interface is presented in Section 5. We conclude by giving an outlook to further expected improvements (Section 6).",
103
- "cite_spans": [],
104
- "ref_spans": [],
105
- "eq_spans": [],
106
- "section": "Introduction",
107
- "sec_num": "1"
108
- },
109
- {
110
- "text": "A closer look at the data the ICe-MAIL system is processing will clarify the task further. We carried out experiments with unmodified e-mail data accumulated over a period of three months in the call center database. The total amount was 4777 e-mails.",
111
- "cite_spans": [],
112
- "ref_spans": [],
113
- "eq_spans": [],
114
- "section": "Data Characteristics",
115
- "sec_num": "2"
116
- },
117
- {
118
- "text": "We used 47 categories, which contained at least 30 documents. This minimum amount of documents turned out to render the category sufficiently distinguishable for the SML tools. The database contained 74 categories with at least 10 documents, but the selected ones covered 94% of all e-malls, i.e. 4490 documents.",
119
- "cite_spans": [],
120
- "ref_spans": [],
121
- "eq_spans": [],
122
- "section": "Data Characteristics",
123
- "sec_num": "2"
124
- },
125
- {
126
- "text": "It has not yet generally been investigated how the type of data influences the learning result (Yang, 1999) , or under which circumstances which kind of preprocessing and which learning algorithm is most appropriate. Several aspects must be considered: Length of the documents, morphological and syntactic well-formedness, the degree to which a document can be uniquely classified, and, of course, the language of the documents.",
127
- "cite_spans": [
128
- {
129
- "start": 95,
130
- "end": 107,
131
- "text": "(Yang, 1999)",
132
- "ref_id": "BIBREF18"
133
- }
134
- ],
135
- "ref_spans": [],
136
- "eq_spans": [],
137
- "section": "Data Characteristics",
138
- "sec_num": "2"
139
- },
140
- {
141
- "text": "In our application domain the documents differ very much from documents generally used in benchmark tests, for example the Reuters corpus 1. First of all, we have to deal with German, whereas the Reuters data are in English. The average length of our e-mails is 60 words, whereas for documents of Reuters-21578 it is 129 words. The number of categories we used compares to the top 47 categories of the Reuters TOPICS category set. While we have 5008 documents, TOPICS consists of 13321 instances 2. The Reuters documents usually are morphologically and syntactically well-formed. As emails are a more spontaneously created and informal type of document, they require us to cope with a large amount of jargon, misspellings and grammatical inaccuracy. A drastic example is shown in Figure 2 . The bad conformance to linguistic standards was a major argument in favor of STP instead of in-depth syntactic and semantic analysis.",
142
- "cite_spans": [],
143
- "ref_spans": [
144
- {
145
- "start": 780,
146
- "end": 788,
147
- "text": "Figure 2",
148
- "ref_id": null
149
- }
150
- ],
151
- "eq_spans": [],
152
- "section": "Data Characteristics",
153
- "sec_num": "2"
154
- },
155
- {
156
- "text": "The degree to which a document can be uniquely classified is hard to verify and can only be inferred from the results in general terms. 3 It is, however, dependent on the ability to uniquely distinguish the classes. In our application we encounter overlapping and non-exhaustive categories as the category system develops over time.",
157
- "cite_spans": [],
158
- "ref_spans": [],
159
- "eq_spans": [],
160
- "section": "Data Characteristics",
161
- "sec_num": "2"
162
- },
163
- {
164
- "text": "With Machine Learning STP and SML correspond to two different paradigms. STP tools used for classification tasks promise very high recall/precision or accuracy values. Usually human experts define one or several template structures to be filled automatically by extracting information from the documents (cf. e.g. (Ciravegna et al., 1999) ). Afterwards, the partially lhttp ://~wv. research, a~t. com/'le~is/reuters21578. html 2We took only uniquely classified documents into account. 3Documents containing multiple requests can at present only be treated manually, as described in Section 5. filled templates are classified by hand-made rules. The whole process brings about high costs in analyzing and modeling the application domain, especially if it is to take into account the problem of changing categories in the present application.",
165
- "cite_spans": [
166
- {
167
- "start": 314,
168
- "end": 338,
169
- "text": "(Ciravegna et al., 1999)",
170
- "ref_id": "BIBREF1"
171
- }
172
- ],
173
- "ref_spans": [],
174
- "eq_spans": [],
175
- "section": "Integrating Language Technology",
176
- "sec_num": "3"
177
- },
178
- {
179
- "text": "SML promises low costs both in analyzing and modeling the application at the expense of a lower accuracy. It is independent of the domain on the one hand, but does not consider any domain specific knowledge on the other.",
180
- "cite_spans": [],
181
- "ref_spans": [],
182
- "eq_spans": [],
183
- "section": "Integrating Language Technology",
184
- "sec_num": "3"
185
- },
186
- {
187
- "text": "By combining both methodologies in ICe-MAIL, we achieve high accuracy and can still preserve a useful degree of domain-independence. STP may use both general linguistic knowledge and linguistic algorithms or heuristics adapted to the application in order to extract information from texts that is relevant for classification. The input to the SML tool is enriched with that information. The tool builds one or several categorizers 4 that will classify new texts.",
188
- "cite_spans": [],
189
- "ref_spans": [],
190
- "eq_spans": [],
191
- "section": "Integrating Language Technology",
192
- "sec_num": "3"
193
- },
194
- {
195
- "text": "In general, SML tools work with a vector representation of data. First, a relevancy vector of relevant features for each class is computed (Yang and Pedersen, 1997) . In our case the relevant features consist of the user-defined output of the linguistic preprocessor. Then each single document is translated into a vector of numbers isomorphic to the defining vector. Each entry represents the occurrence of the corresponding feature. More details will be given in Section 4",
196
- "cite_spans": [
197
- {
198
- "start": 139,
199
- "end": 164,
200
- "text": "(Yang and Pedersen, 1997)",
201
- "ref_id": "BIBREF17"
202
- }
203
- ],
204
- "ref_spans": [],
205
- "eq_spans": [],
206
- "section": "Integrating Language Technology",
207
- "sec_num": "3"
208
- },
209
- {
210
- "text": "The ICe-MAIL architecture is shown in Figure 1 . The workflow of the system consists of a learning step carried out off-line (the light gray box) and an online categorization step (the dark gray box). In the off-line part, categorizers are built by processing classified data first by an STP and then by an SML tool. In this way, categorizers can be replaced by the system administrator as she wants to include new or remove expired categories. The categorizers are used on-line in order to classify new documents after they have passed the linguistic preprocessing. The resulting category is in our application associated with a standard text that the call center agent uses in her answer. The on-line step provides new classified data that is stored in a dedicated ICe-MAIL database (not shown in Figure 1 ). The relearning step is based on data from this database.",
211
- "cite_spans": [],
212
- "ref_spans": [
213
- {
214
- "start": 38,
215
- "end": 46,
216
- "text": "Figure 1",
217
- "ref_id": "FIGREF0"
218
- },
219
- {
220
- "start": 799,
221
- "end": 807,
222
- "text": "Figure 1",
223
- "ref_id": "FIGREF0"
224
- }
225
- ],
226
- "eq_spans": [],
227
- "section": "Integrating Language Technology",
228
- "sec_num": "3"
229
- },
230
- {
231
- "text": "Linguistic preprocessing of text documents is carried out by re-using sines, an information extraction core system for real-world German text processing (Neumann et al., 1997) . The fundamental design criterion of sines is to provide a set of basic, powerful, robust, and efficient STP components and 4Almost all tools we examined build a single multicategorizer except for SVM-Light, which builds multiple binary classifiers. generic linguistic knowledge sources that can easily be customized to deal with different tasks in a flexible manner, sines includes a text tokenizer, a lexical processor and a chunk parser. The chunk parser itself is subdivided into three components. In the first step, phrasal fragments like general nominal expressions and verb groups are recognized. Next, the dependency-based structure of the fragments of each sentence is computed using a set of specific sentence patterns. Third, the grammatical functions are determined for each dependency-based structure on the basis of a large subcategorization lexicon. The present application benefits from the high modularity of the usage of the components. Thus, it is possible to run only a subset of the components and to tailor their output. The experiments described in Section 4 make use of this feature.",
232
- "cite_spans": [
233
- {
234
- "start": 153,
235
- "end": 175,
236
- "text": "(Neumann et al., 1997)",
237
- "ref_id": "BIBREF9"
238
- }
239
- ],
240
- "ref_spans": [],
241
- "eq_spans": [],
242
- "section": "Shallow Text Processing",
243
- "sec_num": "3.1"
244
- },
245
- {
246
- "text": "Several SML tools representing different learning paradigms have been selected and evaluated in different settings of our domain:",
247
- "cite_spans": [],
248
- "ref_spans": [],
249
- "eq_spans": [],
250
- "section": "Statistics-Based Machine Learning",
251
- "sec_num": "3.2"
252
- },
253
- {
254
- "text": "Lazy Learning: Lazy Learners are also known as memory-based, instance-based, exemplarbased, case-based, experience-based, or knearest neighbor algorithms. They store all documents as vectors during the learning phase. In the categorization phase, the new document vector is compared to the stored ones and is categorized to same class as the k-nearest neighbors. The distance is measured by computing e.g. the Euclidean distance between the vectors. By changing the number of neighbors k or the kind of distance measure, the amount of generalization can be controlled. We used IB (Aha, 1992), which is part of the MLC++ library (Kohavi and Sommerfield, 1996) .",
255
- "cite_spans": [
256
- {
257
- "start": 628,
258
- "end": 658,
259
- "text": "(Kohavi and Sommerfield, 1996)",
260
- "ref_id": "BIBREF7"
261
- }
262
- ],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "Statistics-Based Machine Learning",
266
- "sec_num": "3.2"
267
- },
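To make the memory-based scheme concrete, here is a toy Python version of such a k-nearest-neighbor categorizer over the document vectors of Section 4. It is illustrative only; the experiments used the IB implementation from the MLC++ library.

```python
import math
from collections import Counter

def knn_classify(stored, new_vec, k=3):
    """`stored` holds (vector, category) pairs memorized during learning;
    the new document gets the majority class of its k nearest neighbors
    under Euclidean distance."""
    nearest = sorted(stored, key=lambda vc: math.dist(vc[0], new_vec))[:k]
    return Counter(cat for _, cat in nearest).most_common(1)[0][0]
```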
268
- {
269
- "text": "This type of learners constructs a representation for document vectors belonging to a certain class during the learning phase, e.g. decision trees, decision rules or probability weightings. During the categorization phase, the representation is used to assign the appropriate class to a new document vector. Several pruning or specialization heuristics can be used to control the amount of generalization. We used ID3 (Quinlan, 1986) , C4.5 (Quinlan, 1992) and C5.0, RIPPER (Cohen, 1995) , and the Naive Bayes inducer (Good, 1965) contained in the MLCq-q-library. ID3, C4.5 and C5.0 produce decision trees, RIPPER isa rulebased learner and the Naive Bayes algorithm computes conditional probabilities of the classes from the instances. Support Vector Machines (SVMs): SVMs are described in (Vapnik, 1995) . SVMs are binary learners in that they distinguish positive and negative examples for each class. Like eager learners, they construct a representation during the learning phase, namely a hyper plane supported by vectors of positive and negative examples. For each class, a categorizer is built by computing such a hyper plane. During the categorization phase, each categorizer is applied to the new document vector, yielding the probabilities of the document belonging to a class. The probability increases with the distance of thevector from the hyper plane. A document is said to belong to the class with the highest probability. We chose SVM_Light (Joachims, 1998) .",
270
- "cite_spans": [
271
- {
272
- "start": 418,
273
- "end": 433,
274
- "text": "(Quinlan, 1986)",
275
- "ref_id": "BIBREF11"
276
- },
277
- {
278
- "start": 441,
279
- "end": 456,
280
- "text": "(Quinlan, 1992)",
281
- "ref_id": "BIBREF12"
282
- },
283
- {
284
- "start": 474,
285
- "end": 487,
286
- "text": "(Cohen, 1995)",
287
- "ref_id": "BIBREF2"
288
- },
289
- {
290
- "start": 518,
291
- "end": 530,
292
- "text": "(Good, 1965)",
293
- "ref_id": "BIBREF4"
294
- },
295
- {
296
- "start": 790,
297
- "end": 804,
298
- "text": "(Vapnik, 1995)",
299
- "ref_id": "BIBREF14"
300
- },
301
- {
302
- "start": 1457,
303
- "end": 1473,
304
- "text": "(Joachims, 1998)",
305
- "ref_id": "BIBREF6"
306
- }
307
- ],
308
- "ref_spans": [],
309
- "eq_spans": [],
310
- "section": "Symbolic Eager Learning:",
311
- "sec_num": null
312
- },
313
- {
314
- "text": "Neural Networks: Neural Networks are a special kind of \"non-symbolic\" eager learning algo-rithm. The neural network links the vector elements to the document categories The learning phase defines thresholds for the activation of neurons. In the categorization phase, a new document vector leads to the activation of a single category. For details we refer to (Wiener et al., 1995) . In our application, we tried out the Learning Vector Quantization (LVQ) (Kohonen et al., 1996) . LVQ has been used in its default configuration only. No adaptation to the application domain has been made.",
315
- "cite_spans": [
316
- {
317
- "start": 359,
318
- "end": 380,
319
- "text": "(Wiener et al., 1995)",
320
- "ref_id": "BIBREF15"
321
- },
322
- {
323
- "start": 455,
324
- "end": 477,
325
- "text": "(Kohonen et al., 1996)",
326
- "ref_id": "BIBREF8"
327
- }
328
- ],
329
- "ref_spans": [],
330
- "eq_spans": [],
331
- "section": "Symbolic Eager Learning:",
332
- "sec_num": null
333
- },
334
- {
335
- "text": "We describe the experiments and results we achieved with different linguistic preprocessing and learning algorithms and provide some interpretations. We start out from the corpus of categorized emails described in Section 2. In order to normalize the vectors representing the preprocessing results of texts of different length, and to concentrate on relevant material (cf. (Yang and Pedersen, 1997) ), we define the relevancy vector as follows. First, all documents are preprocessed, yielding a list of results for each category. From each of these lists, the 100 most frequent results -according to a TF/IDF measure -are selected. The relevancy vector consists of all selected results, where doubles are eliminated. Its length was about 2500 for the 47 categories; it slightly varied with the kind of preprocessing used.",
336
- "cite_spans": [
337
- {
338
- "start": 373,
339
- "end": 398,
340
- "text": "(Yang and Pedersen, 1997)",
341
- "ref_id": "BIBREF17"
342
- }
343
- ],
344
- "ref_spans": [],
345
- "eq_spans": [],
346
- "section": "Experiments and Results",
347
- "sec_num": "4"
348
- },
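A minimal sketch of the relevancy-vector construction just described, assuming `docs_by_category` maps each of the 47 categories to its training e-mails and `preprocess` returns the list of preprocessing results for one document. The particular TF/IDF weighting shown is one common variant; the paper does not specify the exact formula.

```python
# Sketch: per category, keep the 100 results ranked highest by a
# TF/IDF-style score, then merge the per-category lists without duplicates.
from collections import Counter
from math import log

def build_relevancy_vector(docs_by_category, preprocess, top_n=100):
    doc_lists = {c: [preprocess(d) for d in docs]
                 for c, docs in docs_by_category.items()}
    n_docs = sum(len(docs) for docs in doc_lists.values())
    # document frequency of each preprocessing result across the whole corpus
    df = Counter(t for docs in doc_lists.values() for d in docs for t in set(d))
    selected = []
    for docs in doc_lists.values():
        tf = Counter(t for d in docs for t in d)
        scored = sorted(tf, key=lambda t: tf[t] * log(n_docs / df[t]),
                        reverse=True)
        selected.extend(scored[:top_n])
    return sorted(set(selected))  # duplicates eliminated
```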
349
- {
350
- "text": "During the learning phase, each document is preprocessed. The result is mapped onto a vector of the same length as the relevancy vector. For every position in the relevancy vector, it is determined whether the corresponding result has been found. In that case, the value of the result vector element is 1, otherwise it is 0.",
351
- "cite_spans": [],
352
- "ref_spans": [],
353
- "eq_spans": [],
354
- "section": "Experiments and Results",
355
- "sec_num": "4"
356
- },
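The mapping of a preprocessed document onto a 0/1 vector over the relevancy vector can be sketched directly from this description (function and variable names are ours):

```python
def result_vector(preprocessed_doc, relevancy_vector):
    """1 where the relevancy-vector entry occurs in the document, else 0."""
    found = set(preprocessed_doc)
    return [1 if r in found else 0 for r in relevancy_vector]
```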
357
- {
358
- "text": "In the categorization phase, the new document is preprocessed, and a result vector is built as described above and handed over to the categorizer (cf . Figure 1) .",
359
- "cite_spans": [],
360
- "ref_spans": [
361
- {
362
- "start": 150,
363
- "end": 161,
364
- "text": ". Figure 1)",
365
- "ref_id": "FIGREF0"
366
- }
367
- ],
368
- "eq_spans": [],
369
- "section": "Experiments and Results",
370
- "sec_num": "4"
371
- },
372
- {
373
- "text": "While we tried various kinds of linguistic preprocessing, systematic experiments have been carried out with morphological analysis (MorphAna), shallow parsing heuristics (STP-Heuristics), and a combination of both (Combined).",
374
- "cite_spans": [],
375
- "ref_spans": [],
376
- "eq_spans": [],
377
- "section": "Experiments and Results",
378
- "sec_num": "4"
379
- },
380
- {
381
- "text": "MorphAna: Morphological Analysis provided by sines yields the word stems of nouns, verbs and adjectives, as well as the full forms of unknown words. We are using a lexicon of approx. 100000 word stems of German (Neumann et al., 1997) .",
382
- "cite_spans": [
383
- {
384
- "start": 211,
385
- "end": 233,
386
- "text": "(Neumann et al., 1997)",
387
- "ref_id": "BIBREF9"
388
- }
389
- ],
390
- "ref_spans": [],
391
- "eq_spans": [],
392
- "section": "Experiments and Results",
393
- "sec_num": "4"
394
- },
395
- {
396
- "text": "STP-Heuristics: Shallow parsing techniques are used to heuristically identify sentences containing relevant information. The e-mails usually contain questions and/or descriptions of problems. The manual analysis of a sample of the data suggested some linguistic constructions frequently used to express the problem. We expected that content words in these constructions should be particularly influential to the categorization. Words in these constructions are extracted and processed as in MorphAna, and all other words are ignored. 5 The heuristics were implemented in ICC-MAIL using sines. Table 1 , detailed information about the accuracy achieved is presented. All experiments were carried out using 10-fold cross-validation on the data described in Section 2.",
397
- "cite_spans": [],
398
- "ref_spans": [
399
- {
400
- "start": 593,
401
- "end": 600,
402
- "text": "Table 1",
403
- "ref_id": null
404
- }
405
- ],
406
- "eq_spans": [],
407
- "section": "Experiments and Results",
408
- "sec_num": "4"
409
- },
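Since the paper does not list the actual problem-description constructions, the following is an invented stand-in that only illustrates the shape of such heuristics; the patterns themselves are assumptions for demonstration, not the implemented rules.

```python
# Illustrative (invented) problem-description heuristics: keep only
# sentences that look like questions or problem statements.
import re

PROBLEM_PATTERNS = [
    re.compile(r"\?\s*$"),                        # sentence ends in "?"
    re.compile(r"\b(problem|fehler|error)\b", re.I),
    re.compile(r"\bwie kann ich\b", re.I),        # German "how can I ..."
]

def relevant_sentences(sentences):
    hits = [s for s in sentences if any(p.search(s) for p in PROBLEM_PATTERNS)]
    return hits or sentences  # footnote 5: fall back to the full text
```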
410
- {
411
- "text": "In all experiments the SVM_Light system outperformed other learning algorithms, which confirms Yang's (Yang and Liu, 1999) results for SVMs fed with Reuters data. The k-nearest neighbor algorithm IB performed surprisingly badly although different values ofk were used. For IB, ID3, C4.5, C5.0, Naive Bayes, RIPPER and SVM_Light, linguistic preprocessing increased the overall performance. In fact, the method performing best, SVM_Light, gained 3.5% by including the task-oriented heuristics. However, the boosted RIPPER and LVQ scored a decreased accuracy value there. For LVQ the decrease may be due to the fact that no adaptations to 5If no results were found this way, MorphAna was applied instead.",
412
- "cite_spans": [
413
- {
414
- "start": 102,
415
- "end": 122,
416
- "text": "(Yang and Liu, 1999)",
417
- "ref_id": "BIBREF16"
418
- }
419
- ],
420
- "ref_spans": [],
421
- "eq_spans": [],
422
- "section": "Experiments and Results",
423
- "sec_num": "4"
424
- },
425
- {
426
- "text": "6We certainly would have benefited from lexical semantic information, e.g. The correct date is missing would not be captured by our approach. honen et al., 1996) ). Neural networks are rather sensitive to misconfigurations. The boosting for RIP-PER seems to run into problems of overfitting. We noted that in six trials the accuracy could be improved in Combined compared to MorphAna, but in four trials, boosting led to deterioration. This effect is also mentioned in (Quinlan, 1996) . These figures are slightly lower than the ones reported by (Neumann and Schmeier, 1999 ) that were obtained from a different data set. Moreover, these data did not contain multiple queries in one e-mall.",
427
- "cite_spans": [
428
434
- {
435
- "start": 469,
436
- "end": 484,
437
- "text": "(Quinlan, 1996)",
438
- "ref_id": "BIBREF13"
439
- },
440
- {
441
- "start": 546,
442
- "end": 573,
443
- "text": "(Neumann and Schmeier, 1999",
444
- "ref_id": "BIBREF10"
445
- }
446
- ],
447
- "ref_spans": [],
448
- "eq_spans": [],
449
- "section": "Experiments and Results",
450
- "sec_num": "4"
451
- },
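A minimal sketch of the 10-fold cross-validation comparison, with scikit-learn learners standing in for the tools named above (IB, the ID3/C4.5/C5.0 family, Naive Bayes, SVM_Light); the learner mapping is approximate and the scores will not reproduce the paper's figures.

```python
# Sketch: compare stand-in learners on the 0/1 result vectors with
# 10-fold cross-validation.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier

def compare_learners(X, y):
    learners = {
        "kNN (IB-like)": KNeighborsClassifier(n_neighbors=5),
        "decision tree (ID3/C4.5-like)": DecisionTreeClassifier(),
        "Naive Bayes": BernoulliNB(),          # suits binary feature vectors
        "linear SVM (SVM_Light-like)": LinearSVC(),
    }
    for name, clf in learners.items():
        scores = cross_val_score(clf, X, y, cv=10)
        print(f"{name}: mean accuracy {np.mean(scores):.3f}")
```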
452
- {
453
- "text": "It would be desirable to provide explanations for the behavior of the SML algorithms on our data. As we have emphasized in Section 2, general methods of explanation do not exist yet. In the application in hand, we found it difficult to account for the effects of e.g. ungrammatical text or redundant categories. For the time being, we can only offer some speculative and inconclusive assumptions: Some of the tools performing badly -IB, ID3, and the Naive Bayes inducer of the MLC++ library -have no or little pruning ability. With rarely occurring data, this leads to very low generalization rates, which again is a problem of overfitting. This suggests that a more canonical representation for the many ways of expressing a technical problem should be sought for. Would more extensive linguistic preprocessing help?",
454
- "cite_spans": [],
455
- "ref_spans": [],
456
- "eq_spans": [],
457
- "section": "Experiments and Results",
458
- "sec_num": "4"
459
- },
460
- {
461
- "text": "Other tests not reported in Table 1 looked at improvements through more general and sophisticated STP such as chunk parsing. The results were very discouraging, leading to a significant decrease compared to MorphAna. We explain this with the bad compliance of e-mall texts to grammatical standards (cf. the example in Figure 2) .",
462
- "cite_spans": [],
463
- "ref_spans": [
464
- {
465
- "start": 28,
466
- "end": 35,
467
- "text": "Table 1",
468
- "ref_id": null
469
- },
470
- {
471
- "start": 318,
472
- "end": 327,
473
- "text": "Figure 2)",
474
- "ref_id": null
475
- }
476
- ],
477
- "eq_spans": [],
478
- "section": "Experiments and Results",
479
- "sec_num": "4"
480
- },
481
- {
482
- "text": "However, the practical usefulness of chunk parsing or even deeper language understanding such as se-mantic analysis may be questioned in general: In a moving domain, the coverage of linguistic knowledge will always be incomplete, as it would be too expensive for a call center to have language technology experts keep pace with the occurrence of new to~ ics. Thus the preprocessing results will often differ for e-mails expressing the same problem and hence not be useful for SML.",
483
- "cite_spans": [],
484
- "ref_spans": [],
485
- "eq_spans": [],
486
- "section": "Experiments and Results",
487
- "sec_num": "4"
488
- },
489
- {
490
- "text": "As a result of the tests in our application domain, we identified a favorite statistical tool and found that task-specific linguistic preprocessing is encouraging, while general STP is not.",
491
- "cite_spans": [],
492
- "ref_spans": [],
493
- "eq_spans": [],
494
- "section": "Experiments and Results",
495
- "sec_num": "4"
496
- },
497
- {
498
- "text": "Implementation and Use In this section we describe the integration of the ICC-MAIL system into the workflow of the call center of AOL Bertelsmann Online GmbH & Co. KG, which answers requests about the German version of AOL software. A client/server solution was built that allows the call center agents to connect as clients to the ICe-MAIL server, which implements the system described in Section 3. For this purpose, it was necessary to",
499
- "cite_spans": [],
500
- "ref_spans": [],
501
- "eq_spans": [],
502
- "section": "5",
503
- "sec_num": null
504
- },
505
- {
506
- "text": "\u2022 connect the server module to AOL's own Sybase database that delivers the incoming mail and dispatches the outgoing answers, and to Ice-MAIL'S own database that stores the classified e-mall texts;",
507
- "cite_spans": [],
508
- "ref_spans": [],
509
- "eq_spans": [],
510
- "section": "5",
511
- "sec_num": null
512
- },
513
- {
514
- "text": "\u2022 design the GUI of the client module in a selfexplanatory and easy to use way (cf. Figure 2 ).",
515
- "cite_spans": [],
516
- "ref_spans": [
517
- {
518
- "start": 84,
519
- "end": 92,
520
- "text": "Figure 2",
521
- "ref_id": null
522
- }
523
- ],
524
- "eq_spans": [],
525
- "section": "5",
526
- "sec_num": null
527
- },
528
- {
529
- "text": "The agent reads in an e-mall and starts ICe-MAIL using GUI buttons. She verifies the correctness of the suggested answer, displaying and perhaps selecting alternative solutions. If the agent finds the appropriate answer within these proposals, the associated text is filled in at the correct position of the answer e-mall. If, on the other hand, no proposed solution is found to be adequate, the ICe-MAIL tool can still be used to manually select any text block Before deinstalling the AOL-Soltware please check your folders for -downloaded data -saved passwords and copy them into a backup folder. Then remove the AOL-Software using the Windows Control Panel and reinstall it from your CD. Alter reinstallation please copy the data from the bac~p folder into the dght destinations. Figure 2 : The GUI of the ICe-MAIL Client. All labels and texts were translated by the authors. The English input is based on the following original text, which is similarly awkward though understandable: Wie mache ich zurn mein Programm total deinstalieren, und wieder neu instalierem, mit, wen Sic mir senden Version 4.0 ?????????????? . The suggested answer text is associated with the category named \"Delete & Reinstall AOL 4.0\". Four alternative answers can be selected using the tabs. The left-hand side window displays the active category in context. from the database. The ICe-MAIL client had to provide the functionality of the tool already in use since an additional tool was not acceptable to the agents, who are working under time pressure.",
530
- "cite_spans": [
531
- {
532
- "start": 1012,
533
- "end": 1041,
534
- "text": "Programm total deinstalieren,",
535
- "ref_id": null
536
- },
537
- {
538
- "start": 1042,
539
- "end": 1069,
540
- "text": "und wieder neu instalierem,",
541
- "ref_id": null
542
- },
543
- {
544
- "start": 1070,
545
- "end": 1074,
546
- "text": "mit,",
547
- "ref_id": null
548
- },
549
- {
550
- "start": 1075,
551
- "end": 1120,
552
- "text": "wen Sic mir senden Version 4.0 ??????????????",
553
- "ref_id": null
554
- }
555
- ],
556
- "ref_spans": [
557
- {
558
- "start": 783,
559
- "end": 791,
560
- "text": "Figure 2",
561
- "ref_id": null
562
- }
563
- ],
564
- "eq_spans": [],
565
- "section": "5",
566
- "sec_num": null
567
- },
568
- {
569
- "text": "In the answer e-mail window, the original e-mail is automatically added as a quote. If an e-mail contains several questions, the classification process can be repeated by marking each question and iteratively applying the process to the marked part. The agent can edit the suggested texts before sending them off. In each case, the classified text together with the selected category is stored in the ICe-MAIL database for use in future learning steps.",
570
- "cite_spans": [],
571
- "ref_spans": [],
572
- "eq_spans": [],
573
- "section": "5",
574
- "sec_num": null
575
- },
576
- {
577
- "text": "Other features of the ICe-MAIL client module include a spell checker and a history view. The latter displays not only the previous e-mails of the same author but also the solutions that have been proposed and the elapsed time before an answer was sent.",
578
- "cite_spans": [],
579
- "ref_spans": [],
580
- "eq_spans": [],
581
- "section": "5",
582
- "sec_num": null
583
- },
584
- {
585
- "text": "The assumed average time for an agent to answer an e-mail is a bit more than two minutes with AOL's own mail processing system. ~ With the ICC-MAIL system the complete cycle of fetching the mail, checking the proposed solutions, choosing the appropriate solutions, inserting additional text fragments and sending the answer back can probably be achieved in half the time. Systematic tests sup-~This system does not include automatic analysis of mails. porting this claim are not completed yet, s but the following preliminary results are encouraging:",
586
- "cite_spans": [],
587
- "ref_spans": [],
588
- "eq_spans": [],
589
- "section": "5",
590
- "sec_num": null
591
- },
592
- {
593
- "text": "\u2022 A test under real-time conditions at the callcenter envisaged the use of the ICe-MAIL system as a mail tool only, i.e. without taking advantage of the system's intelligence. It showed that the surface and the look-and-feel is accepted and the functionality corresponds to the real-time needs of the call center agents, as users were slightly faster than within their usual environment.",
594
- "cite_spans": [],
595
- "ref_spans": [],
596
- "eq_spans": [],
597
- "section": "5",
598
- "sec_num": null
599
- },
600
- {
601
- "text": "\u2022 A preliminary test of the throughput achieved by using the STP and SML technology in Ice-MAIL showed that experienced users take about 50-70 seconds on average for one cycle, as described above. This figure was gained through experiments with three users over a duration of about one hour each.",
602
- "cite_spans": [],
603
- "ref_spans": [],
604
- "eq_spans": [],
605
- "section": "5",
606
- "sec_num": null
607
- },
608
- {
609
- "text": "Using the system with a constant set of categories will improve its accuracy after repeating the off-line learning step. If a new category is introduced, the accuracy will slightly decline until 30 documents are manually classified and the category is automatically included into a new classifier. Relearning may take place at regular intervals. The definition of new categories must be fed into ICe-MAIL by a \"knowledge 8As of end of February 2000.",
610
- "cite_spans": [],
611
- "ref_spans": [],
612
- "eq_spans": [],
613
- "section": "5",
614
- "sec_num": null
615
- },
616
- {
617
- "text": "engineer\", who maintains the system. The effects of new categories and new data have not been tested yet.",
618
- "cite_spans": [],
619
- "ref_spans": [],
620
- "eq_spans": [],
621
- "section": "5",
622
- "sec_num": null
623
- },
624
- {
625
- "text": "The optimum performance of ICe-MAIL can be achieved only with a well-maintained category system. For a call center, this may be a difficult task to achieve, espescially under severe time pressure, but it will pay off. In particular, all new categories should be added, outdated ones should be removed, and redundant ones merged. Agents should only use these categories and no others. The organizational structure of the team should reflect this by defining the tasks of the \"knowledge engineer\" and her interactions with the agents.",
626
- "cite_spans": [],
627
- "ref_spans": [],
628
- "eq_spans": [],
629
- "section": "5",
630
- "sec_num": null
631
- },
632
- {
633
- "text": "Conclusions and Future Work",
634
- "cite_spans": [],
635
- "ref_spans": [],
636
- "eq_spans": [],
637
- "section": "6",
638
- "sec_num": null
639
- },
640
- {
641
- "text": "We have presented new combinations of STP and SML methods to classify unrestricted e-mail text according to a changing set of categories. The current accuracy of the ICC-MAIL system is 78% (correct solution among the top five proposals), corresponding to an overall performance of 73% since ICC-MAIL processes only 94% of the incoming e-mails. The accuracy improves with usage, since each relearning step will yield better classifiers. The accuracy is expected to approximate that of the agents, but not improve on it. With ICe-MAIL, the performance of an experienced agent can approximately be doubled.",
642
- "cite_spans": [],
643
- "ref_spans": [],
644
- "eq_spans": [],
645
- "section": "6",
646
- "sec_num": null
647
- },
648
- {
649
- "text": "The system is currently undergoing extensive tests at the call center of AOL Bertelsmann Online. Details about the development of the performance depending on the throughput and change of categories are expected to be available by mid 2000.",
650
- "cite_spans": [],
651
- "ref_spans": [],
652
- "eq_spans": [],
653
- "section": "6",
654
- "sec_num": null
655
- },
656
- {
657
- "text": "Technically, we expect improvements from the following areas of future work.",
658
- "cite_spans": [],
659
- "ref_spans": [],
660
- "eq_spans": [],
661
- "section": "6",
662
- "sec_num": null
663
- },
664
- {
665
- "text": "\u2022 Further task-specific heuristics aiming at general structural linguistic properties should be defined. This includes heuristics for the identification of multiple requests in a single e-mail that could be based on key words and key phrases as well as on the analysis of the document structure.",
666
- "cite_spans": [],
667
- "ref_spans": [],
668
- "eq_spans": [],
669
- "section": "6",
670
- "sec_num": null
671
- },
672
- {
673
- "text": "\u2022 Our initial experiments with the integration of GermaNet (Hamp and Feldweg, 1997) , the evolving German version of WordNet, seem to confirm the positive results described for Word-Net (de Buenaga Rodriguez et al., 1997) and will thus be extended.",
674
- "cite_spans": [
675
- {
676
- "start": 59,
677
- "end": 83,
678
- "text": "(Hamp and Feldweg, 1997)",
679
- "ref_id": "BIBREF5"
680
- },
681
- {
682
- "start": 177,
683
- "end": 221,
684
- "text": "Word-Net (de Buenaga Rodriguez et al., 1997)",
685
- "ref_id": null
686
- }
687
- ],
688
- "ref_spans": [],
689
- "eq_spans": [],
690
- "section": "6",
691
- "sec_num": null
692
- },
693
- {
694
- "text": "\u2022 A reorganization of the existing three-level category system into a semantically consistent tree structure would allow us to explore the nonterminal nodes of the tree for multi-layered SML. This places additional requirements on the knowledge engineering task and thus needs to be thoroughly investigated for pay-off.",
695
- "cite_spans": [],
696
- "ref_spans": [],
697
- "eq_spans": [],
698
- "section": "6",
699
- "sec_num": null
700
- },
701
- {
702
- "text": "\u2022 Where system-generated answers are acceptable to customers, a straightforward extension of ICe-MAIL can provide this functionality. For the application in hand, this was not the case.",
703
- "cite_spans": [],
704
- "ref_spans": [],
705
- "eq_spans": [],
706
- "section": "6",
707
- "sec_num": null
708
- },
709
- {
710
- "text": "The potential of the technology presented extends beyond call center applications. We intend to explore its use within an information broking assistant in document classification. In a further industrial project with German Telekom, the ICC-MAIL technology will be extended to process multi-lingual press releases. The nature of these documents will allow us to explore the application of more sophisticated language technologies during linguistic preprocessing.",
711
- "cite_spans": [],
712
- "ref_spans": [],
713
- "eq_spans": [],
714
- "section": "6",
715
- "sec_num": null
716
- }
717
- ],
718
- "back_matter": [
719
- {
720
- "text": "We are grateful to our colleagues Giinter Neumann, Matthias Fischmann, Volker Morbach, and Matthias Rinck for fruitful discussions and for support with sines modules. This work was partially supported by a grant of the Minister of Economy and Commerce of the Saarland, Germany, to the project ICC.",
721
- "cite_spans": [],
722
- "ref_spans": [],
723
- "eq_spans": [],
724
- "section": "Acknowledgments",
725
- "sec_num": null
726
- }
727
- ],
728
- "bib_entries": {
729
- "BIBREF0": {
730
- "ref_id": "b0",
731
- "title": "Tolerating noisy, irrelevant and novel attributes in instance based learning algorithms",
732
- "authors": [
733
- {
734
- "first": "W",
735
- "middle": [],
736
- "last": "David",
737
- "suffix": ""
738
- },
739
- {
740
- "first": "",
741
- "middle": [],
742
- "last": "Aha",
743
- "suffix": ""
744
- }
745
- ],
746
- "year": 1992,
747
- "venue": "International Journal of Man-Machine Studies",
748
- "volume": "36",
749
- "issue": "1",
750
- "pages": "267--287",
751
- "other_ids": {},
752
- "num": null,
753
- "urls": [],
754
- "raw_text": "David W. Aha. 1992. Tolerating noisy, irrelevant and novel attributes in instance based learning al- gorithms. International Journal of Man-Machine Studies, 36(1), pages 267-287.",
755
- "links": null
756
- },
757
- "BIBREF1": {
758
- "ref_id": "b1",
759
- "title": "Facile: Classifying texts integrating pattern matching and information extraction",
760
- "authors": [
761
- {
762
- "first": "Fabio",
763
- "middle": [],
764
- "last": "Ciravegna",
765
- "suffix": ""
766
- },
767
- {
768
- "first": "Alberto",
769
- "middle": [],
770
- "last": "Lavelli",
771
- "suffix": ""
772
- },
773
- {
774
- "first": "Nadia",
775
- "middle": [],
776
- "last": "Mana",
777
- "suffix": ""
778
- },
779
- {
780
- "first": "Johannes",
781
- "middle": [],
782
- "last": "Matiasek",
783
- "suffix": ""
784
- },
785
- {
786
- "first": "Luca",
787
- "middle": [],
788
- "last": "Gilardoni",
789
- "suffix": ""
790
- },
791
- {
792
- "first": "Silvia",
793
- "middle": [],
794
- "last": "Mazza",
795
- "suffix": ""
796
- },
797
- {
798
- "first": "Massimo",
799
- "middle": [],
800
- "last": "Ferraro",
801
- "suffix": ""
802
- },
803
- {
804
- "first": "William",
805
- "middle": [
806
- "J"
807
- ],
808
- "last": "Black",
809
- "suffix": ""
810
- },
811
- {
812
- "first": "Fabio",
813
- "middle": [],
814
- "last": "Rjnaldi",
815
- "suffix": ""
816
- },
817
- {
818
- "first": "David",
819
- "middle": [],
820
- "last": "Mowatt",
821
- "suffix": ""
822
- }
823
- ],
824
- "year": 1999,
825
- "venue": "Proceedings of IJCAI'99",
826
- "volume": "",
827
- "issue": "",
828
- "pages": "890--895",
829
- "other_ids": {},
830
- "num": null,
831
- "urls": [],
832
- "raw_text": "Fabio Ciravegna, Alberto Lavelli, Nadia Mana, Jo- hannes Matiasek, Luca Gilardoni, Silvia Mazza, Massimo Ferraro, William J.Black, Fabio RJ- naldi, and David Mowatt. 1999. Facile: Classi- fying texts integrating pattern matching and in- formation extraction. In Proceedings of IJCAI'99, Stockholm, pages 890-895.",
833
- "links": null
834
- },
835
- "BIBREF2": {
836
- "ref_id": "b2",
837
- "title": "Fast effective rule induction",
838
- "authors": [
839
- {
840
- "first": "William",
841
- "middle": [
842
- "W"
843
- ],
844
- "last": "Cohen",
845
- "suffix": ""
846
- }
847
- ],
848
- "year": 1995,
849
- "venue": "Proceedings of the Twelfth International Conference on Machine Learning",
850
- "volume": "",
851
- "issue": "",
852
- "pages": "",
853
- "other_ids": {},
854
- "num": null,
855
- "urls": [],
856
- "raw_text": "William W. Cohen. 1995. Fast effective rule induc- tion. In Proceedings of the Twelfth International Conference on Machine Learning, Lake Tahoe, California.",
857
- "links": null
858
- },
859
- "BIBREF3": {
860
- "ref_id": "b3",
861
- "title": "Using WordNet to complement training information in text categorization",
862
- "authors": [
863
- {
864
- "first": "Manuel",
865
- "middle": [],
866
- "last": "De",
867
- "suffix": ""
868
- },
869
- {
870
- "first": "Buenaga",
871
- "middle": [],
872
- "last": "Rodriguez",
873
- "suffix": ""
874
- },
875
- {
876
- "first": "Jose",
877
- "middle": [
878
- "Maria"
879
- ],
880
- "last": "Gomez-Hidalgo",
881
- "suffix": ""
882
- },
883
- {
884
- "first": "Belen",
885
- "middle": [],
886
- "last": "Diaz-Agudo",
887
- "suffix": ""
888
- }
889
- ],
890
- "year": 1997,
891
- "venue": "Proceedings of the Second International Conference on Recent Advances in Natural Language Processing",
892
- "volume": "",
893
- "issue": "",
894
- "pages": "",
895
- "other_ids": {},
896
- "num": null,
897
- "urls": [],
898
- "raw_text": "Manuel de Buenaga Rodriguez, Jose Maria Gomez- Hidalgo, and Belen Diaz-Agudo. 1997. Using WordNet to complement training information in text categorization. In Proceedings of the Second International Conference on Recent Advances in Natural Language Processing, Montreal, Canada.",
899
- "links": null
900
- },
901
- "BIBREF4": {
902
- "ref_id": "b4",
903
- "title": "The Estimation of Probabilities. An Essay on Modern Bayesian Methods",
904
- "authors": [
905
- {
906
- "first": "J",
907
- "middle": [],
908
- "last": "Good",
909
- "suffix": ""
910
- }
911
- ],
912
- "year": 1965,
913
- "venue": "",
914
- "volume": "",
915
- "issue": "",
916
- "pages": "",
917
- "other_ids": {},
918
- "num": null,
919
- "urls": [],
920
- "raw_text": "J. Good. 1965. The Estimation of Probabilities. An Essay on Modern Bayesian Methods. MIT- Press.",
921
- "links": null
922
- },
923
- "BIBREF5": {
924
- "ref_id": "b5",
925
- "title": "GermaNet -a lexical-semantic net for German",
926
- "authors": [
927
- {
928
- "first": "Birgit",
929
- "middle": [],
930
- "last": "Hamp",
931
- "suffix": ""
932
- },
933
- {
934
- "first": "Helmut",
935
- "middle": [],
936
- "last": "Feldweg",
937
- "suffix": ""
938
- }
939
- ],
940
- "year": 1997,
941
- "venue": "Proceedings of A CL workshop Automatic Information Extraction and Building of Lexical Semantic Resources for NLP Applications",
942
- "volume": "",
943
- "issue": "",
944
- "pages": "",
945
- "other_ids": {},
946
- "num": null,
947
- "urls": [],
948
- "raw_text": "Birgit Hamp and Helmut Feldweg. 1997. GermaNet -a lexical-semantic net for German. In Proceed- ings of A CL workshop Automatic Information Ex- traction and Building of Lexical Semantic Re- sources for NLP Applications, Madrid, Spain",
949
- "links": null
950
- },
951
- "BIBREF6": {
952
- "ref_id": "b6",
953
- "title": "Text categorization with support vector machines -learning with meany relevant features",
954
- "authors": [
955
- {
956
- "first": "Thorsten",
957
- "middle": [],
958
- "last": "Joachims",
959
- "suffix": ""
960
- }
961
- ],
962
- "year": 1998,
963
- "venue": "Proceedings of the European Conference on Machine Learning (ECML)",
964
- "volume": "",
965
- "issue": "",
966
- "pages": "137--142",
967
- "other_ids": {},
968
- "num": null,
969
- "urls": [],
970
- "raw_text": "Thorsten Joachims. 1998. Text categorization with support vector machines -learning with meany relevant features. In Proceedings of the Euro- pean Conference on Machine Learning (ECML), Chemnitz, Germany, pages 137-142.",
971
- "links": null
972
- },
973
- "BIBREF7": {
974
- "ref_id": "b7",
975
- "title": "MLC++ Machine Learning library in C++",
976
- "authors": [
977
- {
978
- "first": "Ronny",
979
- "middle": [],
980
- "last": "Kohavi",
981
- "suffix": ""
982
- },
983
- {
984
- "first": "Dan",
985
- "middle": [],
986
- "last": "Sommerfield",
987
- "suffix": ""
988
- }
989
- ],
990
- "year": 1996,
991
- "venue": "",
992
- "volume": "",
993
- "issue": "",
994
- "pages": "",
995
- "other_ids": {},
996
- "num": null,
997
- "urls": [],
998
- "raw_text": "Ronny Kohavi and Dan Sommerfield, 1996. MLC++ Machine Learning library in C++.",
999
- "links": null
1000
- },
1001
- "BIBREF8": {
1002
- "ref_id": "b8",
1003
- "title": "LVQ-PAK the learning vector quantization program package",
1004
- "authors": [
1005
- {
1006
- "first": "Teuvo",
1007
- "middle": [],
1008
- "last": "Kohonen",
1009
- "suffix": ""
1010
- },
1011
- {
1012
- "first": "Jussi",
1013
- "middle": [],
1014
- "last": "Hynninen",
1015
- "suffix": ""
1016
- },
1017
- {
1018
- "first": "Jari",
1019
- "middle": [],
1020
- "last": "Kangas",
1021
- "suffix": ""
1022
- },
1023
- {
1024
- "first": "Jorma",
1025
- "middle": [],
1026
- "last": "Laaksonen",
1027
- "suffix": ""
1028
- },
1029
- {
1030
- "first": "Kari",
1031
- "middle": [],
1032
- "last": "Torkkola",
1033
- "suffix": ""
1034
- }
1035
- ],
1036
- "year": 1996,
1037
- "venue": "",
1038
- "volume": "",
1039
- "issue": "",
1040
- "pages": "",
1041
- "other_ids": {},
1042
- "num": null,
1043
- "urls": [],
1044
- "raw_text": "Teuvo Kohonen, Jussi Hynninen, Jari Kangas, Jorma Laaksonen, and Kari Torkkola. 1996. LVQ-PAK the learning vector quantization pro- gram package. Technical Report A30, Helsinki University of Technology.",
1045
- "links": null
1046
- },
1047
- "BIBREF9": {
1048
- "ref_id": "b9",
1049
- "title": "An information extraction core system for real world German text processing",
1050
- "authors": [
1051
- {
1052
- "first": "G/Inter",
1053
- "middle": [],
1054
- "last": "Neumann",
1055
- "suffix": ""
1056
- },
1057
- {
1058
- "first": "Rolf",
1059
- "middle": [],
1060
- "last": "Backofen",
1061
- "suffix": ""
1062
- },
1063
- {
1064
- "first": "Judith",
1065
- "middle": [],
1066
- "last": "Baur",
1067
- "suffix": ""
1068
- },
1069
- {
1070
- "first": "Markus",
1071
- "middle": [],
1072
- "last": "Becket",
1073
- "suffix": ""
1074
- },
1075
- {
1076
- "first": "Christian",
1077
- "middle": [],
1078
- "last": "Braun",
1079
- "suffix": ""
1080
- }
1081
- ],
1082
- "year": 1997,
1083
- "venue": "Proceedings of 5th ANLP",
1084
- "volume": "",
1085
- "issue": "",
1086
- "pages": "209--216",
1087
- "other_ids": {},
1088
- "num": null,
1089
- "urls": [],
1090
- "raw_text": "G/inter Neumann, Rolf Backofen, Judith Baur, Markus Becket, and Christian Braun. 1997. An information extraction core system for real world German text processing. In Proceedings of 5th ANLP, Washington, pages 209-216.",
1091
- "links": null
1092
- },
1093
- "BIBREF10": {
1094
- "ref_id": "b10",
1095
- "title": "Combining shallow text processing and macine learning in real world applications",
1096
- "authors": [
1097
- {
1098
- "first": "G/Inter",
1099
- "middle": [],
1100
- "last": "Neumann",
1101
- "suffix": ""
1102
- },
1103
- {
1104
- "first": "Sven",
1105
- "middle": [],
1106
- "last": "Schmeier",
1107
- "suffix": ""
1108
- }
1109
- ],
1110
- "year": 1999,
1111
- "venue": "Proceedings of IJCAI workshop on Machine Learning for Information Filtering",
1112
- "volume": "",
1113
- "issue": "",
1114
- "pages": "55--60",
1115
- "other_ids": {},
1116
- "num": null,
1117
- "urls": [],
1118
- "raw_text": "G/inter Neumann and Sven Schmeier. 1999. Com- bining shallow text processing and macine learn- ing in real world applications. In Proceedings of IJCAI workshop on Machine Learning for Infor- mation Filtering, Stockholm, pages 55-60.",
1119
- "links": null
1120
- },
1121
- "BIBREF11": {
1122
- "ref_id": "b11",
1123
- "title": "Readings in machine learning. Machine learning series",
1124
- "authors": [
1125
- {
1126
- "first": "J",
1127
- "middle": [
1128
- "R"
1129
- ],
1130
- "last": "Quinlan",
1131
- "suffix": ""
1132
- },
1133
- {
1134
- "first": "W",
1135
- "middle": [],
1136
- "last": "Jude",
1137
- "suffix": ""
1138
- },
1139
- {
1140
- "first": "Thomas",
1141
- "middle": [
1142
- "G"
1143
- ],
1144
- "last": "Dietterich",
1145
- "suffix": ""
1146
- }
1147
- ],
1148
- "year": 1986,
1149
- "venue": "",
1150
- "volume": "",
1151
- "issue": "",
1152
- "pages": "",
1153
- "other_ids": {},
1154
- "num": null,
1155
- "urls": [],
1156
- "raw_text": "J.R. Quinlan. 1986. Induction of Decision Trees. Reprinted in Shavlik, Jude W. and Dietterich, Thomas G, Readings in machine learning. Ma- chine learning series. Morgan Kaufmann (1990)",
1157
- "links": null
1158
- },
1159
- "BIBREF12": {
1160
- "ref_id": "b12",
1161
- "title": "C4.5: Programs for Machine Learning",
1162
- "authors": [
1163
- {
1164
- "first": "J",
1165
- "middle": [
1166
- "R"
1167
- ],
1168
- "last": "Quinlan",
1169
- "suffix": ""
1170
- }
1171
- ],
1172
- "year": 1992,
1173
- "venue": "",
1174
- "volume": "",
1175
- "issue": "",
1176
- "pages": "",
1177
- "other_ids": {},
1178
- "num": null,
1179
- "urls": [],
1180
- "raw_text": "J.R. Quinlan. 1992. C4.5: Programs for Machine Learning. Morgan Kaufmann, San Mateo, Cali- fornia.",
1181
- "links": null
1182
- },
1183
- "BIBREF13": {
1184
- "ref_id": "b13",
1185
- "title": "Bagging, Boosting and C4.5",
1186
- "authors": [
1187
- {
1188
- "first": "J",
1189
- "middle": [
1190
- "R"
1191
- ],
1192
- "last": "Quinlan",
1193
- "suffix": ""
1194
- }
1195
- ],
1196
- "year": 1996,
1197
- "venue": "Proceedings of AAAI'96",
1198
- "volume": "",
1199
- "issue": "",
1200
- "pages": "725--730",
1201
- "other_ids": {},
1202
- "num": null,
1203
- "urls": [],
1204
- "raw_text": "J.R. Quinlan. 1996. Bagging, Boosting and C4.5. In Proceedings of AAAI'96, Portland, pages 725-730.",
1205
- "links": null
1206
- },
1207
- "BIBREF14": {
1208
- "ref_id": "b14",
1209
- "title": "The Nature of Statistical Learning Theory",
1210
- "authors": [
1211
- {
1212
- "first": "Vladimir",
1213
- "middle": [
1214
- "N"
1215
- ],
1216
- "last": "Vapnik",
1217
- "suffix": ""
1218
- }
1219
- ],
1220
- "year": 1995,
1221
- "venue": "",
1222
- "volume": "",
1223
- "issue": "",
1224
- "pages": "",
1225
- "other_ids": {},
1226
- "num": null,
1227
- "urls": [],
1228
- "raw_text": "Vladimir N. Vapnik. 1995. The Nature of Statistical Learning Theory. Springer.",
1229
- "links": null
1230
- },
1231
- "BIBREF15": {
1232
- "ref_id": "b15",
1233
- "title": "A neural network approach to topic spotting",
1234
- "authors": [
1235
- {
1236
- "first": "E",
1237
- "middle": [
1238
- "D"
1239
- ],
1240
- "last": "Wiener",
1241
- "suffix": ""
1242
- },
1243
- {
1244
- "first": "J",
1245
- "middle": [],
1246
- "last": "Pedersen",
1247
- "suffix": ""
1248
- },
1249
- {
1250
- "first": "A",
1251
- "middle": [
1252
- "S"
1253
- ],
1254
- "last": "Weigend",
1255
- "suffix": ""
1256
- }
1257
- ],
1258
- "year": 1995,
1259
- "venue": "Proceedings of the SDAIR",
1260
- "volume": "",
1261
- "issue": "",
1262
- "pages": "",
1263
- "other_ids": {},
1264
- "num": null,
1265
- "urls": [],
1266
- "raw_text": "E.D. Wiener, J. Pedersen, and A.S. Weigend. 1995. A neural network approach to topic spotting. In Proceedings of the SDAIR.",
1267
- "links": null
1268
- },
1269
- "BIBREF16": {
1270
- "ref_id": "b16",
1271
- "title": "A re-examination of text categorization methods",
1272
- "authors": [
1273
- {
1274
- "first": "Y",
1275
- "middle": [],
1276
- "last": "Yang",
1277
- "suffix": ""
1278
- },
1279
- {
1280
- "first": "Xin",
1281
- "middle": [],
1282
- "last": "Liu",
1283
- "suffix": ""
1284
- }
1285
- ],
1286
- "year": 1999,
1287
- "venue": "Proceedings of A CMSIGIR Conference on Research and Development in Information Retrieval",
1288
- "volume": "",
1289
- "issue": "",
1290
- "pages": "",
1291
- "other_ids": {},
1292
- "num": null,
1293
- "urls": [],
1294
- "raw_text": "Y. Yang and Xin Liu. 1999. A re-examination of text categorization methods. In Proceedings of A CMSIGIR Conference on Research and Devel- opment in Information Retrieval, Berkley, Calfor- nia.",
1295
- "links": null
1296
- },
1297
- "BIBREF17": {
1298
- "ref_id": "b17",
1299
- "title": "A comparative study on feature selection",
1300
- "authors": [
1301
- {
1302
- "first": "Y",
1303
- "middle": [],
1304
- "last": "Yang",
1305
- "suffix": ""
1306
- },
1307
- {
1308
- "first": "J",
1309
- "middle": [
1310
- "P"
1311
- ],
1312
- "last": "Pedersen",
1313
- "suffix": ""
1314
- }
1315
- ],
1316
- "year": 1997,
1317
- "venue": "Proceedings of the Fourteenth International Conference on Machine Learning (ICML '97)",
1318
- "volume": "",
1319
- "issue": "",
1320
- "pages": "",
1321
- "other_ids": {},
1322
- "num": null,
1323
- "urls": [],
1324
- "raw_text": "Y. Yang and J.P. Pedersen. 1997. A comparative study on feature selection. In Proceedings of the Fourteenth International Conference on Machine Learning (ICML '97).",
1325
- "links": null
1326
- },
1327
- "BIBREF18": {
1328
- "ref_id": "b18",
1329
- "title": "An evaluation of statistical approaches to text categorization. Information Retrieval",
1330
- "authors": [
1331
- {
1332
- "first": "Y",
1333
- "middle": [],
1334
- "last": "Yang",
1335
- "suffix": ""
1336
- }
1337
- ],
1338
- "year": 1999,
1339
- "venue": "Journal",
1340
- "volume": "",
1341
- "issue": "",
1342
- "pages": "",
1343
- "other_ids": {},
1344
- "num": null,
1345
- "urls": [],
1346
- "raw_text": "Y. Yang. 1999. An evaluation of statistical ap- proaches to text categorization. Information Re- trieval Journal (May 1999).",
1347
- "links": null
1348
- }
1349
- },
1350
- "ref_entries": {
1351
- "FIGREF0": {
1352
- "uris": null,
1353
- "num": null,
1354
- "text": "Architecture of the ICC-MAIL System.",
1355
- "type_str": "figure"
1356
- }
1357
- }
1358
- }
1359
- }
Full_text_JSON/prefixA/json/A00/A00-1023.json DELETED
@@ -1,777 +0,0 @@
1
- {
2
- "paper_id": "A00-1023",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:19.233891Z"
6
- },
7
- "title": "A Question Answering System Supported by Information Extraction*",
8
- "authors": [
9
- {
10
- "first": "Rohini",
11
- "middle": [],
12
- "last": "Srihari",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Cymfony Inc",
17
- "location": {
18
- "addrLine": "5500 Main Street Williamsville",
19
- "postCode": "14221",
20
- "region": "NY"
21
- }
22
- },
23
- "email": "[email protected]"
24
- },
25
- {
26
- "first": "Wei",
27
- "middle": [],
28
- "last": "Li",
29
- "suffix": "",
30
- "affiliation": {
31
- "laboratory": "",
32
- "institution": "Cymfony Inc",
33
- "location": {
34
- "addrLine": "5500 Main Street Williamsville",
35
- "postCode": "NY14221"
36
- }
37
- },
38
- "email": ""
39
- }
40
- ],
41
- "year": "",
42
- "venue": null,
43
- "identifiers": {},
44
- "abstract": "This paper discusses an information extraction (IE) system, Textract, in natural language (NL) question answering (QA) and examines the role of IE in QA application. It shows: (i) Named Entity tagging is an important component for QA, (ii) an NL shallow parser provides a structural basis for questions, and (iii) high-level domain independent IE can result in a QA breakthrough.",
45
- "pdf_parse": {
46
- "paper_id": "A00-1023",
47
- "_pdf_hash": "",
48
- "abstract": [
49
- {
50
- "text": "This paper discusses an information extraction (IE) system, Textract, in natural language (NL) question answering (QA) and examines the role of IE in QA application. It shows: (i) Named Entity tagging is an important component for QA, (ii) an NL shallow parser provides a structural basis for questions, and (iii) high-level domain independent IE can result in a QA breakthrough.",
51
- "cite_spans": [],
52
- "ref_spans": [],
53
- "eq_spans": [],
54
- "section": "Abstract",
55
- "sec_num": null
56
- }
57
- ],
58
- "body_text": [
59
- {
60
- "text": "With the explosion of information in Internet, Natural language QA is recognized as a capability with great potential. Traditionally, QA has attracted many AI researchers, but most QA systems developed are toy systems or games confined to lab and a very restricted domain. More recently, Text Retrieval Conference (TREC-8) designed a QA track to stimulate the research for real world application.",
61
- "cite_spans": [],
62
- "ref_spans": [],
63
- "eq_spans": [],
64
- "section": "Introduction",
65
- "sec_num": null
66
- },
67
- {
68
- "text": "Due to little linguistic support from text analysis, conventional IR systems or search engines do not really perform the task of information retrieval; they in fact aim at only document retrieval. The following quote from the QA Track Specifications (www.research.att.com/ -singhal/qa-track-spec.txt) in the TREC community illustrates this point.",
69
- "cite_spans": [],
70
- "ref_spans": [],
71
- "eq_spans": [],
72
- "section": "Introduction",
73
- "sec_num": null
74
- },
75
- {
76
- "text": "Current information retrieval systems allow us to locate documents that might contain the pertinent information, but most of them leave it to the user to extract the useful information from a ranked list. This leaves the (often unwilling) user with a relatively large amount of text to consume. There is an urgent need for tools that would reduce the amount of text one might have to read in order to obtain the desired information. This track aims at doing exactly that for a special (and popular) class of information seeking behavior: QUESTION ANSWERING. People have questions and they need answers, not documents. Automatic question answering will definitely be a significant advance in the state-of-art information retrieval technology. Kupiec (1993) presented a QA system MURAX using an on-line encyclopedia. This system used the technology of robust shallow parsing but suffered from the lack of basic information extraction support. In fact, the most siginifcant IE advance, namely the NE (Named Entity) technology, occured after Kupiec (1993) , thanks to the MUC program (MUC-7 1998). High-level IE technology beyond NE has not been in the stage of possible application until recently.",
77
- "cite_spans": [
78
- {
79
- "start": 485,
80
- "end": 498,
81
- "text": "(and popular)",
82
- "ref_id": null
83
- },
84
- {
85
- "start": 742,
86
- "end": 755,
87
- "text": "Kupiec (1993)",
88
- "ref_id": "BIBREF4"
89
- },
90
- {
91
- "start": 1038,
92
- "end": 1051,
93
- "text": "Kupiec (1993)",
94
- "ref_id": "BIBREF4"
95
- }
96
- ],
97
- "ref_spans": [],
98
- "eq_spans": [],
99
- "section": "Introduction",
100
- "sec_num": null
101
- },
102
- {
103
- "text": "AskJeeves launched a QA portal (www.askjeeves.com). It is equipped with a fairly sophisticated natural language question parser, but it does not provide direct answers to the asked questions. Instead, it directs the user to the relevant web pages, just as the traditional search engine does. In this sense, AskJeeves has only done half of the job for QA.",
104
- "cite_spans": [],
105
- "ref_spans": [],
106
- "eq_spans": [],
107
- "section": "Introduction",
108
- "sec_num": null
109
- },
110
- {
111
- "text": "We believe that QA is an ideal test bed for demonstrating the power of IE. There is a natural co-operation between IE and IR; we regard QA as one major intelligence which IE can offer IR.",
112
- "cite_spans": [],
113
- "ref_spans": [],
114
- "eq_spans": [],
115
- "section": "Introduction",
116
- "sec_num": null
117
- },
118
- {
119
- "text": "An important question then is, what type of IE can support IR in QA and how well does it support it? This forms the major topic of this paper. We structure the remaining part of the paper as follows. In Section 1, we first give an overview of the underlying IE technology which our organization has been developing. Section 2 discusses the QA system. Section 3 describes the limitation of the current system. Finally, in Section 4, we propose a more sophisticated QA system supported by three levels of IE.",
120
- "cite_spans": [],
121
- "ref_spans": [],
122
- "eq_spans": [],
123
- "section": "Introduction",
124
- "sec_num": null
125
- },
126
- {
127
- "text": "The last decade has seen great advance and interest in the area of IE. In the US, the DARPA sponsored Tipster Text Program [Grishman 1997 ] and the Message Understanding Conferences (MUC) [MUC-7 1998 ] have been the driving force for developing this technology. In fact, the MUC specifications for various IE tasks have become de facto standards in the IE research community. It is therefore necessary to present our IE effort in the context of the MUC program. MUC divides IE into distinct tasks, namely, NE (Named Entity), TE (Template Element), TR (Template Relation), CO (Co-reference), and ST (Scenario Templates) [Chinchor & Marsh 1998 ]. Our proposal for three levels of IE is modelled after the MUC standards using MUC-style representation. However, we have modified the MUC IE task definitions in order to make them more useful and more practical. More precisely, we propose a hierarchical, 3-level architecture for developing a kernel IE system which is domain-independent throughout.",
128
- "cite_spans": [
129
- {
130
- "start": 123,
131
- "end": 137,
132
- "text": "[Grishman 1997",
133
- "ref_id": "BIBREF2"
134
- },
135
- {
136
- "start": 188,
137
- "end": 199,
138
- "text": "[MUC-7 1998",
139
- "ref_id": null
140
- },
141
- {
142
- "start": 619,
143
- "end": 641,
144
- "text": "[Chinchor & Marsh 1998",
145
- "ref_id": "BIBREF1"
146
- }
147
- ],
148
- "ref_spans": [],
149
- "eq_spans": [],
150
- "section": "Overview of Textract IE",
151
- "sec_num": "1"
152
- },
153
- {
154
- "text": "The core of this system is a state-of-the-art NE tagger ], named Textract 1.0. The Textract NE tagger has achieved speed and accuracy comparable to that of the few deployed NE systems, such as NetOwl [Krupka & Hausman 1998 ] and Nymble [Bikel et al 1997] .",
155
- "cite_spans": [
156
- {
157
- "start": 200,
158
- "end": 222,
159
- "text": "[Krupka & Hausman 1998",
160
- "ref_id": "BIBREF3"
161
- },
162
- {
163
- "start": 236,
164
- "end": 254,
165
- "text": "[Bikel et al 1997]",
166
- "ref_id": "BIBREF0"
167
- }
168
- ],
169
- "ref_spans": [],
170
- "eq_spans": [],
171
- "section": "Overview of Textract IE",
172
- "sec_num": "1"
173
- },
174
- {
175
- "text": "It is to be noted that in our definition of NE, we significantly expanded the type of information to be extracted. In addition to all the MUC defined NE types (person, organization, location, time, date, money and percent), the following types/sub-types of information are also identified by the TextractNE module: These new sub-types provide a better foundation for defining multiple relationships between the identified entities and for supporting question answering functionality. For example, the key to a question processor is to identify the asking point (who, what, when, where, etc.) . In many cases, the asking point corresponds to an NE beyond the MUC definition, e.g. the how+adjective questions: how long (duration or length), how far (length), how often (frequency), how old (age), etc.",
176
- "cite_spans": [
177
- {
178
- "start": 561,
179
- "end": 591,
180
- "text": "(who, what, when, where, etc.)",
181
- "ref_id": null
182
- }
183
- ],
184
- "ref_spans": [],
185
- "eq_spans": [],
186
- "section": "Overview of Textract IE",
187
- "sec_num": "1"
188
- },
189
- {
190
- "text": "\u2022 duration,",
191
- "cite_spans": [],
192
- "ref_spans": [],
193
- "eq_spans": [],
194
- "section": "Overview of Textract IE",
195
- "sec_num": "1"
196
- },
197
- {
198
- "text": "Level-2 IE, or CE (Correlated Entity), is concerned with extracting pre-defined multiple relationships between the entities. Consider the person entity as an example; the TextractCE prototype is capable of extracting the key relationships such as age, gender, affiliation, position, birthtime, birth__place, spouse, parents, children, where.from, address, phone, fax, email, descriptors. As seen, the information in the CE represents a mini-CV or profile of the entity. In general, the CE template integrates and greatly enriches the information contained in MUC TE and TR.",
199
- "cite_spans": [
200
- {
201
- "start": 252,
202
- "end": 387,
203
- "text": "gender, affiliation, position, birthtime, birth__place, spouse, parents, children, where.from, address, phone, fax, email, descriptors.",
204
- "ref_id": null
205
- }
206
- ],
207
- "ref_spans": [],
208
- "eq_spans": [],
209
- "section": "Overview of Textract IE",
210
- "sec_num": "1"
211
- },
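As an illustration of the CE template as a data structure, here is a minimal sketch of a person profile; the field names follow the relationships listed above, but the class itself and its types are our own assumptions, not the Textract representation.

```python
# Hypothetical sketch of a CE (Correlated Entity) person profile.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class PersonCE:
    name: str
    age: Optional[str] = None
    gender: Optional[str] = None
    affiliation: Optional[str] = None
    position: Optional[str] = None
    birth_time: Optional[str] = None
    birth_place: Optional[str] = None
    spouse: Optional[str] = None
    parents: List[str] = field(default_factory=list)
    children: List[str] = field(default_factory=list)
    where_from: Optional[str] = None
    descriptors: List[str] = field(default_factory=list)
```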
212
- {
213
- "text": "The final goal of our IE effort is to further extract open-ended general events (GE, or level 3 IE) for information like who did what (to whom) when (or how often) and where. By general events, we refer to argument structures centering around verb notions plus the associated information of time/frequency and location. We show an example of our defined GE extracted from the text below:",
214
- "cite_spans": [],
215
- "ref_spans": [],
216
- "eq_spans": [],
217
- "section": "Overview of Textract IE",
218
- "sec_num": "1"
219
- },
220
- {
221
- "text": "Julian Hill, a research chemist whose accidental discovery of a tough, taffylike compound revolutionized everyday life after it proved its worth in warfare and courtship, died on Sunday in Hockessin, Del.",
222
- "cite_spans": [],
223
- "ref_spans": [],
224
- "eq_spans": [],
225
- "section": "Overview of Textract IE",
226
- "sec_num": "1"
227
- },
228
- {
229
- "text": "[1] <GE_TEMPLATE> := PREDICATE: die ARGUMENTI:",
230
- "cite_spans": [],
231
- "ref_spans": [],
232
- "eq_spans": [],
233
- "section": "Overview of Textract IE",
234
- "sec_num": "1"
235
- },
236
252
- {
253
- "text": "Hockessin, Del Figure 1 is the overall system architecture for the IE system Textract that our organization has been developing. The core of the system consists of three kernel IE modules and six linguistic modules. The multi-level linguistic modules serve as an underlying support system for different levels of IE. The IE results are stored in a database which is the basis for IE-related applications like QA, BR (Browsing, threading and visualization) and AS (Automatic Summarization). The approach to IE taken here, consists of a unique blend of machine learning and FST (finite state transducer) rule-based system [Roche & Schabes 1997] . By combining machine learning with an FST rule-based system, we are able to exploit the best of both paradigms while overcoming their respective weaknesses , Li & Srihari 2000 , where (LOCATION), how far (LENGTH). Therefore, the NE tagger has been proven to be very helpful.",
254
- "cite_spans": [
255
- {
256
- "start": 620,
257
- "end": 642,
258
- "text": "[Roche & Schabes 1997]",
259
- "ref_id": "BIBREF6"
260
- },
261
- {
262
- "start": 801,
263
- "end": 820,
264
- "text": ", Li & Srihari 2000",
265
- "ref_id": "BIBREF5"
266
- }
267
- ],
268
- "ref_spans": [
269
- {
270
- "start": 15,
271
- "end": 23,
272
- "text": "Figure 1",
273
- "ref_id": "FIGREF0"
274
- }
275
- ],
276
- "eq_spans": [],
277
- "section": "Overview of Textract IE",
278
- "sec_num": "1"
279
- },
280
- {
281
- "text": "I I I I I I F L-- ----~ .... . L ------. ------| ....",
282
- "cite_spans": [],
283
- "ref_spans": [],
284
- "eq_spans": [],
285
- "section": "Overview of Textract IE",
286
- "sec_num": "1"
287
- },
288
- {
289
- "text": "Of course, the NE of the targeted type is only necessary but not complete in answering such questions because NE by nature only extracts isolated individual entities from the text. Nevertheless, using even crude methods like \"the nearest NE to the queried key words\" or \"the NE and its related key words within the same line (or same paragraph, etc.)\", in most cases, the QA system was able to extract text portions which contained answers in the top five list. Figure 2 illustrates the system design of TextractQA Prototype.",
290
- "cite_spans": [],
291
- "ref_spans": [
292
- {
293
- "start": 462,
294
- "end": 470,
295
- "text": "Figure 2",
296
- "ref_id": null
297
- }
298
- ],
299
- "eq_spans": [],
300
- "section": "Apptication Modutes",
301
- "sec_num": null
302
- },
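A minimal sketch of the crude "nearest NE to the queried key words" heuristic mentioned above; the tuple layout of `entities` and all names are our assumptions about how an NE tagger's output might be represented.

```python
# Sketch: pick the entity of the wanted NE type closest (by character
# offset) to the centroid of the matched question keywords.
def nearest_answer(entities, keyword_offsets, wanted_type):
    """entities: list of (offset, text, ne_type); keyword_offsets: list of int."""
    candidates = [(off, txt) for off, txt, t in entities if t == wanted_type]
    if not candidates or not keyword_offsets:
        return None
    center = sum(keyword_offsets) / len(keyword_offsets)
    return min(candidates, key=lambda c: abs(c[0] - center))[1]
```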
303
- {
304
- "text": "There are two components for the QA prototype: Question Processor and Text Processor. The Text Matcher module links the two processing results and tries to find answers to the processed question. Matching is based on keywords, plus the NE type and their common location within a same sentence. The following is an example where the asking point does not correspond to any type of NE in our definition.",
305
- "cite_spans": [],
306
- "ref_spans": [],
307
- "eq_spans": [],
308
- "section": "Apptication Modutes",
309
- "sec_num": null
310
- },
311
- {
312
- "text": "[3] Why did David Koresh ask the FBI for a word processor ?",
313
- "cite_spans": [],
314
- "ref_spans": [],
315
- "eq_spans": [],
316
- "section": "Apptication Modutes",
317
- "sec_num": null
318
- },
319
- {
320
- "text": "The system then maps it to the following question template :",
321
- "cite_spans": [],
322
- "ref_spans": [],
323
- "eq_spans": [],
324
- "section": "Apptication Modutes",
325
- "sec_num": null
326
- },
327
- {
328
- "text": "[4] asking_point:",
329
- "cite_spans": [],
330
- "ref_spans": [],
331
- "eq_spans": [],
332
- "section": "Apptication Modutes",
333
- "sec_num": null
334
- },
335
351
- {
352
- "text": "The question processor scans the question to search for question words (wh-words) and maps them into corresponding NE types/sub-types or pre-defined notions like REASON.",
353
- "cite_spans": [],
354
- "ref_spans": [],
355
- "eq_spans": [],
356
- "section": "Apptication Modutes",
357
- "sec_num": null
358
- },
359
- {
360
- "text": "We adopt two sets of pattern matching rules for this purpose: (i) structure based pattern matching rules; (ii) simple key word based pattern matching rules (regarded as default rules).",
361
- "cite_spans": [],
362
- "ref_spans": [],
363
- "eq_spans": [],
364
- "section": "Apptication Modutes",
365
- "sec_num": null
366
- },
367
- {
368
- "text": "It is fairly easy to exhaust the second set of rules as interrogative question words/phrases form a closed set. In comparison, the development of the first set of rules are continuously being fine-tuned and expanded. This strategy of using two set of rules leads to the robustness of the question processor.",
369
- "cite_spans": [],
370
- "ref_spans": [],
371
- "eq_spans": [],
372
- "section": "Apptication Modutes",
373
- "sec_num": null
374
- },
375
- {
376
- "text": "The first set of rules are based on shallow parsing results of the questions, using Cymfony FST based Shallow Parser. This parser identifies basic syntactic constructions like BaseNP (Basic Noun Phrase), BasePP (Basic Prepositional Phrase) and VG (Verb Group).",
377
- "cite_spans": [],
378
- "ref_spans": [],
379
- "eq_spans": [],
380
- "section": "Apptication Modutes",
381
- "sec_num": null
382
- },
383
- {
384
- "text": "The following is a sample of the first set of rules: As seen, shallow parsing helps us to capture a variety of natural language question expressions. However, there are cases where some simple key word based pattern matching would be enough to capture the asking point. That is our second set of rules. These rules are used when the first set of rules has failed to produce results. The following is a sample of such rules:",
385
- "cite_spans": [],
386
- "ref_spans": [],
387
- "eq_spans": [],
388
- "section": "Apptication Modutes",
389
- "sec_num": null
390
- },
391
- {
392
- "text": "In the stage of question expansion, the template in [4] [asking, David,Koresh,FBI, word, processor} The last item in the asking._point list attempts to find an infinitive by checking the word to followed by a verb (with the part-of-speech tag VB). As we know, infinitive verb phrases are often used in English to explain a reason for some action.",
393
- "cite_spans": [
394
- {
395
- "start": 52,
396
- "end": 55,
397
- "text": "[4]",
398
- "ref_id": null
399
- },
400
- {
401
- "start": 56,
402
- "end": 99,
403
- "text": "[asking, David,Koresh,FBI, word, processor}",
404
- "ref_id": null
405
- }
406
- ],
407
- "ref_spans": [],
408
- "eq_spans": [],
409
- "section": "Apptication Modutes",
410
- "sec_num": null
411
- },
412
- {
413
- "text": "On the text processing side, we first send the question directly to a search engine in order to narrow down the document pool to the first n, say 200, documents for IE processing. Currently, this includes tokenization, POS tagging and NE tagging. Future plans include several levels of parsing as well; these are required to support CE and GE extraction. It should be noted that all these operations are extremely robust and fast, features necessary for large volume text indexing.",
414
- "cite_spans": [],
415
- "ref_spans": [],
416
- "eq_spans": [],
417
- "section": "Text Processing",
418
- "sec_num": "2.2"
419
- },
420
- {
421
- "text": "Parsing is accomplished through cascaded finite state transducer grammars.",
422
- "cite_spans": [],
423
- "ref_spans": [],
424
- "eq_spans": [],
425
- "section": "Text Processing",
426
- "sec_num": "2.2"
427
- },
428
- {
429
- "text": "The Text Matcher attempts to match the question template with the processed documents for both the asking point and the key words. There is a preliminary ranking standard built-in the matcher in order to find the most probable answers. The primary rank is a count of how many unique keywords are contained within a sentence. The secondary ranking is based on the order that the keywords appear in the sentence compared to their order in the question. The third ranking is based on whether there is an exact match or a variant match for the key verb. In the TREC-8 QA track competition, Cymfony QA accuracy was 66.0%. Considering we have only used NE technology to support QA in this run, 66.0% is a very encouraging result.",
430
- "cite_spans": [],
431
- "ref_spans": [],
432
- "eq_spans": [],
433
- "section": "Text Matching",
434
- "sec_num": "2.3"
435
- },
436
- {
437
- "text": "The first limitation comes from the types of questions.",
438
- "cite_spans": [],
439
- "ref_spans": [],
440
- "eq_spans": [],
441
- "section": "Limitation",
442
- "sec_num": "3"
443
- },
444
- {
445
- "text": "Currently only wh-questions are handled although it is planned that yes-no questions will be handled once we introduce CE and GE templates to support QA. Among the wh-questions, the why-question and how-question t are more challenging because the asking point cannot be simply mapped to the NE types/sub-types.",
446
- "cite_spans": [],
447
- "ref_spans": [],
448
- "eq_spans": [],
449
- "section": "Limitation",
450
- "sec_num": "3"
451
- },
452
- {
453
- "text": "The second limitation is from the nature of the questions. Questions like Where can l find the homepage for Oscar winners or Where can I find info on Shakespeare's works might be answerable easily by a system based on a well-maintained data base of home pages. Since our system is based on the processing of the underlying documents, no correct answer can be provided if there is no such an answer (explicitly expressed in English) in the processed documents. In TREC-8 QA, this is not a problem since every question is guaranteed to have at least one answer in the given document pool. However, in the real world scenario such as a QA portal, it is conceived that the IE results based on the processing of the documents should be complemented by other knowledge sources such as e-copy of yellow pages or other manually maintained and updated data bases.",
454
- "cite_spans": [],
455
- "ref_spans": [],
456
- "eq_spans": [],
457
- "section": "Limitation",
458
- "sec_num": "3"
459
- },
460
- {
461
- "text": "The third limitation is the lack of linguistic processing such as sentence-level parsing and cross-sentential co-reference (CO). This problem will be gradually solved when high-level IE technology is introduced into the system.",
462
- "cite_spans": [],
463
- "ref_spans": [],
464
- "eq_spans": [],
465
- "section": "Limitation",
466
- "sec_num": "3"
467
- },
468
- {
469
- "text": "A new QA architecture is under development; it will exploit all levels of the IE system, including CE and GE. The first issue is how much CE can contribute to a better support of QA. It is found that there are some frequently seen questions which can be better answered once the CE information is provided. These questions are of two types: (i) what/who questions about an NE; (ii) relationship questions.",
470
- "cite_spans": [],
471
- "ref_spans": [],
472
- "eq_spans": [],
473
- "section": "Future Work: Multi-level IE Supported QA",
474
- "sec_num": "4"
475
- },
476
- {
477
- "text": "Questions The next issue is the relationships between GE and QA. It is our belief that the GE technology will result in a breakthrough for QA.",
478
- "cite_spans": [],
479
- "ref_spans": [],
480
- "eq_spans": [],
481
- "section": "Future Work: Multi-level IE Supported QA",
482
- "sec_num": "4"
483
- },
484
- {
485
- "text": "In order to extract GE templates, the text goes through a series of linguistic processing as shown in Figure 1 . It should be noted that the question processing is designed to go through parallel processes and share the same NLP resources until the point of matching and ranking.",
486
- "cite_spans": [],
487
- "ref_spans": [
488
- {
489
- "start": 102,
490
- "end": 110,
491
- "text": "Figure 1",
492
- "ref_id": "FIGREF0"
493
- }
494
- ],
495
- "eq_spans": [],
496
- "section": "Future Work: Multi-level IE Supported QA",
497
- "sec_num": "4"
498
- },
499
- {
500
- "text": "The merging of question templates and GE templates in Template Matcher are fairly straightforward. As they both undergo the same NLP processing, the resulting semantic templates are of the same form. Both question templates and GE templates correspond to fairly standard/predictable patterns (the PREDICATE value is open-ended, but the structure remains stable). More precisely, a user can ask questions on general events themselves (did what) and/or on the participants of the event (who, whom, what) and/or the time, frequency and place of events (when, how often, where). This addresses 2 An alpha version of TextractQA supported by both NE and CE has been implemented and is being tested. by far the most types of general questions of a potential user.",
501
- "cite_spans": [
502
- {
503
- "start": 484,
504
- "end": 501,
505
- "text": "(who, whom, what)",
506
- "ref_id": null
507
- }
508
- ],
509
- "ref_spans": [],
510
- "eq_spans": [],
511
- "section": "Future Work: Multi-level IE Supported QA",
512
- "sec_num": "4"
513
- },
514
- {
515
- "text": "For example, if a user is interested in company acquisition events, he can ask questions like: Which companies ware acquired by Microsoft in 1999? Which companies did Microsoft acquire in 1999? Our system will then parse these questions into the templates as shown below:",
516
- "cite_spans": [],
517
- "ref_spans": [],
518
- "eq_spans": [],
519
- "section": "Future Work: Multi-level IE Supported QA",
520
- "sec_num": "4"
521
- },
522
- {
523
- "text": "[31] <Q_TEMPLATE> := PREDICATE: acquire ARGUMENT1: Microsoft ARGUMENT2: WHAT(COMPANY) TIME: 1999",
524
- "cite_spans": [],
525
- "ref_spans": [],
526
- "eq_spans": [],
527
- "section": "Future Work: Multi-level IE Supported QA",
528
- "sec_num": "4"
529
- },
530
- {
531
- "text": "If the user wants to know when some acquisition happened, he can ask: When was Netscape acquired?",
532
- "cite_spans": [],
533
- "ref_spans": [],
534
- "eq_spans": [],
535
- "section": "Future Work: Multi-level IE Supported QA",
536
- "sec_num": "4"
537
- },
538
- {
539
- "text": "Our system will then translate it into the pattern below:",
540
- "cite_spans": [],
541
- "ref_spans": [],
542
- "eq_spans": [],
543
- "section": "Future Work: Multi-level IE Supported QA",
544
- "sec_num": "4"
545
- },
546
- {
547
- "text": "[32] <QTEMPLATE> := PREDICATE: acquire ARGUMENT1: WHO ARGUMENT2: Netscape TIME: WHEN Note that WHO, WHAT, WHEN above are variable to be instantiated.",
548
- "cite_spans": [],
549
- "ref_spans": [],
550
- "eq_spans": [],
551
- "section": "Future Work: Multi-level IE Supported QA",
552
- "sec_num": "4"
553
- },
554
- {
555
- "text": "Such question templates serve as search constraints to filter the events in our extracted GE template database. Because the question templates and the extracted GE template share the same structure, a simple merging operation would suffice. Nevertheless, there are two important questions to be answered: (i) what if a different verb with the same meaning is used in the question from the one used in the processed text? (ii) what if the question asks about something beyond the GE (or CE) information? These are issues that we are currently researching.",
556
- "cite_spans": [],
557
- "ref_spans": [],
558
- "eq_spans": [],
559
- "section": "Future Work: Multi-level IE Supported QA",
560
- "sec_num": "4"
561
- }
562
- ],
563
- "back_matter": [],
564
- "bib_entries": {
565
- "BIBREF0": {
566
- "ref_id": "b0",
567
- "title": "Nymble: a High-Performance Learning Name-finder",
568
- "authors": [
569
- {
570
- "first": "D",
571
- "middle": [
572
- "M"
573
- ],
574
- "last": "Bikel",
575
- "suffix": ""
576
- }
577
- ],
578
- "year": 1997,
579
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
580
- "volume": "",
581
- "issue": "",
582
- "pages": "194--201",
583
- "other_ids": {},
584
- "num": null,
585
- "urls": [],
586
- "raw_text": "Bikel D.M. et al. (1997) Nymble: a High-Performance Learning Name-finder. \"Proceedings of the Fifth Conference on Applied Natural Language Processing\", Morgan Kaufmann Publishers, pp. 194-201",
587
- "links": null
588
- },
589
- "BIBREF1": {
590
- "ref_id": "b1",
591
- "title": "MUC-7 Information Extraction Task Definition (version 5.1)",
592
- "authors": [
593
- {
594
- "first": "N",
595
- "middle": [],
596
- "last": "Chinchor",
597
- "suffix": ""
598
- },
599
- {
600
- "first": "E",
601
- "middle": [],
602
- "last": "Marsh",
603
- "suffix": ""
604
- }
605
- ],
606
- "year": 1998,
607
- "venue": "Proceedings of MUC-7",
608
- "volume": "",
609
- "issue": "",
610
- "pages": "",
611
- "other_ids": {},
612
- "num": null,
613
- "urls": [],
614
- "raw_text": "Chinchor N. and Marsh E. (1998) MUC-7 Information Extraction Task Definition (version 5.1), \"Proceedings of MUC-7\".",
615
- "links": null
616
- },
617
- "BIBREF2": {
618
- "ref_id": "b2",
619
- "title": "TIPSTER Architecture Design Document Version 2.3",
620
- "authors": [
621
- {
622
- "first": "R",
623
- "middle": [],
624
- "last": "Grishman",
625
- "suffix": ""
626
- }
627
- ],
628
- "year": 1997,
629
- "venue": "",
630
- "volume": "",
631
- "issue": "",
632
- "pages": "",
633
- "other_ids": {},
634
- "num": null,
635
- "urls": [],
636
- "raw_text": "Grishman R. (1997) TIPSTER Architecture Design Document Version 2.3. Technical report, DARPA",
637
- "links": null
638
- },
639
- "BIBREF3": {
640
- "ref_id": "b3",
641
- "title": "IsoQuest Inc.: Description of the NetOwl (TM) Extractor System as Used for MUC-7",
642
- "authors": [
643
- {
644
- "first": "G",
645
- "middle": [
646
- "R"
647
- ],
648
- "last": "Krupka",
649
- "suffix": ""
650
- },
651
- {
652
- "first": "K",
653
- "middle": [],
654
- "last": "Hausman",
655
- "suffix": ""
656
- }
657
- ],
658
- "year": 1998,
659
- "venue": "Proceedings of MUC-7",
660
- "volume": "",
661
- "issue": "",
662
- "pages": "",
663
- "other_ids": {},
664
- "num": null,
665
- "urls": [],
666
- "raw_text": "Krupka G.R. and Hausman K. (1998) IsoQuest Inc.: Description of the NetOwl (TM) Extractor System as Used for MUC-7, \"Proceedings of MUC-7\".",
667
- "links": null
668
- },
669
- "BIBREF4": {
670
- "ref_id": "b4",
671
- "title": "MURAX: A Robust Linguistic Approach For Question Answering Using An On-Line Encyclopaedia",
672
- "authors": [
673
- {
674
- "first": "J",
675
- "middle": [],
676
- "last": "Kupiec",
677
- "suffix": ""
678
- }
679
- ],
680
- "year": 1993,
681
- "venue": "Proceedings of SIGIR-93 93",
682
- "volume": "",
683
- "issue": "",
684
- "pages": "",
685
- "other_ids": {},
686
- "num": null,
687
- "urls": [],
688
- "raw_text": "Kupiec J. (1993) MURAX: A Robust Linguistic Approach For Question Answering Using An On-Line Encyclopaedia, \"Proceedings of SIGIR-93 93\" Pittsburgh, Penna.",
689
- "links": null
690
- },
691
- "BIBREF5": {
692
- "ref_id": "b5",
693
- "title": "Flexible Information Extraction Learning Algorithm, Final Technical Report",
694
- "authors": [
695
- {
696
- "first": "W &",
697
- "middle": [],
698
- "last": "Li",
699
- "suffix": ""
700
- },
701
- {
702
- "first": "R",
703
- "middle": [],
704
- "last": "Srihari",
705
- "suffix": ""
706
- }
707
- ],
708
- "year": 1998,
709
- "venue": "Proceedings of the Seventh Message Understanding Conference",
710
- "volume": "",
711
- "issue": "",
712
- "pages": "",
713
- "other_ids": {},
714
- "num": null,
715
- "urls": [],
716
- "raw_text": "Li, W & Srihari, R. 2000. Flexible Information Extraction Learning Algorithm, Final Technical Report, Air Force Research Laboratory, Rome Research Site, New York MUC-7 (1998) Proceedings of the Seventh Message Understanding Conference (MUC-7), published on the website _http://www.muc.saic.com/",
717
- "links": null
718
- },
719
- "BIBREF6": {
720
- "ref_id": "b6",
721
- "title": "A Domain Independent Event Extraction Toolkit",
722
- "authors": [
723
- {
724
- "first": "E",
725
- "middle": [],
726
- "last": "Roche",
727
- "suffix": ""
728
- },
729
- {
730
- "first": "Y",
731
- "middle": [],
732
- "last": "Schabes",
733
- "suffix": ""
734
- },
735
- {
736
- "first": "R",
737
- "middle": [],
738
- "last": "Srihari",
739
- "suffix": ""
740
- }
741
- ],
742
- "year": 1997,
743
- "venue": "Finite-State Language Processing",
744
- "volume": "",
745
- "issue": "",
746
- "pages": "",
747
- "other_ids": {},
748
- "num": null,
749
- "urls": [],
750
- "raw_text": "Roche E. and Schabes Y. (1997) Finite-State Language Processing, MIT Press, Cambridge, MA Srihari R. (1998) A Domain Independent Event Extraction Toolkit, AFRL-IF-RS-TR-1998-152 Final Technical Report, Air Force Research Laboratory, Rome Research Site, New York",
751
- "links": null
752
- }
753
- },
754
- "ref_entries": {
755
- "FIGREF0": {
756
- "num": null,
757
- "uris": null,
758
- "text": "Textract IE System Architecture",
759
- "type_str": "figure"
760
- },
761
- "TABREF4": {
762
- "html": null,
763
- "num": null,
764
- "content": "<table><tr><td>Process Question</td><td/></tr><tr><td colspan=\"2\">Shallow parse question</td></tr><tr><td colspan=\"2\">Determine Asking Point</td></tr><tr><td colspan=\"2\">Question expansion (using word lists)</td></tr><tr><td>Process Documents</td><td/></tr><tr><td colspan=\"2\">Tokenization, POS tagging, NE Indexing</td></tr><tr><td colspan=\"2\">Shallow Parsing (not yet utilized)</td></tr><tr><td>Text Matcher</td><td/></tr><tr><td colspan=\"2\">Intersect search engine results with NE</td></tr><tr><td>rank answers</td><td/></tr><tr><td colspan=\"2\">2.1 Question Processing</td></tr><tr><td colspan=\"2\">The Question Processing results are a list of</td></tr><tr><td colspan=\"2\">keywords plus the information for asking point.</td></tr><tr><td colspan=\"2\">For example, the question:</td></tr><tr><td/><td>The output before</td></tr><tr><td colspan=\"2\">question expansion is a simple 2-feature template</td></tr><tr><td>as shown below:</td><td/></tr><tr><td colspan=\"2\">[3] asking_point: PERSON</td></tr><tr><td>key_word:</td><td>{ won, 1998, Nobel,</td></tr><tr><td/><td>Peace, Prize }</td></tr><tr><td/><td>Question Prc~:essor</td></tr><tr><td/><td>i : :eXt i .... i</td></tr><tr><td/><td>Figure 2: Textract/QA 1.0 Prototype Architecture</td></tr><tr><td/><td>The general algorithm for question</td></tr><tr><td/><td>answering is as follows:</td></tr></table>",
765
- "type_str": "table",
766
- "text": "P r~_~ ............ ?~ i i ~ ..............................."
767
- },
768
- "TABREF7": {
769
- "html": null,
770
- "num": null,
771
- "content": "<table><tr><td colspan=\"3\">Q: Who is Julian Hill?</td></tr><tr><td>A: name:</td><td/><td colspan=\"2\">Julian Werner Hill</td></tr><tr><td>type:</td><td/><td>PERSON</td></tr><tr><td>age:</td><td/><td>91</td></tr><tr><td>gender:</td><td/><td>MALE</td></tr><tr><td>position:</td><td/><td>research chemist</td></tr><tr><td colspan=\"2\">affiliation:</td><td>Du Pont Co.</td></tr><tr><td colspan=\"2\">education:</td><td colspan=\"2\">Washington University;</td></tr><tr><td/><td/><td>MIT</td></tr><tr><td colspan=\"3\">Q: What is Du Pont?</td></tr><tr><td colspan=\"3\">A: name: Du Pont Co,</td></tr><tr><td colspan=\"3\">type: COMPANY</td></tr><tr><td colspan=\"4\">staff: Julian Hill; Wallace Carothers.</td></tr><tr><td>Questions</td><td colspan=\"2\">specifically about</td><td>a CE</td></tr><tr><td colspan=\"4\">relationship include: For which company did</td></tr><tr><td colspan=\"4\">Julian Hill work? (affiliation relationship) Who</td></tr><tr><td colspan=\"4\">are employees of Du Pont Co.? (staff</td></tr><tr><td colspan=\"4\">relationship) What does Julian Hill do?</td></tr><tr><td colspan=\"3\">(position/profession relationship)</td><td>Which</td></tr><tr><td colspan=\"4\">university did Julian Hill graduate from?</td></tr><tr><td colspan=\"3\">(education relationship), etc. 2</td></tr></table>",
772
- "type_str": "table",
773
- "text": "of the following format require CE templates as best answers: who/what is NE? For example, Who is Julian Hill? Who is Bill Clinton? What is Du Pont? What is Cymfony? To answer these questions, the system can simply 1 For example, How did one make a chocolate cake? How+Adjective questions (e.g. how long, how big, how old, etc.) are handled fairly well.retrieve the corresponding CE template to provide an \"assembled\" answer, as shown below."
774
- }
775
- }
776
- }
777
- }
Full_text_JSON/prefixA/json/A00/A00-1024.json DELETED
@@ -1,1190 +0,0 @@
1
- {
2
- "paper_id": "A00-1024",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:01.745614Z"
6
- },
7
- "title": "Categorizing Unknown Words: Using Decision Trees to Identify Names and Misspellings",
8
- "authors": [
9
- {
10
- "first": "Janine",
11
- "middle": [],
12
- "last": "Toole",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "Natural Language Laboratory",
16
- "institution": "Simon Fraser University Burnaby",
17
- "location": {
18
- "region": "BC",
19
- "country": "Canada VSA IS6"
20
- }
21
- },
22
- "email": "[email protected]"
23
- }
24
- ],
25
- "year": "",
26
- "venue": null,
27
- "identifiers": {},
28
- "abstract": "This paper introduces a system for categorizing unknown words. The system is based on a multicomponent architecture where each component is responsible for identifying one class of unknown words. The focus of this paper is the components that identify names and spelling errors. Each component uses a decision tree architecture to combine multiple types of evidence about the unknown word. The system is evaluated using data from live closed captions a genre replete with a wide variety of unknown words.",
29
- "pdf_parse": {
30
- "paper_id": "A00-1024",
31
- "_pdf_hash": "",
32
- "abstract": [
33
- {
34
- "text": "This paper introduces a system for categorizing unknown words. The system is based on a multicomponent architecture where each component is responsible for identifying one class of unknown words. The focus of this paper is the components that identify names and spelling errors. Each component uses a decision tree architecture to combine multiple types of evidence about the unknown word. The system is evaluated using data from live closed captions a genre replete with a wide variety of unknown words.",
35
- "cite_spans": [],
36
- "ref_spans": [],
37
- "eq_spans": [],
38
- "section": "Abstract",
39
- "sec_num": null
40
- }
41
- ],
42
- "body_text": [
43
- {
44
- "text": "In any real world use, a Natural Language Processing (NLP) system will encounter words that are not in its lexicon, what we term 'unknown words'. Unknown words are problematic because a NLP system will perform well only if it recognizes the words that it is meant to analyze or translate: the more words a system does not recognize the more the system's performance will degrade. Even when unknown words are infrequent, they can have a disproportionate effect on system quality. For example, Min (1996) found that while only 0.6% of words in 300 e-mails were misspelled, this meant that 12% of the sentences contained an error (discussed in (Min and Wilson, 1998) ).",
45
- "cite_spans": [
46
- {
47
- "start": 492,
48
- "end": 502,
49
- "text": "Min (1996)",
50
- "ref_id": "BIBREF12"
51
- },
52
- {
53
- "start": 641,
54
- "end": 663,
55
- "text": "(Min and Wilson, 1998)",
56
- "ref_id": "BIBREF11"
57
- }
58
- ],
59
- "ref_spans": [],
60
- "eq_spans": [],
61
- "section": "Introduction",
62
- "sec_num": null
63
- },
64
- {
65
- "text": "Words may be unknown for many reasons: the word may be a proper name, a misspelling, an abbreviation, a number, a morphological variant of a known word (e.g. recleared), or missing from the dictionary. The first step in dealing with unknown words is to identify the class of the unknown word; whether it is a misspelling, a proper name, an abbreviation etc. Once this is known, the proper action can be taken, misspellings can be corrected, abbreviations can be expanded and so on, as deemed necessary by the particular text processing application. In this paper we introduce a system for categorizing unknown words. The system is based on a multi-component architecture where each component is responsible for identifying one category of unknown words. The main focus of this paper is the components that identify names and spelling errors. Both components use a decision tree architecture to combine multiple types of evidence about the unknown word. Results from the two components are combined using a weighted voting procedure. The system is evaluated using data from live closed captions -a genre replete with a wide variety of unknown words.",
66
- "cite_spans": [],
67
- "ref_spans": [],
68
- "eq_spans": [],
69
- "section": "Introduction",
70
- "sec_num": null
71
- },
72
- {
73
- "text": "This paper is organized as follows. In section 2 we outline the overall architecture of the unknown word categorizer. The name identifier and the misspelling identifier are introduced in section 3. Performance and evaluation issues are discussed in section 4. Section 5 considers portability issues. Section 6 compares the current system with relevant preceding research. Concluding comments can be found in section 6.",
74
- "cite_spans": [],
75
- "ref_spans": [],
76
- "eq_spans": [],
77
- "section": "Introduction",
78
- "sec_num": null
79
- },
80
- {
81
- "text": "The goal of our research is to develop a system that automatically categorizes unknown words. According to our definition, an unknown word is a word that is not contained in the lexicon of an NLP system. As defined, 'unknown-ness' is a relative concept: a word that is known to one system may be unknown to another system. Our research is motivated by the problems that we have experienced in translating live closed captions: live captions are produced under tight time constraints and contain many unknown words. Typically, the caption transcriber has a five second window to transcribe the broadcast dialogue. Because of the live nature of the broadcast, there is no opportunity to post-edit the transcript in any way. Although motivated by our specific requirements, the unknown word categorizer would benefit any NLP system that encounters unknown words of differing categories. Some immediately obvious domains where unknown words are frequent include e-mail messages, internet chat rooms, data typed in by call centre operators, etc.",
82
- "cite_spans": [],
83
- "ref_spans": [],
84
- "eq_spans": [],
85
- "section": "System Architecture",
86
- "sec_num": "2"
87
- },
88
- {
89
- "text": "To deal with these issues we propose a multicomponent architecture where individual components specialize in identifying one particular type of unknown word. For example, the misspelling identifier will specialize in identifying misspellings, the abbreviation component will specialize in identifying abbreviations, etc. Each component will return a confidence measure of the reliability of its prediction, c.f. (Elworthy, 1998) . The results from each component are evaluated to determine the final category of the word.",
90
- "cite_spans": [
91
- {
92
- "start": 412,
93
- "end": 428,
94
- "text": "(Elworthy, 1998)",
95
- "ref_id": "BIBREF5"
96
- }
97
- ],
98
- "ref_spans": [],
99
- "eq_spans": [],
100
- "section": "System Architecture",
101
- "sec_num": "2"
102
- },
103
- {
104
- "text": "There are several advantages to this approach. Firstly, the system can take advantage of existing research. For example, the name recognition module can make use of the considerable research that exists on name recognition, e.g. (McDonald, 1996) , (Mani et al., 1996) . Secondly, individual components can be replaced when improved models are available, without affecting other parts of the system. Thirdly, this approach is compatible with incorporating multiple components of the same type to improve performance (cf. (van Halteren et al., 1998) who found that combining the results of several part of speech taggers increased performance).",
105
- "cite_spans": [
106
- {
107
- "start": 229,
108
- "end": 245,
109
- "text": "(McDonald, 1996)",
110
- "ref_id": "BIBREF10"
111
- },
112
- {
113
- "start": 248,
114
- "end": 267,
115
- "text": "(Mani et al., 1996)",
116
- "ref_id": "BIBREF9"
117
- },
118
- {
119
- "start": 515,
120
- "end": 547,
121
- "text": "(cf. (van Halteren et al., 1998)",
122
- "ref_id": null
123
- }
124
- ],
125
- "ref_spans": [],
126
- "eq_spans": [],
127
- "section": "System Architecture",
128
- "sec_num": "2"
129
- },
130
- {
131
- "text": "The Current System",
132
- "cite_spans": [],
133
- "ref_spans": [],
134
- "eq_spans": [],
135
- "section": "3",
136
- "sec_num": null
137
- },
138
- {
139
- "text": "In this paper we introduce a simplified version of the unknown word categorizer: one that contains just two components: misspelling identification and name identification. In this section we introduce these components and the 'decision: component which combines the results from the individual modules.",
140
- "cite_spans": [],
141
- "ref_spans": [],
142
- "eq_spans": [],
143
- "section": "3",
144
- "sec_num": null
145
- },
146
- {
147
- "text": "The goal of the name identifier is to differentiate between those unknown words which are proper names, and those which are not. We define a name as word identifying a person, place, or concept that would typically require capitalization in English. One of the motivations for the modular architecture introduced above, was to be able to leverage existing research. For example, ideally, we should be able to plug in an existing proper name recognizer and avoid the problem of creating our own. However, the domain in which we are currently operating -live closed captions -makes this approach difficult. Closed captions do not contain any case information, all captions are in upper case. Existing proper name recognizers rely heavily on case to identify names, hence they perform poorly on our data.",
148
- "cite_spans": [],
149
- "ref_spans": [],
150
- "eq_spans": [],
151
- "section": "The Name Identifier",
152
- "sec_num": "3.1"
153
- },
154
- {
155
- "text": "A second disadvantage of currently available name recognizers is that they do not generally return a confidence measure with their prediction. Some indication of confidence is required in the multicomponent architecture we have implemented. However, while currently existing name recognizers are inappropriate for the needs of our domain, future name recognizers may well meet these requirements and be able to be incorporated into the architecture we propose.",
156
- "cite_spans": [],
157
- "ref_spans": [],
158
- "eq_spans": [],
159
- "section": "The Name Identifier",
160
- "sec_num": "3.1"
161
- },
162
- {
163
- "text": "For these reasons we develop our own name identifier. We utilize a decision tree to model the characteristics of proper names. The advantage of decision trees is that they are highly explainable: one can readily understand the features that are affecting the analysis (Weiss and Indurkhya, 1998) . Furthermore, decision trees are well-suited for combining a wide variety of information.",
164
- "cite_spans": [
165
- {
166
- "start": 268,
167
- "end": 295,
168
- "text": "(Weiss and Indurkhya, 1998)",
169
- "ref_id": "BIBREF16"
170
- }
171
- ],
172
- "ref_spans": [],
173
- "eq_spans": [],
174
- "section": "The Name Identifier",
175
- "sec_num": "3.1"
176
- },
177
- {
178
- "text": "For this project, we made use of the decision tree that is part of IBM's Intelligent Miner suite for data mining. Since the point of this paper is to describe an application of decision trees rather than to argue for a particular decision tree algorithm, we omit further details of the decision tree software. Similar results should be obtained by using other decision tree software. Indeed, the results we obtain could perhaps be improved by using more sophisticated decision-tree approaches such as the adaptiveresampling described in (Weiss et al, 1999) .",
179
- "cite_spans": [
180
- {
181
- "start": 537,
182
- "end": 556,
183
- "text": "(Weiss et al, 1999)",
184
- "ref_id": "BIBREF17"
185
- }
186
- ],
187
- "ref_spans": [],
188
- "eq_spans": [],
189
- "section": "The Name Identifier",
190
- "sec_num": "3.1"
191
- },
192
- {
193
- "text": "The features that we use to train the decision tree are intended to capture the characteristics of names. We specify a total of ten features for each unknown word. These identify two features of the unknown word itself as well as two features for each of the two preceding and two following words.",
194
- "cite_spans": [],
195
- "ref_spans": [],
196
- "eq_spans": [],
197
- "section": "The Name Identifier",
198
- "sec_num": "3.1"
199
- },
200
- {
201
- "text": "The first feature represents the part of speech of the word. Vv'e use an in-house statistical tagger (based on (Church, 1988) ) to tag the text in which the unknown word occurs. The tag set used is a simplified version of the tags used in the machinereadable version of the Oxford Advanced Learners Dictionary (OALD). The tag set contains just one tag to identify nouns.",
202
- "cite_spans": [
203
- {
204
- "start": 111,
205
- "end": 125,
206
- "text": "(Church, 1988)",
207
- "ref_id": "BIBREF2"
208
- }
209
- ],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "The Name Identifier",
213
- "sec_num": "3.1"
214
- },
215
- {
216
- "text": "The second feature provides more informative tagging for specific parts of speech (these are referred to as 'detailed tags' (DETAG)). This tagset consists of the nine tags listed in Table 1 . All parts of speech apart from noun and punctuation tags are assigned the tag 'OTHER;. All punctuation tags are assigned the tag 'BOUNDARY'. Words identified as nouns are assigned one of the remaining tags depending on the information provided in the OALD (although the unknown word, by definition, will not appear in the OALD, the preceding and following words may well appear in the dictionary). If the word is identified in the OALD as a common noun it is assigned the tag 'COM'. If it is identified in the OALD as a proper name it is assigned the tag 'NAME'. If the word is specified as both a name and a common noun (e.g. 'bilF), then it is assigned the tag 'NCOM'. Pronouns are assigned the tag 'PRON'. If the word is in a list of titles that we have compiled, then the tag 'TITLE' is assigned. Similarly, if the word is a member of the class of words that can follow a name (e.g. 'jr'), then the tag 'POST ~ is assigned. A simple rule-based sys- tern is used to assign these tags. If we were dealing with data that contains case information, we would also include fields representing the existence/non-existence of initial upper case for the five words. However, since our current data does not include case information we do not include these features.",
217
- "cite_spans": [],
218
- "ref_spans": [
219
- {
220
- "start": 182,
221
- "end": 189,
222
- "text": "Table 1",
223
- "ref_id": null
224
- }
225
- ],
226
- "eq_spans": [],
227
- "section": "The Name Identifier",
228
- "sec_num": "3.1"
229
- },
230
- {
231
- "text": "The goal of the misspelling identifier is to differentiate between those unknown words which are spelling errors and those which are not. We define a misspelling as an unintended, orthographically incorrect representation (with respect to the NLP system) of a word. A misspelling differs from the intended known word through one or more additions, deletions, substitutions, or reversals of letters, or the exclusion of punctuation such as hyphenation or spacing. Like the definition of 'unknown word', the definition of a misspelling is also relative to a particular NLP system.",
232
- "cite_spans": [],
233
- "ref_spans": [],
234
- "eq_spans": [],
235
- "section": "The Misspelling Identifier",
236
- "sec_num": "3.2"
237
- },
238
- {
239
- "text": "Like the name identifier, we make use of a decision tree to capture the characteristics of misspellings. The features we use are derived from previous research, including our own previous research on misspelling identification. An abridged list of the features that are used in the training data is listed in Table 2 and discussed below.",
240
- "cite_spans": [],
241
- "ref_spans": [
242
- {
243
- "start": 309,
244
- "end": 316,
245
- "text": "Table 2",
246
- "ref_id": "TABREF1"
247
- }
248
- ],
249
- "eq_spans": [],
250
- "section": "The Misspelling Identifier",
251
- "sec_num": "3.2"
252
- },
253
- {
254
- "text": "Corpus frequency: (Vosse, 1992) differentiates between misspellings and neologisms (new words) in terms of their frequency. His algorithm classifies unknown words that appear infrequently as misspellings, and those that appear more frequently as neologisms. Our corpus frequency variable specifies the frequency of each unknown word in a 2.6 million word corpus of business news closed captions.",
255
- "cite_spans": [
256
- {
257
- "start": 18,
258
- "end": 31,
259
- "text": "(Vosse, 1992)",
260
- "ref_id": "BIBREF15"
261
- }
262
- ],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "The Misspelling Identifier",
266
- "sec_num": "3.2"
267
- },
268
- {
269
- "text": "I~'ord Length: (Agirre et al., 1998) note that their predictions for the correct spelling of misspelled words are more accurate for words longer than four characters, and much less accurate for shorter words. This observation can also be found in (Kukich, 1992) . Our word length variables measures the number of characters in each word.",
270
- "cite_spans": [
271
- {
272
- "start": 15,
273
- "end": 36,
274
- "text": "(Agirre et al., 1998)",
275
- "ref_id": "BIBREF0"
276
- },
277
- {
278
- "start": 247,
279
- "end": 261,
280
- "text": "(Kukich, 1992)",
281
- "ref_id": "BIBREF8"
282
- }
283
- ],
284
- "ref_spans": [],
285
- "eq_spans": [],
286
- "section": "The Misspelling Identifier",
287
- "sec_num": "3.2"
288
- },
289
- {
290
- "text": "Edit distance: Edit-distance is a metric for identifying the orthographic similarity of two words. Typically, one edit-distance corresponds to one substitution, deletion, reversal or addition of a character. (Damerau, 1964) observed that 80% of spelling errors in his data were just one edit-distance from the intended word. Similarly, (Mitton, 1987) found that 70% of his data was within one edit-distance from the intended word. Our edit distance feature represents the edit distance from the unknown word to the closest suggestion produced by the unix spell checker, ispell. If ispell does not produce any suggestions, an edit distance of thirty is assigned. In previous work we have experimented with more sophisticated distance measures. However, simple edit distance proved to be the most effective (Toole, 1999) .",
291
- "cite_spans": [
292
- {
293
- "start": 208,
294
- "end": 223,
295
- "text": "(Damerau, 1964)",
296
- "ref_id": "BIBREF3"
297
- },
298
- {
299
- "start": 336,
300
- "end": 350,
301
- "text": "(Mitton, 1987)",
302
- "ref_id": null
303
- },
304
- {
305
- "start": 805,
306
- "end": 818,
307
- "text": "(Toole, 1999)",
308
- "ref_id": "BIBREF13"
309
- }
310
- ],
311
- "ref_spans": [],
312
- "eq_spans": [],
313
- "section": "The Misspelling Identifier",
314
- "sec_num": "3.2"
315
- },
316
- {
317
- "text": "Character sequence frequency: A characteristic of some misspellings is that they contain character sequences which are not typical of the language, e.g.tlted, wful. Exploiting this information is a standard way of identifying spelling errors when using a dictionary is not desired or appropriate, e.g. (Hull and Srihari, 1982) , (Zamora et al., 1981) .",
318
- "cite_spans": [
319
- {
320
- "start": 302,
321
- "end": 326,
322
- "text": "(Hull and Srihari, 1982)",
323
- "ref_id": "BIBREF7"
324
- },
325
- {
326
- "start": 329,
327
- "end": 350,
328
- "text": "(Zamora et al., 1981)",
329
- "ref_id": "BIBREF18"
330
- }
331
- ],
332
- "ref_spans": [],
333
- "eq_spans": [],
334
- "section": "The Misspelling Identifier",
335
- "sec_num": "3.2"
336
- },
337
- {
338
- "text": "To calculate our character sequence feature, we firstly determine the frequencies of the two least frequent character tri-gram sequences in the word in each of a selection of corpora. In previous work we included each of these values as individual features. However, the resulting trees were quite unstable as one feature would be relevant to one tree, whereas a different character sequence feature would be relevant to another tree. To avoid this problem, we developed a composite feature that is the sum of all individual character sequence frequencies.",
339
- "cite_spans": [],
340
- "ref_spans": [],
341
- "eq_spans": [],
342
- "section": "The Misspelling Identifier",
343
- "sec_num": "3.2"
344
- },
345
- {
346
- "text": "This binary feature specifies whether a word contains a character that is not typical of English words, such as accented characters, etc. Such characters are indicative of foreign names or transmission noise (in the case of captions) rather than misspellings.",
347
- "cite_spans": [],
348
- "ref_spans": [],
349
- "eq_spans": [],
350
- "section": "Non-English characters:",
351
- "sec_num": null
352
- },
353
- {
354
- "text": "The misspelling identifier and the name identifier will each return a prediction for an unknown word. In cases where the predictions are compatible, e.g. where the name identifier predicts that it is a name and the spelling identifier predicts that it is not a misspelling, then the decision is straightforward. Similarly, if both decision trees make negative predictions, then we can assume that the unknown word is neither a misspelling nor a name, but some other category of unknown word. However, it is also possible that both the spelling identifier and the name identifier will make positive predictions. In these cases we need a mechanism to decide which assignment is upheld. For the purposes of this paper, we make use of a simple heuristic where in the case of two positive predictions the one with the highest confidence measure is accepted. The decision trees return a confidence measure for each leaf of the tree. The confidence measure for a particular leaf is calculated from the training data and corresponds to the proportion of correct predictions over the total number of predictions at this leaf.",
355
- "cite_spans": [],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "Decision Making Component",
359
- "sec_num": "3.3"
360
- },
361
- {
362
- "text": "In this section we evaluate the unknown word categorizer introduced above. We begin by describing the training and test data. Following this, we evaluate the individual components and finally, we evaluate the decision making component.",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Evaluation",
367
- "sec_num": "4"
368
- },
369
- {
370
- "text": "The training and test data for the decision tree consists of 7000 cases of unknown words extracted from a 2.6 million word corpus of live business news captions. Of the 7000 cases, 70.4% were manually identified as names and 21.3% were identified as misspellings.The remaining cases were other types of unknown words such as abbreviations, morphological variants, etc. Seventy percent of the data was randomly selected to serve as the training corpus. The remaining thirty percent, or 2100 records, was reserved as the test corpus. The test data consists of ten samples of 2100 records selected randomly with replacement from the test corpus.",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Evaluation",
375
- "sec_num": "4"
376
- },
377
- {
378
- "text": "We now consider the results of training a decision tree to identify misspellings using those features we introduced in the section on the misspelling identifier. The tree was trained on the training data described above. The tree was evaluated using each of the ten test data sets. The average precision and recall data for the ten test sets are given in Table 3, together with the base-line case of assuming that we categorize all unknown words as names (the most common category). With the baseline case we achieve 70.4% precision but with 0% recall. In contrast, the decision tree approach obtains 77.1% precision and 73.8% recall.",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Evaluation",
383
- "sec_num": "4"
384
- },
385
- {
386
- "text": "We also trained a decision tree using not only the features identified in our discussion on misspellings but also those features that we introduced in our discussion of name identification. The results for this tree can be found in the second line of Table 3 . The inclusion of the additional features has increased precision by approximately 5%. However, it has also decreased recall by about the same amount. The overall F-score is quite similar. It appears that the name features are not predictive for identifying misspellings in this domain. This is not surprising considering that eight of the ten features specified for name identification are concerned with features of the two preceding and two following words. Such word-external information is of little use in identifying a misspelling.",
387
- "cite_spans": [],
388
- "ref_spans": [
389
- {
390
- "start": 251,
391
- "end": 259,
392
- "text": "Table 3",
393
- "ref_id": null
394
- }
395
- ],
396
- "eq_spans": [],
397
- "section": "Evaluation",
398
- "sec_num": "4"
399
- },
400
- {
401
- "text": "An analysis of the cases where the misspelling decision tree failed to identify a misspelling revealed two major classes of omissions. The first class contains a collection of words which have typical characteristics of English words, but differ from the intended word by the addition or deletion of a syllable.",
402
- "cite_spans": [],
403
- "ref_spans": [],
404
- "eq_spans": [],
405
- "section": "Evaluation",
406
- "sec_num": "4"
407
- },
408
- {
409
- "text": "Words in this class include creditability for credibility, coordmatored for coordinated, and representires for representatives. The second class contains misspellings that differ from known words by the deletion of a blank. Examples in this class include webpage, crewmembers, and rainshower. The second class of misspellings can be addressed by adding a feature that specifies whether the unknown word can be split up into two component known words. Such a feature should provide strong predictability for the second class of words. The first class of words are more of a challenge. These words have a close homophonic relationship with the intended word rather than a close homographic relationship (as captured by edit distance). Perhaps this class of words would benefit from a feature representing phonetic distance rather than edit distance.",
410
- "cite_spans": [],
411
- "ref_spans": [],
412
- "eq_spans": [],
413
- "section": "Evaluation",
414
- "sec_num": "4"
415
- },
416
- {
417
- "text": "Among those words which were incorrectly identified as misspellings, it is also possible to identify common causes for the misidentification. Among these words are many foreign words which have character sequences which are not common in English. Examples include khanehanalak, phytopla~2kton, brycee1~.",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "Evaluation",
422
- "sec_num": "4"
423
- },
424
- {
425
- "text": "The results for our name identifier are given in Table 4 . Again, the decision tree approach is a significant improvement over the baseline case. If we take the baseline approach and assume that all unknown words are names, then we would achieve a precision of 70.4%. However, using the decision tree approach, we obtain 86.5% precision and 92.9% recall.",
426
- "cite_spans": [],
427
- "ref_spans": [
428
- {
429
- "start": 49,
430
- "end": 56,
431
- "text": "Table 4",
432
- "ref_id": null
433
- }
434
- ],
435
- "eq_spans": [],
436
- "section": "Evaluation",
437
- "sec_num": "4"
438
- },
439
- {
440
- "text": "We also trained a tree using both the name and misspelling features. The results can be found in the second line of Table 4 . Unlike the case when we trained the misspelling identifier on all the features, the extended tree for the name identifier provides increased recall as well as increased precision. Unlike the case with the misspelling decision-tree, the misspelling-identification features do provide predictive information for name identification. If we review the features, this result seems quite reasonable: features such as corpus frequency and non-English characters can provide evidence for/against name iden- A review of the errors made by the name identifier again provides insight for future development. Among those unknown words that are names but which were not identified as such are predominantly names that can (and did) appear with determiners.",
441
- "cite_spans": [],
442
- "ref_spans": [
443
- {
444
- "start": 116,
445
- "end": 123,
446
- "text": "Table 4",
447
- "ref_id": null
448
- }
449
- ],
450
- "eq_spans": [],
451
- "section": "Evaluation",
452
- "sec_num": "4"
453
- },
454
- {
455
- "text": "Examples of this class include steelers in the steelers, and pathfinder in the pathfinder. Hence, the name identifier seems adept at finding the names of individual people and places, which typically cannot be combined with determiners. But, the name identifier has more problems with names that have similar distributions to common nouns.",
456
- "cite_spans": [],
457
- "ref_spans": [],
458
- "eq_spans": [],
459
- "section": "Evaluation",
460
- "sec_num": "4"
461
- },
462
- {
463
- "text": "The cases where the name identifier incorrectly identifies unknown words as names also have identifiable characteristics. These examples mostly include words with unusual character sequences such as the misspellings sxetion and fwlamg. No doubt these have similar characteristics to foreign names. As the misidentified words are also correctly identified as misspellings by the misspelling identifier, these are less problematic. It is the task of the decisionmaking component to resolve issues such as these.",
464
- "cite_spans": [],
465
- "ref_spans": [],
466
- "eq_spans": [],
467
- "section": "Evaluation",
468
- "sec_num": "4"
469
- },
470
- {
471
- "text": "The final results we include are for the unknown word categorizer itself using the voting procedure outlined in previous discussion. As introduced previously, confidence measure is used as a tie-breaker in cases where the two components make positive decision. We evaluate the categorizer using precision and recall metrics. The precision metric identifies the number of correct misspelling or name categorizations over the total number of times a word was identified as a misspelling or a name. The recall metric identifies the number of times the system correctly identifies a misspelling or name over the number of misspellings and names existing in the data. As illustrated in Table 5 , the unknown word categorizer achieves 86% precision and 89.9% recall on the task of identifying names and misspellings.",
472
- "cite_spans": [],
473
- "ref_spans": [
474
- {
475
- "start": 681,
476
- "end": 688,
477
- "text": "Table 5",
478
- "ref_id": null
479
- }
480
- ],
481
- "eq_spans": [],
482
- "section": "Evaluation",
483
- "sec_num": "4"
484
- },
485
- {
486
- "text": "An examination of the confusion matrix of the tiebreaker decisions is also revealing. We include the confusion matrix for one test data set in Table 6 . Firstly, in only about 5% of the cases was it necessary to revert to confidence measure to determine the category of the unknown word. In all other cases the predictions were compatible. Secondly, in the majority of cases the decision-maker rules in favour of the name prediction. In hindsight this is not surprising since the name decision tree has higher resuits and hence is likely to have higher confidence measures.",
487
- "cite_spans": [],
488
- "ref_spans": [
489
- {
490
- "start": 143,
491
- "end": 150,
492
- "text": "Table 6",
493
- "ref_id": null
494
- }
495
- ],
496
- "eq_spans": [],
497
- "section": "Evaluation",
498
- "sec_num": "4"
499
- },
500
- {
501
- "text": "A review of the largest error category in this confusion matrix is also insightful. These are cases where the decision-maker classifies the unknown word as a name when it should be a misspelling (37 cases). The words in this category are typically examples where the misspelled word has a phonetic relationship with the intended word. For example, temt for tempt, floyda for florida, and dimow part of the intended word democrat. Not surprisingly, it was these types of words which were identified as problematic for the current misspelling identifier. Augmenting the misspelling identifier with features to identify these types of misspellings should also lead to improvement in the decision-maker.",
502
- "cite_spans": [],
503
- "ref_spans": [],
504
- "eq_spans": [],
505
- "section": "Evaluation",
506
- "sec_num": "4"
507
- },
508
- {
509
- "text": "We find these results encouraging: they indicate that the approach we are taking is productive. Our future work will focus on three fronts. Firstly, we will improve our existing components by developing further features which are sensitive to the distinction between names and misspellings. The discussion in this section has indicated several promising directions. Secondly, we will develop components to identify the remaining types of unknown words, such as abbreviations, morphological variants, etc. Thirdly, we will experiment with alternative decision-making processes.",
510
- "cite_spans": [],
511
- "ref_spans": [],
512
- "eq_spans": [],
513
- "section": "Evaluation",
514
- "sec_num": "4"
515
- },
516
- {
517
- "text": "In this paper we have introduced a means for identifying names and misspellings from among other types of unknown words and have illustrated the process using the domain of closed captions. Although not explicitly specified, one of the goals of the research has been to develop an approach that will be portable to new domains and languages.",
518
- "cite_spans": [],
519
- "ref_spans": [],
520
- "eq_spans": [],
521
- "section": "Examining Portability",
522
- "sec_num": "5"
523
- },
524
- {
525
- "text": "We are optimistic that the approach we have developed is portable. The system that we have developed requires very little in terms of linguistic resources. Apart from a corpus of the new domain and language, the only other requirements are some means of generating spelling suggestions (ispell is available for many languages) and a part-of-speech tagger. Table 5 : Precision and recall for decision-making component more information sources are available, then these can be readily included in the information provided to the decision tree training algorithm. For many languages, the features used in the unknown word categorizer may well be sufficient. However, the features used do make some assumptions about the nature of the writing system used. For example, the edit distance feature in the misspelling identifier assumes that words consist of alphabetic characters which have undergone substitution/addition/deletion. However, this feature will be less useful in a language such as Japanese or Chinese which use ideographic characters. However, while the exact features used in this paper may be inappropriate for a given language, we believe the generM approach is transferable. In the case of a language such as Japanese, one would consider the means by which misspellings differ from their intended word and identify features to capture these differences.",
526
- "cite_spans": [],
527
- "ref_spans": [
528
- {
529
- "start": 356,
530
- "end": 363,
531
- "text": "Table 5",
532
- "ref_id": null
533
- }
534
- ],
535
- "eq_spans": [],
536
- "section": "Examining Portability",
537
- "sec_num": "5"
538
- },
539
- {
540
- "text": "There is little research that has focused on differentiating the different types of unknown words. For example, research on spelling error detection and correction for the most part assumes that all unknown words are misspellings and makes no attempt to identify other types of unknown words, e.g. (Elmi and Evens, 1998) . Naturally, these are not appropriate comparisons for the work reported here. However, as is evident from the discussion above, previous spelling research does provide an important role in suggesting productive features to include in the decision tree.",
541
- "cite_spans": [
542
- {
543
- "start": 298,
544
- "end": 320,
545
- "text": "(Elmi and Evens, 1998)",
546
- "ref_id": "BIBREF4"
547
- }
548
- ],
549
- "ref_spans": [],
550
- "eq_spans": [],
551
- "section": "Related Research",
552
- "sec_num": "6"
553
- },
554
- {
555
- "text": "Research that is more similar in goal to that outlined in this paper is Vosse (Vosse, 1992) . Vosse uses a simple algorithm to identify three classes of unknown words: misspellings, neologisms, and names. Capitalization is his sole means of identifying names. However, capitalization information is not available in closed captions. Hence, his system would be ineffective on the closed caption domain with which we are working. (Granger, 1983) uses expectations generated by scripts to anMyze unknown words. The drawback of his system is that it lacks portability since it incorporates scripts that make use of world knowledge of the situation being described; in this case, naval ship-to-shore messages.",
556
- "cite_spans": [
557
- {
558
- "start": 78,
559
- "end": 91,
560
- "text": "(Vosse, 1992)",
561
- "ref_id": "BIBREF15"
562
- },
563
- {
564
- "start": 428,
565
- "end": 443,
566
- "text": "(Granger, 1983)",
567
- "ref_id": "BIBREF6"
568
- }
569
- ],
570
- "ref_spans": [],
571
- "eq_spans": [],
572
- "section": "Related Research",
573
- "sec_num": "6"
574
- },
575
- {
576
- "text": "Research that is similar in technique to that reported here is (Baluja et al., 1999) . Baluja and his colleagues use a decision tree classifier to identify proper names in text. They incorporate three types of features: word level (essentially utilizes case information), dictionary-level (comparable to our ispell feature), and POS information (comparable to our POS tagging). Their highest F-score for name identification is 95.2, slightly higher than our name identifier. However, it is difficult to compare the two sets of results since our tasks are slightly different. The goal of Baluja's research, and all other proper name identification research, is to identify all those words and phrases in the text which are proper names. Our research, on the other hand, is not concerned with all text, but only those words which are unknown. Also preventing comparison is the type of data that we deal with. Baluja's data contains case information whereas ours does not-the lack of case information makes name identification significantly more difficult. Indeed, Baluja's results when they exclude their word-level (case) features are significantly lower: a maximum F-score of 79.7.",
577
- "cite_spans": [
578
- {
579
- "start": 63,
580
- "end": 84,
581
- "text": "(Baluja et al., 1999)",
582
- "ref_id": "BIBREF1"
583
- }
584
- ],
585
- "ref_spans": [],
586
- "eq_spans": [],
587
- "section": "Related Research",
588
- "sec_num": "6"
589
- },
590
- {
591
- "text": "In this paper we have introduced an unknown word eategorizer that can identify misspellings and names. The unknown word categorizer consists of individual components, each of which specialize in identifying a particular class of unknown word. The two existing components are implemented as decision trees. The system provides encouraging results when evaluated against a particularly challenging domain: transcripts from live closed captions.",
592
- "cite_spans": [],
593
- "ref_spans": [],
594
- "eq_spans": [],
595
- "section": "Conclusion",
596
- "sec_num": "7"
597
- },
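Since the misspelling and name identifiers are decision trees over such features, the training step can be pictured with an off-the-shelf learner. A sketch using scikit-learn's DecisionTreeClassifier (a stand-in for whatever tree induction software the authors actually used; the three features and the toy training data are invented for illustration):

    from sklearn.tree import DecisionTreeClassifier

    # Hypothetical feature vectors for unknown words:
    # [edit distance to closest ispell suggestion, number of suggestions, word length]
    X = [[1, 3, 7], [1, 2, 5], [4, 0, 6], [5, 0, 9]]
    y = ["misspelling", "misspelling", "name", "name"]  # toy labels

    clf = DecisionTreeClassifier().fit(X, y)
    print(clf.predict([[2, 2, 6]]))  # label for a new unknown word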
598
- {
599
- "text": "Predicted Spelling Predicted Name Neither name nor misspelling 0 6 Misspelling 10 37 Name 4 43 Table 6 : Confusion matrix for decision maker: includes only those examples where both components made a positive prediction.",
600
- "cite_spans": [],
601
- "ref_spans": [
602
- {
603
- "start": 95,
604
- "end": 102,
605
- "text": "Table 6",
606
- "ref_id": null
607
- }
608
- ],
609
- "eq_spans": [],
610
- "section": "Conclusion",
611
- "sec_num": "7"
612
- }
613
- ],
614
- "back_matter": [],
615
- "bib_entries": {
616
- "BIBREF0": {
617
- "ref_id": "b0",
618
- "title": "Towards a single proposal in spelling correction",
619
- "authors": [
620
- {
621
- "first": "K",
622
- "middle": [],
623
- "last": "Agirre",
624
- "suffix": ""
625
- },
626
- {
627
- "first": "K",
628
- "middle": [],
629
- "last": "Gojenola",
630
- "suffix": ""
631
- },
632
- {
633
- "first": "A",
634
- "middle": [],
635
- "last": "Sarasola",
636
- "suffix": ""
637
- },
638
- {
639
- "first": "",
640
- "middle": [],
641
- "last": "Voutilainen",
642
- "suffix": ""
643
- }
644
- ],
645
- "year": 1998,
646
- "venue": "Proceedings of the 36th Ammal Meeting of the ACL and the 17th International",
647
- "volume": "",
648
- "issue": "",
649
- "pages": "",
650
- "other_ids": {},
651
- "num": null,
652
- "urls": [],
653
- "raw_text": "Agirre, K. Gojenola, K. Sarasola, , and A. Vouti- lainen. 1998. Towards a single proposal in spelling correction. In Proceedings of the 36th Ammal Meeting of the ACL and the 17th International",
654
- "links": null
655
- },
656
- "BIBREF1": {
657
- "ref_id": "b1",
658
- "title": "Applying machine learning for high performance named-entity extraction",
659
- "authors": [
660
- {
661
- "first": "S",
662
- "middle": [],
663
- "last": "Baluja",
664
- "suffix": ""
665
- },
666
- {
667
- "first": "V",
668
- "middle": [],
669
- "last": "Mittal",
670
- "suffix": ""
671
- },
672
- {
673
- "first": "R",
674
- "middle": [],
675
- "last": "Sukthankar",
676
- "suffix": ""
677
- }
678
- ],
679
- "year": 1999,
680
- "venue": "Proceedings of the Colzference of the Pacific Association for Computational Linguistics",
681
- "volume": "",
682
- "issue": "",
683
- "pages": "365--378",
684
- "other_ids": {},
685
- "num": null,
686
- "urls": [],
687
- "raw_text": "S. Baluja, V. Mittal, and R.. Sukthankar. 1999. Applying machine learning for high performance named-entity extraction. In Proceedings of the Colzference of the Pacific Association for Com- putational Linguistics , pages 365-378.",
688
- "links": null
689
- },
690
- "BIBREF2": {
691
- "ref_id": "b2",
692
- "title": "A stochastic parts program and noun phrase parser for unrestricted text",
693
- "authors": [
694
- {
695
- "first": "K",
696
- "middle": [],
697
- "last": "Church",
698
- "suffix": ""
699
- }
700
- ],
701
- "year": 1988,
702
- "venue": "Proceedings of the Second Conference on Applied Natural Language Processing",
703
- "volume": "",
704
- "issue": "",
705
- "pages": "136--143",
706
- "other_ids": {},
707
- "num": null,
708
- "urls": [],
709
- "raw_text": "K. Church 1988. A stochastic parts program and noun phrase parser for unrestricted text. In Pro- ceedings of the Second Conference on Applied Nat- ural Language Processing, pages 136-143.",
710
- "links": null
711
- },
712
- "BIBREF3": {
713
- "ref_id": "b3",
714
- "title": "A technique for computer detection and correction of spelling errors",
715
- "authors": [
716
- {
717
- "first": "F",
718
- "middle": [],
719
- "last": "Damerau",
720
- "suffix": ""
721
- }
722
- ],
723
- "year": 1964,
724
- "venue": "Communications of the ACM",
725
- "volume": "7",
726
- "issue": "",
727
- "pages": "171--176",
728
- "other_ids": {},
729
- "num": null,
730
- "urls": [],
731
- "raw_text": "F. Damerau. 1964. A technique for computer detec- tion and correction of spelling errors. Communi- cations of the ACM, 7:171-176.",
732
- "links": null
733
- },
734
- "BIBREF4": {
735
- "ref_id": "b4",
736
- "title": "Spelling correction using context",
737
- "authors": [
738
- {
739
- "first": "M",
740
- "middle": [],
741
- "last": "Elmi",
742
- "suffix": ""
743
- },
744
- {
745
- "first": "M",
746
- "middle": [],
747
- "last": "Evens",
748
- "suffix": ""
749
- }
750
- ],
751
- "year": 1998,
752
- "venue": "Proceedings of the 36th Annual Meeting of the A CL and the 17th hlternational Collference on Computational Linguistics",
753
- "volume": "",
754
- "issue": "",
755
- "pages": "360--364",
756
- "other_ids": {},
757
- "num": null,
758
- "urls": [],
759
- "raw_text": "M. Elmi and M. Evens. 1998. Spelling correction using context. In Proceedings of the 36th Annual Meeting of the A CL and the 17th hlternational Collference on Computational Linguistics, pages 360-364.",
760
- "links": null
761
- },
762
- "BIBREF5": {
763
- "ref_id": "b5",
764
- "title": "Language identification with confidence limits",
765
- "authors": [
766
- {
767
- "first": "D",
768
- "middle": [],
769
- "last": "Elworthy",
770
- "suffix": ""
771
- }
772
- ],
773
- "year": 1998,
774
- "venue": "Proceedings of the 6th Workshop on Very large Corpora",
775
- "volume": "",
776
- "issue": "",
777
- "pages": "",
778
- "other_ids": {},
779
- "num": null,
780
- "urls": [],
781
- "raw_text": "D. Elworthy. 1998. Language identification with confidence limits. In Proceedings of the 6th Work- shop on Very large Corpora.",
782
- "links": null
783
- },
784
- "BIBREF6": {
785
- "ref_id": "b6",
786
- "title": "The nomad system: expectationbased detection and correction of errors during understanding of syntactically and semantically illformed text",
787
- "authors": [
788
- {
789
- "first": "R",
790
- "middle": [],
791
- "last": "Granger",
792
- "suffix": ""
793
- }
794
- ],
795
- "year": 1983,
796
- "venue": "American Journal of Computational Linguistics",
797
- "volume": "9",
798
- "issue": "",
799
- "pages": "188--198",
800
- "other_ids": {},
801
- "num": null,
802
- "urls": [],
803
- "raw_text": "R. Granger. 1983. The nomad system: expectation- based detection and correction of errors during un- derstanding of syntactically and semantically ill- formed text. American Journal of Computational Linguistics, 9:188-198.",
804
- "links": null
805
- },
806
- "BIBREF7": {
807
- "ref_id": "b7",
808
- "title": "Experiments in text recognition with binary n-gram and viterbi algorithms",
809
- "authors": [
810
- {
811
- "first": "J",
812
- "middle": [],
813
- "last": "Hull",
814
- "suffix": ""
815
- },
816
- {
817
- "first": "S",
818
- "middle": [],
819
- "last": "Srihari",
820
- "suffix": ""
821
- }
822
- ],
823
- "year": 1982,
824
- "venue": "IEEE Trans. Patt. Anal. Machine b~tell. PAMI-4",
825
- "volume": "5",
826
- "issue": "",
827
- "pages": "520--530",
828
- "other_ids": {},
829
- "num": null,
830
- "urls": [],
831
- "raw_text": "J. Hull and S. Srihari. 1982. Experiments in text recognition with binary n-gram and viterbi algo- rithms. IEEE Trans. Patt. Anal. Machine b~tell. PAMI-4, 5:520-530.",
832
- "links": null
833
- },
834
- "BIBREF8": {
835
- "ref_id": "b8",
836
- "title": "Techniques for automatically correcting words in text",
837
- "authors": [
838
- {
839
- "first": "K",
840
- "middle": [],
841
- "last": "Kukich",
842
- "suffix": ""
843
- }
844
- ],
845
- "year": 1992,
846
- "venue": "ACM Computing Surveys",
847
- "volume": "24",
848
- "issue": "",
849
- "pages": "377--439",
850
- "other_ids": {},
851
- "num": null,
852
- "urls": [],
853
- "raw_text": "K.. Kukich. 1992. Techniques for automatically cor- recting words in text. ACM Computing Surveys, 24:377-439.",
854
- "links": null
855
- },
856
- "BIBREF9": {
857
- "ref_id": "b9",
858
- "title": "Corpus Processing for Lexical Acquisition, chapter Identifying unknown proper names in newswire text",
859
- "authors": [
860
- {
861
- "first": "I",
862
- "middle": [],
863
- "last": "Mani",
864
- "suffix": ""
865
- },
866
- {
867
- "first": "R",
868
- "middle": [],
869
- "last": "Mcmillan",
870
- "suffix": ""
871
- },
872
- {
873
- "first": "S",
874
- "middle": [],
875
- "last": "Luperfoy",
876
- "suffix": ""
877
- },
878
- {
879
- "first": "E",
880
- "middle": [],
881
- "last": "Lusher",
882
- "suffix": ""
883
- },
884
- {
885
- "first": "S",
886
- "middle": [],
887
- "last": "Laskowski",
888
- "suffix": ""
889
- }
890
- ],
891
- "year": 1996,
892
- "venue": "",
893
- "volume": "",
894
- "issue": "",
895
- "pages": "",
896
- "other_ids": {},
897
- "num": null,
898
- "urls": [],
899
- "raw_text": "I. Mani, R. McMillan, S. Luperfoy, E. Lusher, and S. Laskowski, 1996. Corpus Processing for Lexical Acquisition, chapter Identifying unknown proper names in newswire text. MIT Press, Cambridge.",
900
- "links": null
901
- },
902
- "BIBREF10": {
903
- "ref_id": "b10",
904
- "title": "Corpus Processing for Lexical Acquisition, chapter Internal and external evidence in the identification and semantic categorization of proper names",
905
- "authors": [
906
- {
907
- "first": "D",
908
- "middle": [],
909
- "last": "Mcdonald",
910
- "suffix": ""
911
- }
912
- ],
913
- "year": 1996,
914
- "venue": "",
915
- "volume": "",
916
- "issue": "",
917
- "pages": "",
918
- "other_ids": {},
919
- "num": null,
920
- "urls": [],
921
- "raw_text": "D. McDonald, 1996. Corpus Processing for Lexi- cal Acquisition, chapter Internal and external ev- idence in the identification and semantic catego- rization of proper names. MIT Press, Cambridge.",
922
- "links": null
923
- },
924
- "BIBREF11": {
925
- "ref_id": "b11",
926
- "title": "Integrated control of chart items for error repair",
927
- "authors": [
928
- {
929
- "first": "K",
930
- "middle": [],
931
- "last": "Min",
932
- "suffix": ""
933
- },
934
- {
935
- "first": "W",
936
- "middle": [],
937
- "last": "Wilson",
938
- "suffix": ""
939
- }
940
- ],
941
- "year": 1998,
942
- "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and the 17th hlternational Conferet~ce on Computational Linguistics",
943
- "volume": "",
944
- "issue": "",
945
- "pages": "",
946
- "other_ids": {},
947
- "num": null,
948
- "urls": [],
949
- "raw_text": "K. Min and W. Wilson. 1998. Integrated control of chart items for error repair. In Proceedings of the 36th Annual Meeting of the Association for Com- putational Linguistics and the 17th hlternational Conferet~ce on Computational Linguistics.",
950
- "links": null
951
- },
952
- "BIBREF12": {
953
- "ref_id": "b12",
954
- "title": "Spelling checkers, spelling coffeetots, and the misspellings of poor spellers",
955
- "authors": [
956
- {
957
- "first": "K",
958
- "middle": [],
959
- "last": "Min",
960
- "suffix": ""
961
- }
962
- ],
963
- "year": 1987,
964
- "venue": "Inf. Process. Manage",
965
- "volume": "23",
966
- "issue": "",
967
- "pages": "495--505",
968
- "other_ids": {},
969
- "num": null,
970
- "urls": [],
971
- "raw_text": "K. Min. 1996. Hierarchical Error Re.covery Based on Bidirectional Chart Parsing Techniques. Ph.D. thesis, University of NSW, Sydney, Australia. R. Mitton. 1987. Spelling checkers, spelling coffee- tots, and the misspellings of poor spellers. Inf. Process. Manage, 23:495-505.",
972
- "links": null
973
- },
974
- "BIBREF13": {
975
- "ref_id": "b13",
976
- "title": "Categorizing Unknown Words: A decision tree-based misspelling identifier",
977
- "authors": [
978
- {
979
- "first": "J",
980
- "middle": [],
981
- "last": "Toole",
982
- "suffix": ""
983
- }
984
- ],
985
- "year": 1999,
986
- "venue": "Advanced Topics in Artificial h2telligence",
987
- "volume": "",
988
- "issue": "",
989
- "pages": "122--133",
990
- "other_ids": {},
991
- "num": null,
992
- "urls": [],
993
- "raw_text": "J. Toole 1999 Categorizing Unknown Words: A de- cision tree-based misspelling identifier In Foo, N (ed.) Advanced Topics in Artificial h2telligence, pages 122-133.",
994
- "links": null
995
- },
996
- "BIBREF14": {
997
- "ref_id": "b14",
998
- "title": "Improving data driven word class tagging by system combination",
999
- "authors": [
1000
- {
1001
- "first": "H",
1002
- "middle": [],
1003
- "last": "Van Halteren",
1004
- "suffix": ""
1005
- },
1006
- {
1007
- "first": "J",
1008
- "middle": [],
1009
- "last": "Zavrel",
1010
- "suffix": ""
1011
- },
1012
- {
1013
- "first": "W",
1014
- "middle": [],
1015
- "last": "Daelemans",
1016
- "suffix": ""
1017
- }
1018
- ],
1019
- "year": 1998,
1020
- "venue": "Proceedings of the 36th Annual Meeting of the ACL and the 17th International Conference on Computational Linguistics",
1021
- "volume": "",
1022
- "issue": "",
1023
- "pages": "491--497",
1024
- "other_ids": {},
1025
- "num": null,
1026
- "urls": [],
1027
- "raw_text": "H. van Halteren, J. Zavrel, and W. Daelemans. 1998. Improving data driven word class tagging by sys- tem combination. In Proceedings of the 36th An- nual Meeting of the ACL and the 17th Interna- tional Conference on Computational Linguistics, pages 491-497.",
1028
- "links": null
1029
- },
1030
- "BIBREF15": {
1031
- "ref_id": "b15",
1032
- "title": "Detecting and correcting morphosyntactic errors in real texts",
1033
- "authors": [
1034
- {
1035
- "first": "T",
1036
- "middle": [],
1037
- "last": "Vosse",
1038
- "suffix": ""
1039
- }
1040
- ],
1041
- "year": 1992,
1042
- "venue": "Proceedin9s of the 3rd Conference o11 Applied Natural Language Processing",
1043
- "volume": "",
1044
- "issue": "",
1045
- "pages": "111--118",
1046
- "other_ids": {},
1047
- "num": null,
1048
- "urls": [],
1049
- "raw_text": "T. Vosse. 1992. Detecting and correcting morpho- syntactic errors in real texts. In Proceedin9s of the 3rd Conference o11 Applied Natural Language Processing, pages 111-118.",
1050
- "links": null
1051
- },
1052
- "BIBREF16": {
1053
- "ref_id": "b16",
1054
- "title": "Predictive Data Mining",
1055
- "authors": [
1056
- {
1057
- "first": "S",
1058
- "middle": [],
1059
- "last": "Weiss",
1060
- "suffix": ""
1061
- },
1062
- {
1063
- "first": "N",
1064
- "middle": [],
1065
- "last": "Indurkhya",
1066
- "suffix": ""
1067
- }
1068
- ],
1069
- "year": 1998,
1070
- "venue": "",
1071
- "volume": "",
1072
- "issue": "",
1073
- "pages": "",
1074
- "other_ids": {},
1075
- "num": null,
1076
- "urls": [],
1077
- "raw_text": "S. Weiss and N. Indurkhya. 1998. Predictive Data Mining. Morgan Kauffman Publishers.",
1078
- "links": null
1079
- },
1080
- "BIBREF17": {
1081
- "ref_id": "b17",
1082
- "title": "Maximizing text-mining performance",
1083
- "authors": [
1084
- {
1085
- "first": "S",
1086
- "middle": [],
1087
- "last": "Weiss",
1088
- "suffix": ""
1089
- },
1090
- {
1091
- "first": "C",
1092
- "middle": [],
1093
- "last": "Apte",
1094
- "suffix": ""
1095
- },
1096
- {
1097
- "first": "F",
1098
- "middle": [],
1099
- "last": "Damerau",
1100
- "suffix": ""
1101
- },
1102
- {
1103
- "first": "D",
1104
- "middle": [],
1105
- "last": "Johnson",
1106
- "suffix": ""
1107
- },
1108
- {
1109
- "first": "F",
1110
- "middle": [],
1111
- "last": "Oles",
1112
- "suffix": ""
1113
- },
1114
- {
1115
- "first": "T",
1116
- "middle": [],
1117
- "last": "Goetz",
1118
- "suffix": ""
1119
- },
1120
- {
1121
- "first": "T",
1122
- "middle": [],
1123
- "last": "Hampp",
1124
- "suffix": ""
1125
- }
1126
- ],
1127
- "year": 1999,
1128
- "venue": "IEEE Intelligent Systems and their Applications",
1129
- "volume": "14",
1130
- "issue": "4",
1131
- "pages": "63--69",
1132
- "other_ids": {},
1133
- "num": null,
1134
- "urls": [],
1135
- "raw_text": "S. Weiss, and C. Apte, and F. Damerau, and D. Johnson, and F. Oles and T. Goetz, and T. Hampp. 1999 Maximizing text-mining per- formance. IEEE Intelligent Systems and their Applications, 14(4):63-69",
1136
- "links": null
1137
- },
1138
- "BIBREF18": {
1139
- "ref_id": "b18",
1140
- "title": "The use of tri-gram analysis for spelling error detection. he",
1141
- "authors": [
1142
- {
1143
- "first": "E",
1144
- "middle": [],
1145
- "last": "Zamora",
1146
- "suffix": ""
1147
- },
1148
- {
1149
- "first": "J",
1150
- "middle": [],
1151
- "last": "Pollock",
1152
- "suffix": ""
1153
- },
1154
- {
1155
- "first": "A",
1156
- "middle": [],
1157
- "last": "Zamora",
1158
- "suffix": ""
1159
- }
1160
- ],
1161
- "year": 1981,
1162
- "venue": "Process. Manage",
1163
- "volume": "17",
1164
- "issue": "",
1165
- "pages": "305--316",
1166
- "other_ids": {},
1167
- "num": null,
1168
- "urls": [],
1169
- "raw_text": "E. Zamora, J. Pollock, and A. Zamora. 1981. The use of tri-gram analysis for spelling error detec- tion. he Process. Manage., 17:305-316.",
1170
- "links": null
1171
- }
1172
- },
1173
- "ref_entries": {
1174
- "TABREF1": {
1175
- "content": "<table/>",
1176
- "text": "",
1177
- "type_str": "table",
1178
- "num": null,
1179
- "html": null
1180
- },
1181
- "TABREF3": {
1182
- "content": "<table><tr><td/><td colspan=\"3\">Baseline Precision Precision Recall F-score</td></tr><tr><td>Name features only</td><td>70.4%</td><td>86.5% 92.9%</td><td>89.6</td></tr><tr><td>All Features</td><td/><td>91.8% 94.5%</td><td>93.1</td></tr><tr><td colspan=\"3\">Table 4: Precision and recall for name identification</td><td/></tr><tr><td/><td/><td colspan=\"2\">Precision Recall F-score</td></tr><tr><td colspan=\"2\">Predicting Names and Misspellings</td><td>86.6% 89.9%</td><td>88.2</td></tr></table>",
1183
- "text": "For this reason, the unknown word categorizer should be portable to new languages, even where extensive language resources do not exist. If",
1184
- "type_str": "table",
1185
- "num": null,
1186
- "html": null
1187
- }
1188
- }
1189
- }
1190
- }
Full_text_JSON/prefixA/json/A00/A00-1025.json DELETED
@@ -1,1768 +0,0 @@
1
- {
2
- "paper_id": "A00-1025",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:02.578390Z"
6
- },
7
- "title": "Examining the Role of Statistical and Linguistic Knowledge Sources in a General-Knowledge Question-Answering System",
8
- "authors": [
9
- {
10
- "first": "Claire",
11
- "middle": [],
12
- "last": "Cardie",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Cornell University",
17
- "location": {
18
- "postCode": "148531",
19
- "settlement": "Ithaca",
20
- "region": "NY",
21
- "country": "SaBIR Research"
22
- }
23
- },
24
- "email": "[email protected]"
25
- },
26
- {
27
- "first": "Vincent",
28
- "middle": [],
29
- "last": "Ng",
30
- "suffix": "",
31
- "affiliation": {
32
- "laboratory": "",
33
- "institution": "Cornell University",
34
- "location": {
35
- "postCode": "148531",
36
- "settlement": "Ithaca",
37
- "region": "NY",
38
- "country": "SaBIR Research"
39
- }
40
- },
41
- "email": ""
42
- },
43
- {
44
- "first": "David",
45
- "middle": [],
46
- "last": "Pierce",
47
- "suffix": "",
48
- "affiliation": {
49
- "laboratory": "",
50
- "institution": "Cornell University",
51
- "location": {
52
- "postCode": "148531",
53
- "settlement": "Ithaca",
54
- "region": "NY",
55
- "country": "SaBIR Research"
56
- }
57
- },
58
- "email": "[email protected]"
59
- },
60
- {
61
- "first": "Chris",
62
- "middle": [],
63
- "last": "Buckley",
64
- "suffix": "",
65
- "affiliation": {
66
- "laboratory": "",
67
- "institution": "Cornell University",
68
- "location": {
69
- "postCode": "148531",
70
- "settlement": "Ithaca",
71
- "region": "NY",
72
- "country": "SaBIR Research"
73
- }
74
- },
75
- "email": "[email protected]"
76
- }
77
- ],
78
- "year": "",
79
- "venue": null,
80
- "identifiers": {},
81
- "abstract": "We describe and evaluate an implemented system for general-knowledge question answering. The system combines techniques for standard ad-hoc information retrieval (IR), query-dependent text summarization, and shallow syntactic and semantic sentence analysis. In a series of experiments we examine the role of each statistical and linguistic knowledge source in the question-answering system. In contrast to previous results, we find first that statistical knowledge of word co-occurrences as computed by IR vector space methods can be used to quickly and accurately locate the relevant documents for each question. The use of query-dependent text summarization techniques, however, provides only small increases in performance and severely limits recall levels when inaccurate. Nevertheless, it is the text summarization component that allows subsequent linguistic filters to focus on relevant passages. We find that even very weak linguistic knowledge can offer substantial improvements over purely IRbased techniques for question answering, especially when smoothly integrated with statistical preferences computed by the IR subsystems.",
82
- "pdf_parse": {
83
- "paper_id": "A00-1025",
84
- "_pdf_hash": "",
85
- "abstract": [
86
- {
87
- "text": "We describe and evaluate an implemented system for general-knowledge question answering. The system combines techniques for standard ad-hoc information retrieval (IR), query-dependent text summarization, and shallow syntactic and semantic sentence analysis. In a series of experiments we examine the role of each statistical and linguistic knowledge source in the question-answering system. In contrast to previous results, we find first that statistical knowledge of word co-occurrences as computed by IR vector space methods can be used to quickly and accurately locate the relevant documents for each question. The use of query-dependent text summarization techniques, however, provides only small increases in performance and severely limits recall levels when inaccurate. Nevertheless, it is the text summarization component that allows subsequent linguistic filters to focus on relevant passages. We find that even very weak linguistic knowledge can offer substantial improvements over purely IRbased techniques for question answering, especially when smoothly integrated with statistical preferences computed by the IR subsystems.",
88
- "cite_spans": [],
89
- "ref_spans": [],
90
- "eq_spans": [],
91
- "section": "Abstract",
92
- "sec_num": null
93
- }
94
- ],
95
- "body_text": [
96
- {
97
- "text": "In this paper, we describe and evaluate an implemented system for general-knowledge question answering. Open-ended question-answering systems that allow users to pose a question of any type, in any language, without domain restrictions, remain beyond the scope of today's text-processing systems. We investigate instead a restricted, but nevertheless useful variation of the problem (TREC-8, 2000) :",
98
- "cite_spans": [
99
- {
100
- "start": 383,
101
- "end": 397,
102
- "text": "(TREC-8, 2000)",
103
- "ref_id": null
104
- }
105
- ],
106
- "ref_spans": [],
107
- "eq_spans": [],
108
- "section": "Introduction",
109
- "sec_num": "1"
110
- },
111
- {
112
- "text": "Given a large text collection and a set of questions specified in English, find answers to the questions in the collection.",
113
- "cite_spans": [],
114
- "ref_spans": [],
115
- "eq_spans": [],
116
- "section": "Introduction",
117
- "sec_num": "1"
118
- },
119
- {
120
- "text": "In addition, the restricted task guarantees that:",
121
- "cite_spans": [],
122
- "ref_spans": [],
123
- "eq_spans": [],
124
- "section": "Introduction",
125
- "sec_num": "1"
126
- },
127
- {
128
- "text": "\u2022 the answer exists in the collection,",
129
- "cite_spans": [],
130
- "ref_spans": [],
131
- "eq_spans": [],
132
- "section": "Introduction",
133
- "sec_num": "1"
134
- },
135
- {
136
- "text": "\u2022 all supporting information for the answer lies in a single document, and",
137
- "cite_spans": [],
138
- "ref_spans": [],
139
- "eq_spans": [],
140
- "section": "Introduction",
141
- "sec_num": "1"
142
- },
143
- {
144
- "text": "\u2022 the answer is short m less than 50 bytes in length.",
145
- "cite_spans": [],
146
- "ref_spans": [],
147
- "eq_spans": [],
148
- "section": "Introduction",
149
- "sec_num": "1"
150
- },
151
- {
152
- "text": "Consider, for example, the question Which country has the largest part of the Amazon rain forest?, taken from the TREC8 Question Answering development corpus. The answer (in document LA032590-0089) is Brazil Previous research has addressed similar questionanswering (QA) scenarios using a variety of natural language processing (NLP) and information retrieval (IR) techniques. Lehnert (1978) tackles the difficult task of answering questions in the context of story understanding. Unlike our restricted QA task, questions to Lehnert's system often require answers that are not explicitly mentioned in the story. Her goal then is to answer questions by making inferences about actions and actors in the story using world knowledge in the form of scripts, plans, and goals (Schank and Abelson, 1977) . More recently, Burke et al. (1995; 1997) describe a system that answers natural language questions using a database of question-answer pairs built from existing frequentlyasked question (FAQ) files. Their FAQFinder system uses IR techniques to match the given question to questions in the database. It then uses the Word-Net lexical semantic knowledge base (Miller et al., 1990; Fellbaum, 1998) to improve the quality of the match. Kupiec (1993) investigates a closed-class QA task that is similar in many respects to the TREC8 QA task that we address here: the system answers general-knowledge questions using an encyclopedia. In addition, Kupiec assumes that all answers are noun phrases. Although our task does not explicitly include a \"noun phrase\" constraint, the answer length restriction effectively imposes the same bias toward noun phrase answers. Kupiec's MURAX system applies a combination of statistical (IR) and linguistic (NLP) techniques. A series of secondary boolean search queries with proximity constraints is combined with shallow parsing methods to find relevant sections of the encyclopedia, to extract answer hypotheses, and to confirm phrase relations specified in the question. In an evaluation on 70 \"Trivial Pursuit\" who and what questions, Kupiec concludes that robust natural language analysis can add to the quality of the information retrieval process. In addition, he claims that, for their closed-class QA task, vector space IR methods (Salton et al., 1975) appear inadequate.",
153
- "cite_spans": [
154
- {
155
- "start": 377,
156
- "end": 391,
157
- "text": "Lehnert (1978)",
158
- "ref_id": "BIBREF12"
159
- },
160
- {
161
- "start": 771,
162
- "end": 797,
163
- "text": "(Schank and Abelson, 1977)",
164
- "ref_id": "BIBREF23"
165
- },
166
- {
167
- "start": 815,
168
- "end": 834,
169
- "text": "Burke et al. (1995;",
170
- "ref_id": "BIBREF6"
171
- },
172
- {
173
- "start": 835,
174
- "end": 840,
175
- "text": "1997)",
176
- "ref_id": "BIBREF7"
177
- },
178
- {
179
- "start": 1157,
180
- "end": 1178,
181
- "text": "(Miller et al., 1990;",
182
- "ref_id": "BIBREF15"
183
- },
184
- {
185
- "start": 1179,
186
- "end": 1194,
187
- "text": "Fellbaum, 1998)",
188
- "ref_id": "BIBREF10"
189
- },
190
- {
191
- "start": 1232,
192
- "end": 1245,
193
- "text": "Kupiec (1993)",
194
- "ref_id": "BIBREF11"
195
- },
196
- {
197
- "start": 2269,
198
- "end": 2290,
199
- "text": "(Salton et al., 1975)",
200
- "ref_id": "BIBREF20"
201
- }
202
- ],
203
- "ref_spans": [],
204
- "eq_spans": [],
205
- "section": "Introduction",
206
- "sec_num": "1"
207
- },
208
- {
209
- "text": "We present here a new approach to the restricted question-answering task described above. Like MURAX, our system draws from both statistical and linguistic sources to find answers to generalknowledge questions. The underlying architecture of the system, however, is very different: it combines vector space IR techniques for document retrieval, a vector space approach to query-dependent text summarization, shallow corpus-based syntactic analysis, and knowledge-based semantic analysis. We evaluate the system on the TREC8 QA development corpus as well as the TREC8 QA test corpus. In particular, all parameters for the final QA system are determined using the development corpus. Our current results are encouraging but not outstanding: the system is able to correctly answer 22 out of 38 of the development questions and 91 out of 200 of the test questions given five guesses for each question. Furthermore, the first guess is correct for 16 out of the 22 development questions and 53 out of 91 of the test questions.",
210
- "cite_spans": [],
211
- "ref_spans": [],
212
- "eq_spans": [],
213
- "section": "Introduction",
214
- "sec_num": "1"
215
- },
216
- {
217
- "text": "More importantly, we investigate the relative role of each statistical and linguistic knowledge source in the proposed IR/NLP question-answering system. In contrast to previous results, we find that statistical knowledge of word co-occurrences as computed by vector space models of IR can be used to quickly and accurately locate relevant documents in the restricted QA task. When used in isolation, vector space methods for query-dependent text summarization, however, provide relatively small increases in performance. In addition, we find that the text summarization component can severely limit recall levels. Nevertheless, it is the summarization component that allows the linguistic filters to focus on relevant passages. In particular, we find that very weak linguistic knowledge can offer substantial improvements over purely IR-based techniques for question answering, especially when smoothly integrated with the statistical preferences computed by the IR subsystems.",
218
- "cite_spans": [],
219
- "ref_spans": [],
220
- "eq_spans": [],
221
- "section": "Introduction",
222
- "sec_num": "1"
223
- },
224
- {
225
- "text": "In the next section, we describe the general architecture of the question-answering system. Section 3 describes the baseline system and its information retrieval component. Sections 4-7 describe and evaluate a series of variations to the baseline system that incorporate, in turn, query-dependent text summarization, a syntactic filter, a semantic filter, and an algorithm that allows syntactic knowledge to influence the initial ordering of summary extracts. Section 8 compares our approach to some of those in the recent TREC8 QA evaluation (TREC-8, 2000) and describes directions for future work.",
226
- "cite_spans": [
227
- {
228
- "start": 543,
229
- "end": 557,
230
- "text": "(TREC-8, 2000)",
231
- "ref_id": null
232
- }
233
- ],
234
- "ref_spans": [],
235
- "eq_spans": [],
236
- "section": "Introduction",
237
- "sec_num": "1"
238
- },
239
- {
240
- "text": "The basic architecture of the question-answering system is depicted in Figure 1 . It contains two main components: the IR subsystems and the linguistic filters. As a preliminary, ofl]ine step, the IR subsystem first indexes the text collection from which answers are to be extracted. Given a question, the goal of the IR component is then to return a ranked list of those text chunks (e.g. documents, sentences, or paragraphs) from the indexed collection that are most relevant to the query and from which answer hypotheses can he extracted. Next, the QA system optionally applies one or more linguistic filters to the text chunks to extract an ordered list of answer hypotheses. The top hypotheses are concatenated to form five 50-byte guesses as allowed by the TREC8 guidelines. Note that many of these guesses may be difficult to read and judged as incorrect by the TREC8 assessors: we will also describe the results of generating single phrases as guesses wherever this is possible. In the sections below, we present and evaluate a series of instantiations of this general architecture, each of which makes different assumptions regarding the type of information that will best support the QA task. The next section begins by describing the baseline QA system.",
241
- "cite_spans": [],
242
- "ref_spans": [
243
- {
244
- "start": 71,
245
- "end": 79,
246
- "text": "Figure 1",
247
- "ref_id": "FIGREF0"
248
- }
249
- ],
250
- "eq_spans": [],
251
- "section": "System Architecture",
252
- "sec_num": "2"
253
- },
254
- {
255
- "text": "It is clear that a successful QA system will need some way to find the documents that are most relevant to the user's question. In a baseline system, we assume that standard IR techniques can be used for this task. In contrast to MURAX, however, we hypothesize that the vector space retrieval model will suffice. In the vector space model, both the question and the documents are represented as vectors with one entry for every unique word that appears in the collection. Each entry is the term weight, a real number that indicates the presence or absence of the word in the text. The similarity between a question vector, Q = ql,q2,... ,qn, and a document vector, D = dl, d2,..., tin, is traditionally computed using a cosine similarity measure:",
256
- "cite_spans": [],
257
- "ref_spans": [],
258
- "eq_spans": [],
259
- "section": "The Vector Space Model for Document Retrieval",
260
- "sec_num": "3"
261
- },
262
- {
263
- "text": "n 8im(Q,D) = Z d, .q, i..~ l",
264
- "cite_spans": [],
265
- "ref_spans": [],
266
- "eq_spans": [],
267
- "section": "The Vector Space Model for Document Retrieval",
268
- "sec_num": "3"
269
- },
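A minimal sketch of the vector space scoring just defined (raw term frequencies stand in for Smart's Lnu/ltu weights, which are more elaborate):

    import math
    from collections import Counter

    def cosine(question, document):
        # Build term-frequency vectors and score them with the
        # cosine similarity measure sim(Q, D).
        q, d = Counter(question.lower().split()), Counter(document.lower().split())
        dot = sum(q[t] * d[t] for t in q)
        norm = math.sqrt(sum(v * v for v in q.values())) * \
               math.sqrt(sum(v * v for v in d.values()))
        return dot / norm if norm else 0.0

    docs = ["Brazil holds the largest part of the Amazon rain forest.",
            "The Nile is the longest river."]
    query = "Which country has the largest part of the Amazon rain forest?"
    ranked = sorted(docs, key=lambda d: cosine(query, d), reverse=True)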
270
- {
271
- "text": "Using this measure, the IR system returns a ranked list of those documents most similar to the question.",
272
- "cite_spans": [],
273
- "ref_spans": [],
274
- "eq_spans": [],
275
- "section": "The Vector Space Model for Document Retrieval",
276
- "sec_num": "3"
277
- },
278
- {
279
- "text": "The Baseline QA System: The Smart Vector Space Model. For the IR component of the baseline QA system, we use Smart (Salton, 1971 ), a sophisticated text-processing system based on the vector space model and employed as the retrieval engine for a number of the top-performing systems at recent Text REtrieval Conferences (e.g. Buckley et al., 1998a Buckley et al., , 1998b . Given a question, Smart returns a ranked list of the documents most relevant to the question. For the baseline QA system and all subsequent variations, we use Smart with standard term-weighting strategies I and do not use automatic relevance feedback (Buckley, 1995) . In addition, the baseline system applies no linguistic filters. To generate answers for a particular question, the system starts at the beginning of the top-ranked document returned by Smart for the question and constructs five 50-byte chunks consisting of document text with stopwords removed.",
280
- "cite_spans": [
281
- {
282
- "start": 115,
283
- "end": 128,
284
- "text": "(Salton, 1971",
285
- "ref_id": null
286
- },
287
- {
288
- "start": 326,
289
- "end": 347,
290
- "text": "Buckley et al., 1998a",
291
- "ref_id": "BIBREF1"
292
- },
293
- {
294
- "start": 348,
295
- "end": 371,
296
- "text": "Buckley et al., , 1998b",
297
- "ref_id": "BIBREF3"
298
- },
299
- {
300
- "start": 625,
301
- "end": 640,
302
- "text": "(Buckley, 1995)",
303
- "ref_id": "BIBREF5"
304
- }
305
- ],
306
- "ref_spans": [],
307
- "eq_spans": [],
308
- "section": "The Vector Space Model for Document Retrieval",
309
- "sec_num": "3"
310
- },
311
- {
312
- "text": "lWe use Lnu term weighting for documents and Itu term weighting for the question (Singhal et al., 1996) .",
313
- "cite_spans": [
314
- {
315
- "start": 81,
316
- "end": 103,
317
- "text": "(Singhal et al., 1996)",
318
- "ref_id": "BIBREF24"
319
- }
320
- ],
321
- "ref_spans": [],
322
- "eq_spans": [],
323
- "section": "The Vector Space Model for Document Retrieval",
324
- "sec_num": "3"
325
- },
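The baseline answer-generation step (five 50-byte chunks of stopword-stripped text from the top document) reduces to a short routine; the stopword list below is a tiny stand-in for Smart's own:

    STOPWORDS = {"the", "a", "an", "of", "in", "and", "to", "is", "was"}

    def baseline_guesses(document, n_guesses=5, size=50):
        # Drop stopwords, then pack the remaining words into 50-byte
        # chunks starting from the beginning of the document.
        words = [w for w in document.split() if w.lower() not in STOPWORDS]
        guesses, current = [], ""
        for w in words:
            if len(current) + len(w) + 1 > size and current:
                guesses.append(current.strip())
                current = ""
                if len(guesses) == n_guesses:
                    break
            current += w + " "
        return guesses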
326
- {
327
- "text": "Evaluation. As noted above, we evaluate each variation of our QA system on 38 TREC8 development questions and 200 TREC8 test questions. The indexed collection is TREC disks 4 and 5 (without Congressional Records). Results for the baseline Smart IR QA system are shown in the first row of Table 1 . The system gets 3 out of 38 development questions and 29 out of 200 test questions correct. We judge the system correct if any of the five guesses contains each word of one of the answers. The final column of results shows the mean answer rank across all questions correctly answered.",
328
- "cite_spans": [],
329
- "ref_spans": [
330
- {
331
- "start": 288,
332
- "end": 295,
333
- "text": "Table 1",
334
- "ref_id": null
335
- }
336
- ],
337
- "eq_spans": [],
338
- "section": "The Vector Space Model for Document Retrieval",
339
- "sec_num": "3"
340
- },
341
- {
342
- "text": "Smart is actually performing much better than its scores would suggest. For 18 of the 38 development questions, the answer appears in the top-ranked document; for 33 questions, the answer appears in one of the top seven documents. For only two questions does Smart fail to retrieve a good document in the top 25 documents. For the test corpus, over half of the 200 questions are answered in the top-ranked document (110); over 75% of the questions (155) are answered in top five documents. Only 19 questions were not answered in the top 20 documents.",
343
- "cite_spans": [],
344
- "ref_spans": [],
345
- "eq_spans": [],
346
- "section": "The Vector Space Model for Document Retrieval",
347
- "sec_num": "3"
348
- },
349
- {
350
- "text": "for Question",
351
- "cite_spans": [],
352
- "ref_spans": [],
353
- "eq_spans": [],
354
- "section": "Summarization",
355
- "sec_num": null
356
- },
357
- {
358
- "text": "We next hypothesize that query-dependent text summarization algorithms will improve the performance of the QA system by focusing the system on the most relevant portions of the retrieved documents. The goal for query-dependent summarization algorithms is to provide a short summary of a document with respect to a specific query. Although a number of methods for query-dependent text summarization are beginning to be developed and evaluated in a variety of realistic settings (Mani et al., 1999) , we again propose the use of vector space methods from IR, which can be easily extended to the summarization task (Salton et al., 1994) :",
359
- "cite_spans": [
360
- {
361
- "start": 477,
362
- "end": 496,
363
- "text": "(Mani et al., 1999)",
364
- "ref_id": "BIBREF13"
365
- },
366
- {
367
- "start": 612,
368
- "end": 633,
369
- "text": "(Salton et al., 1994)",
370
- "ref_id": "BIBREF21"
371
- }
372
- ],
373
- "ref_spans": [],
374
- "eq_spans": [],
375
- "section": "Answering",
376
- "sec_num": null
377
- },
378
- {
379
- "text": "1. Given a question and a document, divide the document into chunks (e.g. sentences, paragraphs, 200-word passages).",
380
- "cite_spans": [],
381
- "ref_spans": [],
382
- "eq_spans": [],
383
- "section": "Answering",
384
- "sec_num": null
385
- },
386
- {
387
- "text": "2. Generate the vector representation for the question and for each document chunk.",
388
- "cite_spans": [],
389
- "ref_spans": [],
390
- "eq_spans": [],
391
- "section": "Answering",
392
- "sec_num": null
393
- },
394
- {
395
- "text": "3. Use the cosine similarity measure to determine the similarity of each chunk to the question.",
396
- "cite_spans": [],
397
- "ref_spans": [],
398
- "eq_spans": [],
399
- "section": "Answering",
400
- "sec_num": null
401
- },
402
- {
403
- "text": "4. Return as the query-dependent summary the most similar chunks up to a predetermined summary length (e.g. 10% or 20% of the original document).",
404
- "cite_spans": [],
405
- "ref_spans": [],
406
- "eq_spans": [],
407
- "section": "Answering",
408
- "sec_num": null
409
- },
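Steps 1-4 above, for the sentence-based variant, amount to only a few lines (assuming a naive period-based sentence splitter and the cosine scorer sketched earlier; Smart's own chunking is more careful):

    def summarize(question, document, ratio=0.2):
        # Rank each sentence by cosine similarity to the question and
        # return the best ones up to the requested summary length.
        sentences = [s.strip() for s in document.split(".") if s.strip()]
        ranked = sorted(sentences, key=lambda s: cosine(question, s), reverse=True)
        budget = max(1, int(ratio * len(sentences)))
        return ranked[:budget]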
410
- {
411
- "text": "This approach to text summarization was shown to be quite successful in the recent SUMMAC evaluation of text summarization systems (Mani et al., 1999; Buckley et al., 1999) . Our general assumption here is that Ii~ approaches can be used to quickly and accurately find both relevant documents and relevant document portions. In related work, Chali et al. (1999) also propose text summarization techniques as a primary component for their QA system. They employ a combination of vector-space methods and lexical chaining to derive their sentencebased summaries. We hypothesize that deeper analysis of the summary extracts is better accomplished by methods from NLP that can determine syntactic and semantic relationships between relevant constituents. There is a risk in using query-dependent summaries to focus the search for answer hypotheses, however: if the summarization algorithm is inaccurate, the desired answers will occur outside of the summaries and will not be accessible to subsequent components of the QA system.",
412
- "cite_spans": [
413
- {
414
- "start": 131,
415
- "end": 150,
416
- "text": "(Mani et al., 1999;",
417
- "ref_id": "BIBREF13"
418
- },
419
- {
420
- "start": 151,
421
- "end": 172,
422
- "text": "Buckley et al., 1999)",
423
- "ref_id": "BIBREF4"
424
- },
425
- {
426
- "start": 342,
427
- "end": 361,
428
- "text": "Chali et al. (1999)",
429
- "ref_id": "BIBREF9"
430
- }
431
- ],
432
- "ref_spans": [],
433
- "eq_spans": [],
434
- "section": "Answering",
435
- "sec_num": null
436
- },
437
- {
438
- "text": "The Query-Dependent Text Summarization QA System. In the next version of the QA system, we augment the baseline system to perform query-dependent text summarization for the top k retrieved documents. More specifically, the IR subsystem returns the summary extracts (sentences or paragraphs) for the top k documents after sorting them according to their cosine similarity scores w.r.t, the question. As before, no linguistic filters are applied, and answers are generated by constructing 50-byte chunks from the ordered extracts after removing stopwords. In the experiments below, k = 7 for the development questions and k = 6 for the test questions. 2 Evaluation. Results for the Text Summarization QA system using sentence-based summaries are shown in the second row of Table 1 . Here we see a relatively small improvement: the system now answers four development and 45 test questions correctly. The mean answer rank, however, improves noticeably from 3.33 to 2.25 for the development corpus and from 3.07 to 2.67 for the test corpus. Paragraph-based summaries yield similar but slightly smaller improvements; as a result, sentence summaries are used exclusively in subsequent sections. Unfortunately, the system's reliance on querydependent text summarization actually limits its potential: in only 23 of the 38 development questions (61%), for example, does the correct answer appear in the summary for one of the top k --7 documents. The QA system cannot hope to answer correctly any of the remaining 15 questions. For only 135 of the 200 questions in the test corpus (67.5%) does the correct answer appear in the summary for one of 2The value for k was chosen so that at least 80% of the questions in the set had answers appearing in the retrieved documents ranked 1-k. We have not experimented extensively with many values of k and expect that better performance can be obtained by tuning k for each text collection.",
439
- "cite_spans": [],
440
- "ref_spans": [
441
- {
442
- "start": 771,
443
- "end": 778,
444
- "text": "Table 1",
445
- "ref_id": null
446
- }
447
- ],
448
- "eq_spans": [],
449
- "section": "Answering",
450
- "sec_num": null
451
- },
452
- {
453
- "text": "the top k --6 documents. 3 It is possible that automatic relevance feedback or coreference resolution would improve performance. We are investigating these options in current work.",
454
- "cite_spans": [],
455
- "ref_spans": [],
456
- "eq_spans": [],
457
- "section": "Answering",
458
- "sec_num": null
459
- },
460
- {
461
- "text": "The decision of whether or not to incorporate text summarization in the QA system depends, in part, on the ability of subsequent processing components (i.e. the linguistic filters) to locate answer hypotheses. If subsequent components are very good at discarding implausible answers, then summarization methods may limit system performance. Therefore, we investigate next the use of two linguistic filters in conjunction with the query-dependent text summarization methods evaluated here.",
462
- "cite_spans": [],
463
- "ref_spans": [],
464
- "eq_spans": [],
465
- "section": "Answering",
466
- "sec_num": null
467
- },
468
- {
469
- "text": "Incorporating the Noun Phrase Filter",
470
- "cite_spans": [],
471
- "ref_spans": [],
472
- "eq_spans": [],
473
- "section": "5",
474
- "sec_num": null
475
- },
476
- {
477
- "text": "The restricted QA task that we investigate requires answers to be short --no more than 50 bytes in length. This effectively eliminates how or why questions from consideration. Almost all of the remaining question types are likely to have noun phrases as answers. In the TREC8 development corpus, for example, 36 of 38 questions have noun phrase answers.",
478
- "cite_spans": [],
479
- "ref_spans": [],
480
- "eq_spans": [],
481
- "section": "5",
482
- "sec_num": null
483
- },
484
- {
485
- "text": "As a result, we next investigate the use of a very simple linguistic filter that considers only noun phrases as answer hypotheses. The filter operates on the ordered list of summary extracts for a particular question and produces a list of answer hypotheses, one for each noun phrase (NP) in the extracts in the left-to-right order in which they appeared.",
486
- "cite_spans": [],
487
- "ref_spans": [],
488
- "eq_spans": [],
489
- "section": "5",
490
- "sec_num": null
491
- },
492
- {
493
- "text": "The NP-based QA System. Our implementation of the NP-based QA system uses the Empire noun phrase finder, which is described in detail in Cardie and Pierce (1998) . Empire identifies base NPs --non-recursive noun phrases --using a very simple algorithm that matches part-of-speech tag sequences based on a learned noun phrase grammar. The approach is able to achieve 94% precision and recall for base NPs derived from the Penn Treebank Wall Street Journal (Marcus et al., 1993) . In the experiments below, the NP filter follows the application of the document retrieval and text summarization components. Pronoun answer hypotheses are discarded, and the NPs are assembled into 50-byte chunks.",
494
- "cite_spans": [
495
- {
496
- "start": 137,
497
- "end": 161,
498
- "text": "Cardie and Pierce (1998)",
499
- "ref_id": "BIBREF8"
500
- },
501
- {
502
- "start": 455,
503
- "end": 476,
504
- "text": "(Marcus et al., 1993)",
505
- "ref_id": "BIBREF14"
506
- }
507
- ],
508
- "ref_spans": [],
509
- "eq_spans": [],
510
- "section": "5",
511
- "sec_num": null
512
- },
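Base NP extraction of the kind Empire performs can be approximated with NLTK's regular-expression chunker; the one-rule grammar below is a toy stand-in for Empire's learned noun phrase grammar:

    import nltk  # assumes the punkt and averaged_perceptron_tagger data are downloaded

    # One tag-sequence rule: optional determiner, adjectives, then nouns.
    chunker = nltk.RegexpParser("NP: {<DT>?<JJ.*>*<NN.*>+}")

    tagged = nltk.pos_tag(nltk.word_tokenize(
        "Brazil has the largest part of the Amazon rain forest."))
    tree = chunker.parse(tagged)
    noun_phrases = [" ".join(word for word, tag in subtree.leaves())
                    for subtree in tree.subtrees() if subtree.label() == "NP"]
    # e.g. ['Brazil', 'the largest part', 'the Amazon rain forest']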
513
- {
514
- "text": "Evaluation. Results for the NP-based QA system are shown in the third row of Table 1 . The noun phrase filter markedly improves system performance for the development corpus, nearly dou-3Paragraph-based summaries provide better coverage on the test corpus than sentence-based summaries: for 151 questions, the correct answer appears in the summary for one of the top k documents. This suggests that paragraph summaries might be better suited for use with more sophisticated linguistic filters that are capable of discerning the answer in the larger summary. bling the number of questions answered correctly. We found these results somewhat surprising since this linguistic filter is rather weak: we expected it to work well only in combination with the semantic filter described below. The noun phrase filter has much less of an effect on the test corpus, improving performance on questions answered from 45 to 50. In a separate experiment, we applied the NP filter to the baseline system that includes no text summa\u00b0 rization component. Here the NP filter does not improve performance --the system gets only two questions correct. This indicates that the NP filter depends critically on the text summarization component. As a result, we will continue to use querydependent text summarization in the experiments below.",
515
- "cite_spans": [],
516
- "ref_spans": [
517
- {
518
- "start": 77,
519
- "end": 84,
520
- "text": "Table 1",
521
- "ref_id": null
522
- }
523
- ],
524
- "eq_spans": [],
525
- "section": "5",
526
- "sec_num": null
527
- },
528
- {
529
- "text": "The NP filter provides the first opportunity to look at single-phrase answers. The preceding QA systems produced answers that were rather unnaturally chunked into 50-byte strings. When such chunking is disabled, only one development and 20 test questions are answered. The difference in performance between the NP filter with chunking and the NP filter alone clearly indicates that the NP filter is extracting good guesses, but that subsequent linguistic processing is needed to promote the best guesses to the top of the ranked guess list.",
530
- "cite_spans": [],
531
- "ref_spans": [],
532
- "eq_spans": [],
533
- "section": "5",
534
- "sec_num": null
535
- },
536
- {
537
- "text": "The NP filter does not explicitly consider the question in its search for noun phrase answers. It is clear, however, that a QA system must pay greater attention to the syntactic and semantic constraints specified in the question. For example, a question like Who was president of the US in 19957 indicates that the answer is likely to be a person. In addition, there should be supporting evidence from the answer document that the person was president, and, more specifically, held this office in the US and in 1995.",
538
- "cite_spans": [
539
- {
540
- "start": 284,
541
- "end": 295,
542
- "text": "US in 19957",
543
- "ref_id": null
544
- }
545
- ],
546
- "ref_spans": [],
547
- "eq_spans": [],
548
- "section": "Incorporating Semantic Type Information",
549
- "sec_num": "6"
550
- },
551
- {
552
- "text": "We introduce here a second linguistic filter that considers the primary semantic constraint from the question. The filter begins by determining the ques-tion type, i.e. the semantic type requested in the question. It then takes the ordered set of summary extracts supplied by the IR subsytem, uses the syntactic filter from Section 5 to extract NPs, and generates an answer hypothesis for every noun phrase that is semantically compatible with the question type. Our implementation of this semantic class filter is described below. The filter currently makes no attempt to confirm other linguistic relations mentioned in the question.",
553
- "cite_spans": [],
554
- "ref_spans": [],
555
- "eq_spans": [],
556
- "section": "Incorporating Semantic Type Information",
557
- "sec_num": "6"
558
- },
559
- {
560
- "text": "The Semantic Type Checking QA System. For most questions, the question word itself determines the semantic type of the answer. This is true for who, where, and when questions, for example, which request a person, place, and time expression as an answer. For many which and what questions, however, determining the question type requires additional syntactic analysis. For these, we currently extract the head noun in the question as the question type. For example, in Which country has the largest part o$ the Amazon rain :forest? we identify country as the question type. Our heuristics for determining question type were based on the development corpus and were designed to be general, but have not yet been directly evaluated on a separate question corpus.",
561
- "cite_spans": [],
562
- "ref_spans": [],
563
- "eq_spans": [],
564
- "section": "Incorporating Semantic Type Information",
565
- "sec_num": "6"
566
- },
567
- {
568
- "text": "\u2022 Given the question type and an answer hypothesis, the Semantic Type Checking QA System then uses WordNet to check that an appropriate ancestordescendent relationship holds. Given Brazil as an answer hypothesis for the above question, for example, Wordnet's type hierarchy confirms that Brazil is a subtype of country, allowing the system to conclude that the semantic type of the answer hypothesis matches the question type.",
569
- "cite_spans": [],
570
- "ref_spans": [],
571
- "eq_spans": [],
572
- "section": "Incorporating Semantic Type Information",
573
- "sec_num": "6"
574
- },
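A minimal sketch of this type check, using NLTK's WordNet interface as a stand-in for the paper's WordNet component (the closure over hypernym and instance-hypernym links is our reconstruction):

```python
from nltk.corpus import wordnet as wn  # requires the NLTK WordNet data

def type_matches(answer, qtype):
    """True if some noun sense of `answer` reaches a noun sense of
    `qtype` via (instance) hypernym links, e.g. Brazil -> country."""
    goal = set(wn.synsets(qtype, pos=wn.NOUN))
    agenda = list(wn.synsets(answer, pos=wn.NOUN))
    seen = set()
    while agenda:
        s = agenda.pop()
        if s in goal:
            return True
        if s not in seen:
            seen.add(s)
            agenda.extend(s.hypernyms() + s.instance_hypernyms())
    return False

print(type_matches("Brazil", "country"))  # expected: True
```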
575
- {
576
- "text": "For words (mostly proper nouns) that do not appear in WordNet, heuristics are used to determine semantic type. There are heuristics to recognize 13 basic question types: Person, Location, Date, Month, Year, Time, Age, Weight, Area, Volume, Length, Amount, and Number. For Person questions, for example, the system relies primarily on a rule that checks for capitalization and abbreviations in order to identify phrases that correspond to people. There are approximately 20 such rules that together cover all 13 question types listed above. The rules effectively operate as a very simple named entity identifier. Evaluation. Results for the Semantic Type Checking variation of the QA system are shown in the fourth row of Table 1 . Here we see a dramatic increase in performance: the system answers three times as many development questions (21) correctly over the previous variation. This is especially encouraging given that the IR and text summarization components limit the maximum number correct to 23. In addition, the mean answer rank improves from 2.29 to 1.38. A closer look at Table 1 , however, indicates problems with the semantic type checking linguistic filter. While performance on the development corpus increases by 37 percentage points (from 18.4% correct to 55.3% correct), relative gains for the test corpus are much smaller. There is only an improvement of 18 percentage points, from 25.0% correct (50/200) to 43.0% correct (86/200). This is a clear indication that the heuristics used in the semantic type checking component, which were designed based on the development corpus, do not generalize well to different question sets. Replacing the current heuristics with a Named Entity identification component or learning the heuristics using standard inductive learning techniques should help with the scalability of this linguistic filter.",
577
- "cite_spans": [],
578
- "ref_spans": [
579
- {
580
- "start": 721,
581
- "end": 728,
582
- "text": "Table 1",
583
- "ref_id": null
584
- },
585
- {
586
- "start": 1086,
587
- "end": 1093,
588
- "text": "Table 1",
589
- "ref_id": null
590
- }
591
- ],
592
- "eq_spans": [],
593
- "section": "Incorporating Semantic Type Information",
594
- "sec_num": "6"
595
- },
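For illustration, one of the roughly 20 rules might look like the following; this is a hypothetical reconstruction of the capitalization-and-abbreviation test for Person (the paper does not publish the actual rules):

```python
import re

# Hypothetical Person rule: a sequence of capitalized tokens,
# allowing initials and honorific abbreviations.
TOKEN = r"(?:[A-Z][a-z]+|[A-Z]\.|Dr\.|Mr\.|Mrs\.|Ms\.)"
PERSON = re.compile(rf"^{TOKEN}(?:\s+{TOKEN})+$")

def looks_like_person(phrase):
    return bool(PERSON.match(phrase.strip()))

print(looks_like_person("Bill Clinton"))       # True
print(looks_like_person("the United States"))  # False: lowercase 'the'
```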
596
- {
597
- "text": "Nevertheless, it is somewhat surprising that very weak syntactic information (the NP filter) and weak semantic class information (question type checking) can produce such improvements. In particular, it appears that it is reasonable to rely implicitly on the IR subsystems to enforce the other linguistic relationships specified in the query (e.g. that Clinton is president, that this office was held in the US and in 1995).",
598
- "cite_spans": [],
599
- "ref_spans": [],
600
- "eq_spans": [],
601
- "section": "Incorporating Semantic Type Information",
602
- "sec_num": "6"
603
- },
604
- {
605
- "text": "Finally, when 50-byte chunking is disabled for the semantic type checking QA variation, there is a decrease in the number of questions correctly answered, to 19 and 57 for the development and test corpus, respectively.",
606
- "cite_spans": [],
607
- "ref_spans": [],
608
- "eq_spans": [],
609
- "section": "Incorporating Semantic Type Information",
610
- "sec_num": "6"
611
- },
612
- {
613
- "text": "Syntactic and semantic linguistic knowledge has been used thus far as post-processing filters that locate and confirm answer hypotheses from the statistically specified summary extracts. We hypothesized that further improvements might be made by allowing this linguistic knowledge to influence the initial ordering of text chunks for the linguistic filters. In a final system, we begin to investigate this claim. Our general approach is to define a new scoring measure that operates on the summary extracts and can be used to reorder the extracts based on linguistic knowledge. The QA System with Linguistic Reordering of Summary Extracts. As described above, our final version of the QA system ranks summary extracts according to both their vector space similarity to the question as well as linguistic evidence that the answer lies within the extract. In particular, each summary extract E for question q is ranked according to a new score, Sq:",
614
- "cite_spans": [],
615
- "ref_spans": [],
616
- "eq_spans": [],
617
- "section": "Extracts",
618
- "sec_num": null
619
- },
620
- {
621
-    "text": "The intuition behind the new score is to prefer summary extracts that exhibit the same linguistic relationships as the question (as indicated by LRq) and to give more weight (as indicated by w) to linguistic relationship matches in extracts from higher-ranked documents. More specifically, LRq(E) is the number of linguistic relationships from the question that appear in E. In the experiments below, LRq(E) is just the number of base NPs from the question that appear in the summary extract. In future work, we plan to include other pairwise linguistic relationships (e.g. subject-verb relationships, verb-object relationships, pp-attachment relationships).",
622
- "cite_spans": [
623
- {
624
- "start": 402,
625
- "end": 408,
626
- "text": "LRq(E)",
627
- "ref_id": null
628
- }
629
- ],
630
- "ref_spans": [],
631
- "eq_spans": [],
632
-    "section": "Sq(E) = w(E) * LRq(E)",
633
- "sec_num": null
634
- },
635
- {
636
-    "text": "The weight w(E) is a number between 0 and 1 that is based on the retrieval rank r of the document that contains E: w(E) = max(m, 1 - p*r). In our experiments, m = 0.5 and p = 0.1. Both values were selected manually based on the development corpus; an extensive search for the best such values was not done.",
637
- "cite_spans": [
638
- {
639
- "start": 126,
640
- "end": 138,
641
-                    "text": "(m, 1 - p*r)",
642
- "ref_id": null
643
- }
644
- ],
645
- "ref_spans": [],
646
- "eq_spans": [],
647
-    "section": "Sq(E) = w(E) * LRq(E)",
648
- "sec_num": null
649
- },
650
- {
651
- "text": "The summary extracts are sorted according to the new scoring measure and the ranked list of sentences is provided to the linguistic filters as before.",
652
- "cite_spans": [],
653
- "ref_spans": [],
654
- "eq_spans": [],
655
-    "section": "Sq(E) = w(E) * LRq(E)",
656
- "sec_num": null
657
- },
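Putting the definitions above together, a short worked sketch of the reordering (function and variable names are ours; LRq here counts question base NPs found in the extract, as in the experiments described):

```python
def lr_q(question_nps, extract):
    """LRq(E): number of base NPs from the question found in extract E."""
    text = extract.lower()
    return sum(1 for np in question_nps if np.lower() in text)

def weight(rank, m=0.5, p=0.1):
    """w(E) = max(m, 1 - p*r), where r is the retrieval rank of the
    document containing E; m = 0.5 and p = 0.1 as in the experiments."""
    return max(m, 1.0 - p * rank)

def reorder(question_nps, extracts):
    """extracts: (text, doc_rank) pairs, sorted by Sq(E) = w(E) * LRq(E)."""
    return sorted(extracts,
                  key=lambda e: weight(e[1]) * lr_q(question_nps, e[0]),
                  reverse=True)
```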
658
- {
659
- "text": "Evaluation. Results for this final variation of the QA system are shown in the bottom row of Table 1. Here we see a fairly minor increase in performance over the use of linguistic filters alone: the system answers only one more question correctly than the previous variation for the development corpus and answers five additional questions for the test corpus. The mean answer rank improves only negligibly. Sixteen of the 22 correct answers (73%) appear as the top-ranked guess for the development corpus; only 53 out of 91 correct answers (58%) appear as the top-ranked guess for the test corpus. Unfortunately, when 50-byte chunking is disabled, system performance drops precipitously, by 5% (to 20 out of 38) for the development corpus and by 13% (to 65 out of 200) for the test corpus. As noted above, this indicates that the filters are finding the answers, but more sophisticated linguistic sorting is needed to promote the best answers to the top. Through its LRq term, the new scoring measure does provide a mechanism for allowing other linguistic relationships to influence the initial ordering of summary extracts. The current results, however, indicate that with only very weak syntactic information (i.e. base noun phrases), the new scoring measure is only marginally successful in reordering the summary extracts based on syntactic information.",
660
- "cite_spans": [],
661
- "ref_spans": [
662
- {
663
- "start": 93,
664
- "end": 101,
665
- "text": "Table 1.",
666
- "ref_id": null
667
- }
668
- ],
669
- "eq_spans": [],
670
-    "section": "Sq(E) = w(E) * LRq(E)",
671
- "sec_num": null
672
- },
673
- {
674
- "text": "As noted above, the final system (with the liberal 50-byte answer chunker) correctly answers 22 out of 38 questions for the development corpus. Of the 16 errors, the text retrieval component is responsible for five (31.2%), the text summarization component for ten (62.5%), and the linguistic filters for one (6.3%). In this analysis we consider the linguistic filters responsible for an error if they were unable to promote an available answer hypothesis to one of the top five guesses. A slightly different situation arises for the test corpus: of the 109 errors, the text retrieval component is responsible for 39 (35.8%), the text summarization component for 26 (23.9%), and the linguistic filters for 44 (40.4%). As discussed in Section 6, the heuristics that comprise the semantic type checking filter do not scale to the test corpus and are the primary reason for the larger percentage of errors attributed to the linguistic filters for that corpus.",
675
- "cite_spans": [],
676
- "ref_spans": [],
677
- "eq_spans": [],
678
-    "section": "Sq(E) = w(E) * LRq(E)",
679
- "sec_num": null
680
- },
681
- {
682
- "text": "We have described and evaluated a series of question-answering systems, each of which incorporates a different combination of statistical and linguistic knowledge sources. We find that even very weak linguistic knowledge can offer substantial improvements over purely IR-based techniques especially when smoothly integrated with the text passage preferences computed by the IR subsystems. Although our primary goal was to investigate the use of statistical and linguistic knowledge sources, it is possible to compare our approach and our results to those for systems in the recent TREC8 QA evaluation. Scores on the TREC8 test corpus for systems participating in the QA evaluation ranged between 3 and 146 correct. Discarding the top three scores and the worst three scores, the remaining eight systems achieved between 52 and 91 correct. Using the liberal answer chunker, our final QA system equals the best of these systems (91 correct); without it, our score of 65 correct places our QA system near the middle of this group of eight. Like the work described here, virtually all of the top-ranked TREC8 systems use a combination of IR and shallow NLP for their QA systems. IBM's AnSel system (Prager et al., 2000) , for example, employs finite-state patterns as its primary shallow NLP component. These are used to recognize a fairly broad set of about 20 named entities. The IR component indexes only text passages associated with these entities. The AT&T QA system (Singhal et al., 2000) , the Qanda system (Breck et al., 2000) , and the SyncMatcher system (Oard et al., 2000) all employ vector-space methods from IR, named entity identifiers, and a fairly simple question type determiner. In addition, SyncMatcher uses a broad-coverage dependency parser to enforce phrase relationship constraints. Instead of the vector space model, the LASSO system (Moldovan et al., 2000) uses boolean search operators for paragraph retrieval. Recognition of answer hypotheses in their system relies on identifying named entities. Finally, the Cymphony QA system (Srihari and Li, 2000) relies heavily on named entity identification; it also employs standard IR techniques and a shallow parser.",
683
- "cite_spans": [
684
- {
685
- "start": 1194,
686
- "end": 1215,
687
- "text": "(Prager et al., 2000)",
688
- "ref_id": "BIBREF19"
689
- },
690
- {
691
- "start": 1469,
692
- "end": 1491,
693
- "text": "(Singhal et al., 2000)",
694
- "ref_id": "BIBREF25"
695
- },
696
- {
697
- "start": 1511,
698
- "end": 1531,
699
- "text": "(Breck et al., 2000)",
700
- "ref_id": "BIBREF0"
701
- },
702
- {
703
- "start": 1561,
704
- "end": 1580,
705
- "text": "(Oard et al., 2000)",
706
- "ref_id": "BIBREF18"
707
- },
708
- {
709
- "start": 1855,
710
- "end": 1878,
711
- "text": "(Moldovan et al., 2000)",
712
- "ref_id": "BIBREF16"
713
- },
714
- {
715
- "start": 2053,
716
- "end": 2075,
717
- "text": "(Srihari and Li, 2000)",
718
- "ref_id": "BIBREF26"
719
- }
720
- ],
721
- "ref_spans": [],
722
- "eq_spans": [],
723
- "section": "Related Work and Conclusions",
724
- "sec_num": "8"
725
- },
726
- {
727
- "text": "In terms of statistical and linguistic knowledge sources employed, the primary difference between these systems and ours is our lack of an adequate named entity tagger. Incorporation of such a tagger will be a focus of future work. In addition, we believe that the retrieval and summarization components can be improved by incorporating automatic relevance feedback (Buckley, 1995) and coreference resolution. Morton (1999) , for example, shows that coreference resolution improves passage retrieval for their question-answering system. We also plan to reconsider paragraph-based summaries given their coverage on the test corpus. The most critical area for improvement, however, is the linguistic filters. The semantic type filter will be greatly improved by the addition of a named entity tagger, but we believe that additional gains can be attained by augmenting named entity identification with information from WordNet. Finally, we currently make no attempt to confirm any phrase relations from the query. Without this, system performance will remain severely limited.",
728
- "cite_spans": [
729
- {
730
- "start": 366,
731
- "end": 381,
732
- "text": "(Buckley, 1995)",
733
- "ref_id": "BIBREF5"
734
- },
735
- {
736
- "start": 410,
737
- "end": 423,
738
- "text": "Morton (1999)",
739
- "ref_id": "BIBREF17"
740
- }
741
- ],
742
- "ref_spans": [],
743
- "eq_spans": [],
744
- "section": "Related Work and Conclusions",
745
- "sec_num": "8"
746
- }
747
- ],
748
- "back_matter": [
749
- {
750
- "text": "This work was supported in part by NSF Grants IRI-9624639 and GER-9454149.",
751
- "cite_spans": [],
752
- "ref_spans": [],
753
- "eq_spans": [],
754
- "section": "Acknowledgments",
755
- "sec_num": null
756
- }
757
- ],
758
- "bib_entries": {
759
- "BIBREF0": {
760
- "ref_id": "b0",
761
- "title": "A Sys Called Qanda",
762
- "authors": [
763
- {
764
- "first": "E",
765
- "middle": [],
766
- "last": "Breck",
767
- "suffix": ""
768
- },
769
- {
770
- "first": "J",
771
- "middle": [],
772
- "last": "Burger",
773
- "suffix": ""
774
- },
775
- {
776
- "first": "L",
777
- "middle": [],
778
- "last": "Ferro",
779
- "suffix": ""
780
- },
781
- {
782
- "first": "D",
783
- "middle": [],
784
- "last": "House",
785
- "suffix": ""
786
- },
787
- {
788
- "first": "M",
789
- "middle": [],
790
- "last": "Light",
791
- "suffix": ""
792
- },
793
- {
794
- "first": "I",
795
- "middle": [],
796
- "last": "Mani",
797
- "suffix": ""
798
- }
799
- ],
800
- "year": 2000,
801
- "venue": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication",
802
- "volume": "",
803
- "issue": "",
804
- "pages": "",
805
- "other_ids": {},
806
- "num": null,
807
- "urls": [],
808
-            "raw_text": "E. Breck, J. Burger, L. Ferro, D. House, M. Light, and I. Mani. 2000. A Sys Called Qanda. In E. Voorhees, editor, Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication. In press.",
809
- "links": null
810
- },
811
- "BIBREF1": {
812
- "ref_id": "b1",
813
- "title": "SMART high precision: TREC 7",
814
- "authors": [
815
- {
816
- "first": "C",
817
- "middle": [],
818
- "last": "Buckley",
819
- "suffix": ""
820
- },
821
- {
822
- "first": "M",
823
- "middle": [],
824
- "last": "Mitra",
825
- "suffix": ""
826
- },
827
- {
828
- "first": "J",
829
- "middle": [],
830
- "last": "Walz",
831
- "suffix": ""
832
- },
833
- {
834
- "first": "C",
835
- "middle": [],
836
- "last": "Cardie",
837
- "suffix": ""
838
- }
839
- ],
840
- "year": 1998,
841
- "venue": "",
842
- "volume": "",
843
- "issue": "",
844
- "pages": "",
845
- "other_ids": {},
846
- "num": null,
847
- "urls": [],
848
- "raw_text": "C. Buckley, M. Mitra, J. Walz, and C. Cardie. 1998a. SMART high precision: TREC 7. In",
849
- "links": null
850
- },
851
- "BIBREF2": {
852
- "ref_id": "b2",
853
- "title": "Proceedings of the Seventh Text REtrieval Conference TREC 7",
854
- "authors": [],
855
- "year": null,
856
- "venue": "",
857
- "volume": "",
858
- "issue": "",
859
- "pages": "500--242",
860
- "other_ids": {},
861
- "num": null,
862
- "urls": [],
863
-            "raw_text": "E. Voorhees, editor, Proceedings of the Seventh Text REtrieval Conference TREC 7, pages 285-298. NIST Special Publication 500-242.",
864
- "links": null
865
- },
866
- "BIBREF3": {
867
- "ref_id": "b3",
868
- "title": "Using clustering and superconcepts within SMART : TREC 6",
869
- "authors": [
870
- {
871
- "first": "C",
872
- "middle": [],
873
- "last": "Buckley",
874
- "suffix": ""
875
- },
876
- {
877
- "first": "M",
878
- "middle": [],
879
- "last": "Mitra",
880
- "suffix": ""
881
- },
882
- {
883
- "first": "J",
884
- "middle": [],
885
- "last": "Walz",
886
- "suffix": ""
887
- },
888
- {
889
- "first": "C",
890
- "middle": [],
891
- "last": "Cardie",
892
- "suffix": ""
893
- }
894
- ],
895
- "year": 1998,
896
- "venue": "Proceedings of the Sixth Text REtrieval Conference TREC 6",
897
- "volume": "",
898
- "issue": "",
899
- "pages": "500--240",
900
- "other_ids": {},
901
- "num": null,
902
- "urls": [],
903
-            "raw_text": "C. Buckley, M. Mitra, J. Walz, and C. Cardie. 1998b. Using clustering and superconcepts within SMART: TREC 6. In E. Voorhees, editor, Proceedings of the Sixth Text REtrieval Conference TREC 6, pages 107-124. NIST Special Publication 500-240.",
904
- "links": null
905
- },
906
- "BIBREF4": {
907
- "ref_id": "b4",
908
- "title": "The Smart/Empire TIPSTER IR System",
909
- "authors": [
910
- {
911
- "first": "C",
912
- "middle": [],
913
- "last": "Buckley",
914
- "suffix": ""
915
- },
916
- {
917
- "first": "C",
918
- "middle": [],
919
- "last": "Cardie",
920
- "suffix": ""
921
- },
922
- {
923
- "first": "S",
924
- "middle": [],
925
- "last": "Mardis",
926
- "suffix": ""
927
- },
928
- {
929
- "first": "M",
930
- "middle": [],
931
- "last": "Mitra",
932
- "suffix": ""
933
- },
934
- {
935
- "first": "D",
936
- "middle": [],
937
- "last": "Pierce",
938
- "suffix": ""
939
- },
940
- {
941
- "first": "K",
942
- "middle": [],
943
- "last": "Wagstaff",
944
- "suffix": ""
945
- },
946
- {
947
- "first": "J",
948
- "middle": [],
949
- "last": "Walz",
950
- "suffix": ""
951
- }
952
- ],
953
- "year": 1999,
954
-            "venue": "Proceedings, TIPSTER Text Program (Phase III). Morgan Kaufmann",
955
- "volume": "",
956
- "issue": "",
957
- "pages": "",
958
- "other_ids": {},
959
- "num": null,
960
- "urls": [],
961
-            "raw_text": "C. Buckley, C. Cardie, S. Mardis, M. Mitra, D. Pierce, K. Wagstaff, and J. Walz. 1999. The Smart/Empire TIPSTER IR System. In Proceedings, TIPSTER Text Program (Phase III). Morgan Kaufmann. To appear.",
962
- "links": null
963
- },
964
- "BIBREF5": {
965
- "ref_id": "b5",
966
-            "title": "Massive Query Expansion for Relevance Feedback",
967
- "authors": [
968
- {
969
- "first": "Chris",
970
- "middle": [],
971
- "last": "Buckley",
972
- "suffix": ""
973
- }
974
- ],
975
- "year": 1995,
976
- "venue": "",
977
- "volume": "",
978
- "issue": "",
979
- "pages": "",
980
- "other_ids": {},
981
- "num": null,
982
- "urls": [],
983
-            "raw_text": "Chris Buckley. 1995. Massive Query Expansion for Relevance Feedback. Cornell University, Ph.D. Thesis, Ithaca, New York.",
984
- "links": null
985
- },
986
- "BIBREF6": {
987
- "ref_id": "b6",
988
- "title": "Knowledge-Based Information Retrieval from Semi-Structured Text",
989
- "authors": [
990
- {
991
- "first": "R",
992
- "middle": [],
993
- "last": "Burke",
994
- "suffix": ""
995
- },
996
- {
997
- "first": "K",
998
- "middle": [],
999
- "last": "Hammond",
1000
- "suffix": ""
1001
- },
1002
- {
1003
- "first": "J",
1004
- "middle": [],
1005
- "last": "Kozlovsky",
1006
- "suffix": ""
1007
- }
1008
- ],
1009
- "year": 1995,
1010
- "venue": "Working Notes of the AAAI Fall Symposium on AI Applications in Knowledge Navigation and Retrieval",
1011
- "volume": "",
1012
- "issue": "",
1013
- "pages": "19--24",
1014
- "other_ids": {},
1015
- "num": null,
1016
- "urls": [],
1017
- "raw_text": "R. Burke, K. Hammond, and J. Kozlovsky. 1995. Knowledge-Based Information Retrieval from Semi-Structured Text. In Working Notes of the AAAI Fall Symposium on AI Applications in Knowledge Navigation and Retrieval, pages 19-24. AAAI Press.",
1018
- "links": null
1019
- },
1020
- "BIBREF7": {
1021
- "ref_id": "b7",
1022
-            "title": "Question Answering from Frequently-Asked Question Files",
1023
- "authors": [
1024
- {
1025
- "first": "K",
1026
- "middle": [],
1027
- "last": "Burke",
1028
- "suffix": ""
1029
- },
1030
- {
1031
- "first": "V",
1032
- "middle": [],
1033
- "last": "Hammond",
1034
- "suffix": ""
1035
- },
1036
- {
1037
- "first": "S",
1038
- "middle": [],
1039
- "last": "Kulyukin",
1040
- "suffix": ""
1041
- },
1042
- {
1043
- "first": "N",
1044
- "middle": [],
1045
-                    "last": "Lytinen",
1046
- "suffix": ""
1047
- },
1048
- {
1049
- "first": "S",
1050
- "middle": [],
1051
- "last": "Tomuro",
1052
- "suffix": ""
1053
- },
1054
- {
1055
- "first": "",
1056
- "middle": [],
1057
- "last": "Schoenberg",
1058
- "suffix": ""
1059
- }
1060
- ],
1061
- "year": 1997,
1062
- "venue": "",
1063
- "volume": "",
1064
- "issue": "",
1065
- "pages": "",
1066
- "other_ids": {},
1067
- "num": null,
1068
- "urls": [],
1069
-            "raw_text": "R. Burke, K. Hammond, V. Kulyukin, S. Lytinen, N. Tomuro, and S. Schoenberg. 1997. Question Answering from Frequently-Asked Question Files. Technical Report TR-97-05, University of Chicago.",
1070
- "links": null
1071
- },
1072
- "BIBREF8": {
1073
- "ref_id": "b8",
1074
- "title": "Error-Driven Pruning of Treebank Grammars for Base Noun Phrase Identification",
1075
- "authors": [
1076
- {
1077
- "first": "C",
1078
- "middle": [],
1079
- "last": "Cardie",
1080
- "suffix": ""
1081
- },
1082
- {
1083
- "first": "D",
1084
- "middle": [],
1085
- "last": "Pierce",
1086
- "suffix": ""
1087
- }
1088
- ],
1089
- "year": 1998,
1090
-            "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and COLING-98",
1091
- "volume": "",
1092
- "issue": "",
1093
- "pages": "218--224",
1094
- "other_ids": {},
1095
- "num": null,
1096
- "urls": [],
1097
-            "raw_text": "C. Cardie and D. Pierce. 1998. Error-Driven Pruning of Treebank Grammars for Base Noun Phrase Identification. In Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and COLING-98, pages 218-224, University of Montreal, Montreal, Canada. Association for Computational Linguistics.",
1098
- "links": null
1099
- },
1100
- "BIBREF9": {
1101
- "ref_id": "b9",
1102
- "title": "Query-Biased Text Summarization as a Question-Answering Technique",
1103
- "authors": [
1104
- {
1105
- "first": "Y",
1106
- "middle": [],
1107
- "last": "Chali",
1108
- "suffix": ""
1109
- },
1110
- {
1111
- "first": "S",
1112
- "middle": [],
1113
- "last": "Matwin",
1114
- "suffix": ""
1115
- },
1116
- {
1117
- "first": "S",
1118
- "middle": [],
1119
- "last": "Szpakowicz",
1120
- "suffix": ""
1121
- }
1122
- ],
1123
- "year": 1999,
1124
-            "venue": "Proceedings of the AAAI Fall Symposium on Question Answering Systems",
1125
- "volume": "",
1126
- "issue": "",
1127
- "pages": "52--56",
1128
- "other_ids": {},
1129
- "num": null,
1130
- "urls": [],
1131
-            "raw_text": "Y. Chali, S. Matwin, and S. Szpakowicz. 1999. Query-Biased Text Summarization as a Question-Answering Technique. In Proceedings of the AAAI Fall Symposium on Question Answering Systems, pages 52-56. AAAI Press. AAAI TR FS-99-02.",
1132
- "links": null
1133
- },
1134
- "BIBREF10": {
1135
- "ref_id": "b10",
1136
-            "title": "WordNet: An Electronic Lexical Database",
1137
- "authors": [
1138
- {
1139
- "first": "C",
1140
- "middle": [],
1141
- "last": "Fellbaum",
1142
- "suffix": ""
1143
- }
1144
- ],
1145
- "year": 1998,
1146
- "venue": "",
1147
- "volume": "",
1148
- "issue": "",
1149
- "pages": "",
1150
- "other_ids": {},
1151
- "num": null,
1152
- "urls": [],
1153
-            "raw_text": "C. Fellbaum. 1998. WordNet: An Electronic Lexical Database. MIT Press, Cambridge, MA.",
1154
- "links": null
1155
- },
1156
- "BIBREF11": {
1157
- "ref_id": "b11",
1158
- "title": "MURAX: A Robust Linguistic approach For Question Answering Using An On-Line Encyclopedia",
1159
- "authors": [
1160
- {
1161
- "first": "J",
1162
- "middle": [],
1163
- "last": "Kupiec",
1164
- "suffix": ""
1165
- }
1166
- ],
1167
- "year": 1993,
1168
-            "venue": "Proceedings of ACM SIGIR",
1169
- "volume": "",
1170
- "issue": "",
1171
- "pages": "181--190",
1172
- "other_ids": {},
1173
- "num": null,
1174
- "urls": [],
1175
-            "raw_text": "J. Kupiec. 1993. MURAX: A Robust Linguistic Approach For Question Answering Using An On-Line Encyclopedia. In Proceedings of ACM SIGIR, pages 181-190.",
1176
- "links": null
1177
- },
1178
- "BIBREF12": {
1179
- "ref_id": "b12",
1180
-            "title": "The Process of Question Answering",
1181
- "authors": [
1182
- {
1183
- "first": "W",
1184
- "middle": [],
1185
- "last": "Lehnert",
1186
- "suffix": ""
1187
- }
1188
- ],
1189
- "year": 1978,
1190
- "venue": "",
1191
- "volume": "",
1192
- "issue": "",
1193
- "pages": "",
1194
- "other_ids": {},
1195
- "num": null,
1196
- "urls": [],
1197
-            "raw_text": "W. Lehnert. 1978. The Process of Question Answering. Lawrence Erlbaum Associates, Hillsdale, NJ.",
1198
- "links": null
1199
- },
1200
- "BIBREF13": {
1201
- "ref_id": "b13",
1202
- "title": "The TIPSTER SUMMAC Text Summarization Evaluation",
1203
- "authors": [
1204
- {
1205
- "first": "T",
1206
- "middle": [],
1207
- "last": "Mani",
1208
- "suffix": ""
1209
- },
1210
- {
1211
- "first": "D",
1212
- "middle": [],
1213
- "last": "Firmin",
1214
- "suffix": ""
1215
- },
1216
- {
1217
- "first": "G",
1218
- "middle": [],
1219
- "last": "House",
1220
- "suffix": ""
1221
- },
1222
- {
1223
- "first": "B",
1224
- "middle": [],
1225
- "last": "Klein",
1226
- "suffix": ""
1227
- },
1228
- {
1229
- "first": "L",
1230
- "middle": [],
1231
- "last": "Sundheim",
1232
- "suffix": ""
1233
- },
1234
- {
1235
- "first": "",
1236
- "middle": [],
1237
- "last": "Hirschman",
1238
- "suffix": ""
1239
- }
1240
- ],
1241
- "year": 1999,
1242
-            "venue": "Ninth Annual Meeting of the EACL",
1243
- "volume": "",
1244
- "issue": "",
1245
- "pages": "",
1246
- "other_ids": {},
1247
- "num": null,
1248
- "urls": [],
1249
-            "raw_text": "I. Mani, T. Firmin, D. House, G. Klein, B. Sundheim, and L. Hirschman. 1999. The TIPSTER SUMMAC Text Summarization Evaluation. In Ninth Annual Meeting of the EACL, University of Bergen, Bergen, Norway.",
1250
- "links": null
1251
- },
1252
- "BIBREF14": {
1253
- "ref_id": "b14",
1254
- "title": "Building a Large Annotated Corpus of English: The Penn Treebank",
1255
- "authors": [
1256
- {
1257
- "first": "M",
1258
- "middle": [],
1259
- "last": "Marcus",
1260
- "suffix": ""
1261
- },
1262
- {
1263
- "first": "M",
1264
- "middle": [],
1265
- "last": "Marcinkiewicz",
1266
- "suffix": ""
1267
- },
1268
- {
1269
- "first": "B",
1270
- "middle": [],
1271
- "last": "Santorini",
1272
- "suffix": ""
1273
- }
1274
- ],
1275
- "year": 1993,
1276
- "venue": "Computational Linguistics",
1277
- "volume": "19",
1278
- "issue": "2",
1279
- "pages": "313--330",
1280
- "other_ids": {},
1281
- "num": null,
1282
- "urls": [],
1283
-            "raw_text": "M. Marcus, M. Marcinkiewicz, and B. Santorini. 1993. Building a Large Annotated Corpus of English: The Penn Treebank. Computational Linguistics, 19(2):313-330.",
1284
- "links": null
1285
- },
1286
- "BIBREF15": {
1287
- "ref_id": "b15",
1288
- "title": "WordNet: an on-line lexical database",
1289
- "authors": [
1290
- {
1291
- "first": "G",
1292
- "middle": [
1293
- "A"
1294
- ],
1295
- "last": "Miller",
1296
- "suffix": ""
1297
- },
1298
- {
1299
- "first": "R",
1300
- "middle": [],
1301
- "last": "Beckwith",
1302
- "suffix": ""
1303
- },
1304
- {
1305
- "first": "C",
1306
- "middle": [],
1307
-                    "last": "Fellbaum",
1308
- "suffix": ""
1309
- },
1310
- {
1311
- "first": "D",
1312
- "middle": [],
1313
- "last": "Gross",
1314
- "suffix": ""
1315
- },
1316
- {
1317
- "first": "K",
1318
- "middle": [
1319
- "J"
1320
- ],
1321
- "last": "Miller",
1322
- "suffix": ""
1323
- }
1324
- ],
1325
- "year": 1990,
1326
- "venue": "International Journal of Lexicography",
1327
- "volume": "3",
1328
- "issue": "4",
1329
- "pages": "235--245",
1330
- "other_ids": {},
1331
- "num": null,
1332
- "urls": [],
1333
-            "raw_text": "G. A. Miller, R. Beckwith, C. Fellbaum, D. Gross, and K. J. Miller. 1990. WordNet: an on-line lexical database. International Journal of Lexicography, 3(4):235-245.",
1334
- "links": null
1335
- },
1336
- "BIBREF16": {
1337
- "ref_id": "b16",
1338
- "title": "LASSO: A Tool for Surfing the Answer Net",
1339
- "authors": [
1340
- {
1341
- "first": "D",
1342
- "middle": [],
1343
- "last": "Moldovan",
1344
- "suffix": ""
1345
- },
1346
- {
1347
- "first": "S",
1348
- "middle": [],
1349
- "last": "Harabagiu",
1350
- "suffix": ""
1351
- },
1352
- {
1353
- "first": "M",
1354
- "middle": [],
1355
-                    "last": "Pasca",
1356
- "suffix": ""
1357
- },
1358
- {
1359
- "first": "R",
1360
- "middle": [],
1361
- "last": "Mihalcea",
1362
- "suffix": ""
1363
- },
1364
- {
1365
- "first": "R",
1366
- "middle": [],
1367
- "last": "Goodrum",
1368
- "suffix": ""
1369
- },
1370
- {
1371
- "first": "R",
1372
- "middle": [],
1373
- "last": "Girju",
1374
- "suffix": ""
1375
- },
1376
- {
1377
- "first": "V",
1378
- "middle": [],
1379
- "last": "Rus",
1380
- "suffix": ""
1381
- }
1382
- ],
1383
- "year": 2000,
1384
- "venue": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication",
1385
- "volume": "",
1386
- "issue": "",
1387
- "pages": "",
1388
- "other_ids": {},
1389
- "num": null,
1390
- "urls": [],
1391
-            "raw_text": "D. Moldovan, S. Harabagiu, M. Pasca, R. Mihalcea, R. Goodrum, R. Girju, and V. Rus. 2000. LASSO: A Tool for Surfing the Answer Net. In E. Voorhees, editor, Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication. In press.",
1392
- "links": null
1393
- },
1394
- "BIBREF17": {
1395
- "ref_id": "b17",
1396
- "title": "Using Coreference to Improve Passage Retrieval for Question Answering",
1397
- "authors": [
1398
- {
1399
- "first": "T",
1400
- "middle": [
1401
- "S"
1402
- ],
1403
- "last": "Morton",
1404
- "suffix": ""
1405
- }
1406
- ],
1407
- "year": 1999,
1408
- "venue": "Proceedings of the AAAI Fall Symposium on Question Answering Systems",
1409
- "volume": "",
1410
- "issue": "",
1411
- "pages": "72--74",
1412
- "other_ids": {},
1413
- "num": null,
1414
- "urls": [],
1415
-            "raw_text": "T. S. Morton. 1999. Using Coreference to Improve Passage Retrieval for Question Answering. In Proceedings of the AAAI Fall Symposium on Question Answering Systems, pages 72-74. AAAI Press. AAAI TR FS-99-02.",
1416
- "links": null
1417
- },
1418
- "BIBREF18": {
1419
- "ref_id": "b18",
1420
- "title": "TREC-8 Experiments at Maryland: CLIR, QA and Routing",
1421
- "authors": [
1422
- {
1423
- "first": "D",
1424
- "middle": [
1425
- "W"
1426
- ],
1427
- "last": "Oard",
1428
- "suffix": ""
1429
- },
1430
- {
1431
- "first": "J",
1432
- "middle": [],
1433
- "last": "Wang",
1434
- "suffix": ""
1435
- },
1436
- {
1437
- "first": "D",
1438
- "middle": [],
1439
- "last": "Lin",
1440
- "suffix": ""
1441
- },
1442
- {
1443
- "first": "I",
1444
- "middle": [],
1445
- "last": "Soboroff",
1446
- "suffix": ""
1447
- }
1448
- ],
1449
- "year": 2000,
1450
-            "venue": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication",
1451
- "volume": "",
1452
- "issue": "",
1453
- "pages": "",
1454
- "other_ids": {},
1455
- "num": null,
1456
- "urls": [],
1457
-            "raw_text": "D. W. Oard, J. Wang, D. Lin, and I. Soboroff. 2000. TREC-8 Experiments at Maryland: CLIR, QA and Routing. In E. Voorhees, editor, Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication. In press.",
1458
- "links": null
1459
- },
1460
- "BIBREF19": {
1461
- "ref_id": "b19",
1462
-            "title": "The Use of Predictive Annotation for Question Answering in TREC8",
1463
- "authors": [
1464
- {
1465
- "first": "J",
1466
- "middle": [],
1467
- "last": "Prager",
1468
- "suffix": ""
1469
- },
1470
- {
1471
- "first": "D",
1472
- "middle": [],
1473
- "last": "Radev",
1474
- "suffix": ""
1475
- },
1476
- {
1477
- "first": "E",
1478
- "middle": [],
1479
- "last": "Brown",
1480
- "suffix": ""
1481
- },
1482
- {
1483
- "first": "A",
1484
- "middle": [],
1485
- "last": "Coden",
1486
- "suffix": ""
1487
- },
1488
- {
1489
- "first": "V",
1490
- "middle": [],
1491
- "last": "Samn",
1492
- "suffix": ""
1493
- }
1494
- ],
1495
- "year": 2000,
1496
-            "venue": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication",
1497
- "volume": "",
1498
- "issue": "",
1499
- "pages": "",
1500
- "other_ids": {},
1501
- "num": null,
1502
- "urls": [],
1503
-            "raw_text": "J. Prager, D. Radev, E. Brown, A. Coden, and V. Samn. 2000. The Use of Predictive Annotation for Question Answering in TREC8. In E. Voorhees, editor, Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication. In press.",
1504
- "links": null
1505
- },
1506
- "BIBREF20": {
1507
- "ref_id": "b20",
1508
- "title": "A vector space model for information retrieval",
1509
- "authors": [
1510
- {
1511
- "first": "G",
1512
- "middle": [],
1513
- "last": "Salton",
1514
- "suffix": ""
1515
- },
1516
- {
1517
- "first": "A",
1518
- "middle": [],
1519
- "last": "Wong",
1520
- "suffix": ""
1521
- },
1522
- {
1523
- "first": "C",
1524
- "middle": [
1525
- "S"
1526
- ],
1527
- "last": "Yang",
1528
- "suffix": ""
1529
- }
1530
- ],
1531
- "year": 1975,
1532
-            "venue": "Communications of the ACM",
1533
- "volume": "18",
1534
- "issue": "",
1535
- "pages": "613--620",
1536
- "other_ids": {},
1537
- "num": null,
1538
- "urls": [],
1539
-            "raw_text": "G. Salton, A. Wong, and C.S. Yang. 1975. A vector space model for information retrieval. Communications of the ACM, 18(11):613-620.",
1540
- "links": null
1541
- },
1542
- "BIBREF21": {
1543
- "ref_id": "b21",
1544
- "title": "Automatic analysis, theme generation and summarization of machine-readable texts",
1545
- "authors": [
1546
- {
1547
- "first": "G",
1548
- "middle": [],
1549
- "last": "Salton",
1550
- "suffix": ""
1551
- },
1552
- {
1553
- "first": "J",
1554
- "middle": [],
1555
- "last": "Allan",
1556
- "suffix": ""
1557
- },
1558
- {
1559
- "first": "C",
1560
- "middle": [],
1561
- "last": "Buckley",
1562
- "suffix": ""
1563
- },
1564
- {
1565
- "first": "M",
1566
- "middle": [],
1567
- "last": "Mitra",
1568
- "suffix": ""
1569
- }
1570
- ],
1571
- "year": 1994,
1572
- "venue": "Science",
1573
- "volume": "264",
1574
- "issue": "",
1575
- "pages": "1421--1426",
1576
- "other_ids": {},
1577
- "num": null,
1578
- "urls": [],
1579
-            "raw_text": "G. Salton, J. Allan, C. Buckley, and M. Mitra. 1994. Automatic analysis, theme generation and summarization of machine-readable texts. Science, 264:1421-1426, June.",
1580
- "links": null
1581
- },
1582
- "BIBREF22": {
1583
- "ref_id": "b22",
1584
- "title": "The SMART Retrieval System--Experiments in Automatic Document Processing",
1585
- "authors": [],
1586
- "year": 1971,
1587
- "venue": "",
1588
- "volume": "",
1589
- "issue": "",
1590
- "pages": "",
1591
- "other_ids": {},
1592
- "num": null,
1593
- "urls": [],
1594
-            "raw_text": "Gerard Salton, editor. 1971. The SMART Retrieval System--Experiments in Automatic Document Processing. Prentice Hall Inc., Englewood Cliffs, NJ.",
1595
- "links": null
1596
- },
1597
- "BIBREF23": {
1598
- "ref_id": "b23",
1599
- "title": "Scripts, plans, goals, and understanding",
1600
- "authors": [
1601
- {
1602
- "first": "R",
1603
- "middle": [
1604
- "C"
1605
- ],
1606
- "last": "Schank",
1607
- "suffix": ""
1608
- },
1609
- {
1610
- "first": "R",
1611
- "middle": [
1612
- "P"
1613
- ],
1614
- "last": "Abelson",
1615
- "suffix": ""
1616
- }
1617
- ],
1618
- "year": 1977,
1619
-            "venue": "Lawrence Erlbaum Associates",
1620
- "volume": "",
1621
- "issue": "",
1622
- "pages": "",
1623
- "other_ids": {},
1624
- "num": null,
1625
- "urls": [],
1626
-            "raw_text": "R. C. Schank and R. P. Abelson. 1977. Scripts, plans, goals, and understanding. Lawrence Erlbaum Associates, Hillsdale, NJ.",
1627
- "links": null
1628
- },
1629
- "BIBREF24": {
1630
- "ref_id": "b24",
1631
- "title": "Pivoted document length normalization",
1632
- "authors": [
1633
- {
1634
- "first": "Amit",
1635
- "middle": [],
1636
- "last": "Singhal",
1637
- "suffix": ""
1638
- },
1639
- {
1640
- "first": "Chris",
1641
- "middle": [],
1642
- "last": "Buckley",
1643
- "suffix": ""
1644
- },
1645
- {
1646
- "first": "Mandar",
1647
- "middle": [],
1648
- "last": "Mitra",
1649
- "suffix": ""
1650
- }
1651
- ],
1652
- "year": 1996,
1653
-            "venue": "Proceedings of the Nineteenth Annual International ACM SIGIR Conference on Research and Development in Information Retrieval",
1654
- "volume": "",
1655
- "issue": "",
1656
- "pages": "21--29",
1657
- "other_ids": {},
1658
- "num": null,
1659
- "urls": [],
1660
-            "raw_text": "Amit Singhal, Chris Buckley, and Mandar Mitra. 1996. Pivoted document length normalization. In H. Frei, D. Harman, P. Schauble, and R. Wilkinson, editors, Proceedings of the Nineteenth Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 21-29. Association for Computing Machinery.",
1661
- "links": null
1662
- },
1663
- "BIBREF25": {
1664
- "ref_id": "b25",
1665
- "title": "AT&T at TREC-8",
1666
- "authors": [
1667
- {
1668
- "first": "A",
1669
- "middle": [],
1670
- "last": "Singhal",
1671
- "suffix": ""
1672
- },
1673
- {
1674
- "first": "S",
1675
- "middle": [],
1676
- "last": "Abney",
1677
- "suffix": ""
1678
- },
1679
- {
1680
- "first": "M",
1681
- "middle": [],
1682
- "last": "Bacchiani",
1683
- "suffix": ""
1684
- },
1685
- {
1686
- "first": "M",
1687
- "middle": [],
1688
- "last": "Collins",
1689
- "suffix": ""
1690
- },
1691
- {
1692
- "first": "D",
1693
- "middle": [],
1694
- "last": "Hindle",
1695
- "suffix": ""
1696
- },
1697
- {
1698
- "first": "F",
1699
- "middle": [],
1700
- "last": "Pereira",
1701
- "suffix": ""
1702
- }
1703
- ],
1704
- "year": 2000,
1705
- "venue": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication",
1706
- "volume": "",
1707
- "issue": "",
1708
- "pages": "",
1709
- "other_ids": {},
1710
- "num": null,
1711
- "urls": [],
1712
-            "raw_text": "A. Singhal, S. Abney, M. Bacchiani, M. Collins, D. Hindle, and F. Pereira. 2000. AT&T at TREC-8. In E. Voorhees, editor, Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication. In press.",
1713
- "links": null
1714
- },
1715
- "BIBREF26": {
1716
- "ref_id": "b26",
1717
- "title": "Question Answering Supported by Information Extraction",
1718
- "authors": [
1719
- {
1720
- "first": "R",
1721
- "middle": [],
1722
- "last": "Srihari",
1723
- "suffix": ""
1724
- },
1725
- {
1726
- "first": "W",
1727
- "middle": [],
1728
- "last": "Li",
1729
- "suffix": ""
1730
- }
1731
- ],
1732
- "year": 2000,
1733
- "venue": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication",
1734
- "volume": "",
1735
- "issue": "",
1736
- "pages": "",
1737
- "other_ids": {},
1738
- "num": null,
1739
- "urls": [],
1740
-            "raw_text": "R. Srihari and W. Li. 2000. Question Answering Supported by Information Extraction. In E. Voorhees, editor, Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST Special Publication. In press.",
1741
- "links": null
1742
- },
1743
- "BIBREF27": {
1744
- "ref_id": "b27",
1745
- "title": "Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST. In press",
1746
- "authors": [],
1747
- "year": null,
1748
- "venue": "",
1749
- "volume": "",
1750
- "issue": "",
1751
- "pages": "",
1752
- "other_ids": {},
1753
- "num": null,
1754
- "urls": [],
1755
-            "raw_text": "TREC-8. 2000. Proceedings of the Eighth Text REtrieval Conference TREC 8. NIST. In press.",
1756
- "links": null
1757
- }
1758
- },
1759
- "ref_entries": {
1760
- "FIGREF0": {
1761
- "type_str": "figure",
1762
- "num": null,
1763
- "text": "General Architecture of the Question-Answering System",
1764
- "uris": null
1765
- }
1766
- }
1767
- }
1768
- }
 
Full_text_JSON/prefixA/json/A00/A00-1026.json DELETED
@@ -1,1524 +0,0 @@
1
- {
2
- "paper_id": "A00-1026",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:11:52.328209Z"
6
- },
7
- "title": "Extracting Molecular Binding Relationships from Biomedical Text",
8
- "authors": [
9
- {
10
- "first": "Thomas",
11
- "middle": [
12
- "C"
13
- ],
14
- "last": "Rindflesch",
15
- "suffix": "",
16
- "affiliation": {
17
- "laboratory": "",
18
- "institution": "National Library of Medicine",
19
- "location": {
20
- "postCode": "8600, 20894",
21
- "settlement": "Rockville Pike Bethesda",
22
- "region": "MD"
23
- }
24
- },
25
- "email": ""
26
- },
27
- {
28
- "first": "Jayant",
29
- "middle": [
30
- "V"
31
- ],
32
- "last": "Rajan",
33
- "suffix": "",
34
- "affiliation": {
35
- "laboratory": "",
36
- "institution": "University of Rochester Rochester",
37
- "location": {
38
- "postCode": "14620",
39
- "region": "NY"
40
- }
41
- },
42
- "email": "[email protected]"
43
- },
44
- {
45
- "first": "Lawrence",
46
- "middle": [],
47
- "last": "Hunter",
48
- "suffix": "",
49
- "affiliation": {
50
- "laboratory": "",
51
- "institution": "National Cancer Institute",
52
- "location": {
53
- "addrLine": "7550 Wisconsin Avenue Bethesda",
54
- "postCode": "20894"
55
- }
56
- },
57
- "email": "[email protected]"
58
- }
59
- ],
60
- "year": "",
61
- "venue": null,
62
- "identifiers": {},
63
-    "abstract": "ARBITER is a Prolog program that extracts assertions about macromolecular binding relationships from biomedical text. We describe the domain knowledge and the underspecified linguistic analyses that support the identification of these predications. After discussing a formal evaluation of ARBITER, we report on its application to 491,000 MEDLINE\u00ae abstracts, during which almost 25,000 binding relationships suitable for entry into a database of macromolecular function were extracted.",
64
- "pdf_parse": {
65
- "paper_id": "A00-1026",
66
- "_pdf_hash": "",
67
- "abstract": [
68
- {
69
-                "text": "ARBITER is a Prolog program that extracts assertions about macromolecular binding relationships from biomedical text. We describe the domain knowledge and the underspecified linguistic analyses that support the identification of these predications. After discussing a formal evaluation of ARBITER, we report on its application to 491,000 MEDLINE\u00ae abstracts, during which almost 25,000 binding relationships suitable for entry into a database of macromolecular function were extracted.",
70
- "cite_spans": [],
71
- "ref_spans": [],
72
- "eq_spans": [],
73
- "section": "Abstract",
74
- "sec_num": null
75
- }
76
- ],
77
- "body_text": [
78
- {
79
- "text": "Far more scientific information exists in the literature than in any structured database. Convenient access to this information could significantly benefit research activities in various fields. The emerging technology of information extraction (Appelt and Israel 1997, Hearst 1999) provides a means of gaining access to this information. In this paper we report on a project to extract biomolecular data from biomedical text. We concentrate on molecular binding affinity, which provides a strong indication of macromolecular function and is a core phenomenon in molecular biology. Our ultimate goal is to automatically construct a database of binding relationships asserted in MEDLINE citations.",
80
- "cite_spans": [
81
- {
82
- "start": 245,
83
- "end": 256,
84
- "text": "(Appelt and",
85
- "ref_id": "BIBREF2"
86
- },
87
- {
88
- "start": 257,
89
- "end": 282,
90
- "text": "Israel 1997, Hearst 1999)",
91
- "ref_id": null
92
- }
93
- ],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "Introduction",
97
- "sec_num": null
98
- },
99
- {
100
-                "text": "The National Library of Medicine's MEDLINE textual database is an online repository of more than 10 million citations from the biomedical literature. All citations contain the title of the corresponding article along with other bibliographic information. In addition, a large number of citations contain author-supplied abstracts. Initial studies indicate that there are approximately 500,000 MEDLINE citations relevant to molecular binding affinity.",
101
- "cite_spans": [],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "Introduction",
105
- "sec_num": null
106
- },
107
- {
108
- "text": "Our decision to apply information extraction technology to binding relationships was guided not only by the biological importance of this phenomenon but also by the relatively straightforward syntactic cuing of binding predications in text. The inflectional forms of a single verb, bind, indicate this relationship in the vast majority of cases, and our initial work is limited to these instances. For example, our goal in this project is to extract the binding predications in (2) from the text in (1).",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "Introduction",
113
- "sec_num": null
114
- },
115
- {
116
- "text": "(1) CC chemokine receptor 1 (CCR1) is expressed in neutrophils, monocytes, lymphocytes, and eosinophils, and binds the leukocyte chemoattractant and hematopoiesis regulator macrophage inflammatory protein (MIP)-1 alpha, as well as several related CC chemokines.",
117
- "cite_spans": [],
118
- "ref_spans": [],
119
- "eq_spans": [],
120
- "section": "Introduction",
121
- "sec_num": null
122
- },
123
- {
124
-                "text": "(2) <CC chemokine receptor 1> BINDS <leukocyte chemoattractant> <CC chemokine receptor 1> BINDS <hematopoiesis regulator macrophage inflammatory protein-1 alpha> <CC chemokine receptor 1> BINDS <related CC chemokine> Considerable interest in information extraction has concentrated on identifying named entities in text pertaining to current events (for example, Wacholder et al. 1997, Voorhees and Harman 1998, and MUC-7) ; however, several recent efforts have been directed at biomolecular data (Blaschke et al. 1999 , Craven and Kumlien 1999 , and Rindflesch et al. 2000) . The overall goal is to transform the information encoded in text into a more readily accessible format, typically a template with slots named for the participants in the scenario of interest. The template for molecular binding can be thought of as a simple predication with predicate \"bind\" and two arguments which participate (symmetrically) in the relationship: BINDS(<X>, <Y>).",
125
- "cite_spans": [
126
- {
127
- "start": 363,
128
- "end": 398,
129
- "text": "Wacholder et al. 1997, Voorhees and",
130
- "ref_id": null
131
- },
132
- {
133
- "start": 399,
134
- "end": 415,
135
- "text": "Harman 1998, and",
136
- "ref_id": "BIBREF22"
137
- },
138
- {
139
- "start": 416,
140
- "end": 422,
141
- "text": "MUC-7)",
142
- "ref_id": null
143
- },
144
- {
145
- "start": 497,
146
- "end": 518,
147
- "text": "(Blaschke et al. 1999",
148
- "ref_id": "BIBREF5"
149
- },
150
- {
151
- "start": 519,
152
- "end": 544,
153
- "text": ", Craven and Kumlien 1999",
154
- "ref_id": "BIBREF7"
155
- },
156
- {
157
- "start": 545,
158
- "end": 573,
159
- "text": ", and Rindflesch et al. 2000",
160
- "ref_id": "BIBREF20"
161
- }
162
- ],
163
- "ref_spans": [],
164
- "eq_spans": [],
165
- "section": "Introduction",
166
- "sec_num": null
167
- },
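The extraction target is thus a small symmetric record; a minimal sketch of how such predications might be stored (field and function names are ours, not ARBITER's):

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class Binding:
    """One extracted predication BINDS(<X>, <Y>)."""
    x: str
    y: str

def make_binding(a, b):
    # The relation is symmetric, so a canonical argument order lets a
    # set collapse BINDS(X, Y) with BINDS(Y, X).
    return Binding(*sorted((a, b)))

rels = {make_binding("CC chemokine receptor 1",
                     "leukocyte chemoattractant")}
```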
168
- {
169
- "text": "Various strategies, both linguistic and statistical, have been used in information extraction efforts. We introduce a Prolog program called ARBITER (Assess and Retrieve Binding Terminology) that takes advantage of an existing domain knowledge source and relies on syntactic cues provided by a partial parser in order to identify and extract binding relations from text. We discuss the syntactic processing used and then report on a formal evaluation of ARBITER against a test collection of 116 MEDLINE citations in which the binding relations were marked by hand. Finally, we provide a brief overview of the results of applying ARBITER to the 500,000 MEDLINE citations discussing molecular binding affinity.",
170
- "cite_spans": [],
171
- "ref_spans": [],
172
- "eq_spans": [],
173
- "section": "Introduction",
174
- "sec_num": null
175
- },
176
- {
177
-            "text": "Our strategy for extracting binding relationships from text divides the task into two phases: During the first phase we identify all potential binding arguments, and then in the second phase we extract just those binding terms which are asserted in the text as participating in a particular binding predication. In support of this processing, we rely on the linguistic and domain knowledge contained in the National Library of Medicine's Unified Medical Language System\u00ae (UMLS\u00ae) as well as an existing tool, the SPECIALIST minimal commitment parser (Aronson et al. 1994) . The UMLS (Humphreys et al. 1998) consists of several knowledge sources applicable in the biomedical domain: the Metathesaurus, Semantic Network, and SPECIALIST Lexicon (McCray et al. 1994) . The Metathesaurus was constructed from more than forty controlled vocabularies and contains more than 620,000 biomedical concepts. The characteristic of the Metathesaurus most relevant for this project is that each concept is associated with a semantic type that categorizes the concept into subareas of biology or medicine. Examples pertinent to binding terminology include the semantic types 'Amino Acid, Peptide, or Protein' and 'Nucleotide Sequence'. The SPECIALIST Lexicon (with associated lexical access tools) supplies syntactic information for a large compilation of biomedical and general English terms.",
178
- "cite_spans": [
179
- {
180
- "start": 548,
181
- "end": 569,
182
- "text": "(Aronson et al. 1994)",
183
- "ref_id": "BIBREF3"
184
- },
185
- {
186
- "start": 581,
187
- "end": 604,
188
- "text": "(Humphreys et al. 1998)",
189
- "ref_id": "BIBREF12"
190
- },
191
- {
192
- "start": 739,
193
- "end": 759,
194
- "text": "(McCray et al. 1994)",
195
- "ref_id": "BIBREF13"
196
- }
197
- ],
198
- "ref_spans": [],
199
- "eq_spans": [],
200
- "section": "Extracting Binding Relationships from Text",
201
- "sec_num": "1"
202
- },
203
- {
204
- "text": "The SPECIALIST minimal commitment parser relies on the SPECIALIST Lexicon as well as the Xerox stochastic tagger (Cutting et al. 1992) . The output produced is in the tradition of partial parsing (Hindle 1983 , McDonald 1992 , Weischedel et al. 1993 ) and concentrates on the simple noun phrase, what Weischedel et al. (1993) call the \"core noun phrase,\" that is a noun phrase with no modification to the right of the head. Several approaches provide similar output based on statistics (Church 1988 , Zhai 1997 , for example), a finite-state machine (Ait-Mokhtar and Chanod 1997), or a hybrid approach combining statistics and linguistic rules (Voutilainen and Padro 1997).",
205
- "cite_spans": [
206
- {
207
- "start": 113,
208
- "end": 134,
209
- "text": "(Cutting et al. 1992)",
210
- "ref_id": "BIBREF8"
211
- },
212
- {
213
- "start": 196,
214
- "end": 208,
215
- "text": "(Hindle 1983",
216
- "ref_id": "BIBREF11"
217
- },
218
- {
219
- "start": 209,
220
- "end": 224,
221
- "text": ", McDonald 1992",
222
- "ref_id": "BIBREF14"
223
- },
224
- {
225
- "start": 225,
226
- "end": 249,
227
- "text": ", Weischedel et al. 1993",
228
- "ref_id": "BIBREF25"
229
- },
230
- {
231
- "start": 301,
232
- "end": 325,
233
- "text": "Weischedel et al. (1993)",
234
- "ref_id": "BIBREF25"
235
- },
236
- {
237
- "start": 486,
238
- "end": 498,
239
- "text": "(Church 1988",
240
- "ref_id": "BIBREF6"
241
- },
242
- {
243
- "start": 499,
244
- "end": 510,
245
- "text": ", Zhai 1997",
246
- "ref_id": "BIBREF26"
247
- }
248
- ],
249
- "ref_spans": [],
250
- "eq_spans": [],
251
- "section": "Extracting Binding Relationships from Text",
252
- "sec_num": "1"
253
- },
254
- {
255
- "text": "The SPECIALIST parser is based on the notion of barrier words (Tersmette et al. 1988) , which indicate boundaries between phrases. After lexical look-up and resolution of category label ambiguity by the Xerox tagger, complementizers, conjunctions, modals, prepositions, and verbs are marked as boundaries. Subsequently, boundaries are considered to open a new phrase (and close the preceding phrase). Any phrase containing a noun is considered to be a (simple) noun phrase, and in such a phrase, the right-most noun is labeled as the head, and all other items (other than determiners) are labeled as modifiers. An example of the output from the SPECIALIST parser is given below in (4). The partial parse produced serves as the basis for the first phase of extraction of binding relationships, namely the identification of those simple noun phrases acting as potential binding arguments (referred to as \"binding terms\").",
256
- "cite_spans": [
257
- {
258
- "start": 62,
259
- "end": 85,
260
- "text": "(Tersmette et al. 1988)",
261
- "ref_id": "BIBREF21"
262
- }
263
- ],
264
- "ref_spans": [],
265
- "eq_spans": [],
266
- "section": "Extracting Binding Relationships from Text",
267
- "sec_num": "1"
268
- },
269
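The barrier-word procedure just described is simple enough to illustrate directly. The sketch below is a minimal reconstruction of that style of chunking, not the SPECIALIST parser itself; the tag names, the barrier set, and the function name are assumptions made for the example.

```python
# Minimal barrier-word chunking sketch (hypothetical tags, not SPECIALIST output).
BARRIERS = {"comp", "conj", "modal", "prep", "verb"}

def chunk(tagged):
    """tagged: [(token, tag), ...] -> simple NPs; the right-most noun is the head."""
    phrases, current = [], []
    for tok, tag in tagged:
        if tag in BARRIERS:
            if current:                       # a boundary closes the preceding phrase
                phrases.append(current)
                current = []
            if tag == "prep":                 # a PP is an NP whose first member is the preposition
                current.append((tok, tag))
            else:
                phrases.append([(tok, tag)])  # other barriers stand alone
        else:
            current.append((tok, tag))
    if current:
        phrases.append(current)
    nps = []
    for ph in phrases:
        nouns = [i for i, (_, t) in enumerate(ph) if t == "noun"]
        if nouns:                             # any phrase containing a noun is a simple NP
            nps.append({"np": [w for w, _ in ph], "head": ph[nouns[-1]][0]})
    return nps

print(chunk([("Jel42", "noun"), ("is", "verb"), ("an", "det"), ("IgG", "noun"),
             ("which", "comp"), ("binds", "verb"), ("to", "prep"),
             ("the", "det"), ("protein", "noun")]))
```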
- {
270
- "text": "In order to identify binding terminology in text we rely on the approach discussed in (Rindfiesch et al. 1999) . Text with locally-defined acronyms expanded is submitted to the Xerox tagger and the SPECIALIST parser. Subsequent processing concentrates on the heads of simple noun phrases and proceeds in a series of cascaded steps that depend on existing domain knowledge as well as several small, special-purpose resources in order to determine whether each noun phrase encountered is to be considered a binding term.",
271
- "cite_spans": [
272
- {
273
- "start": 86,
274
- "end": 110,
275
- "text": "(Rindfiesch et al. 1999)",
276
- "ref_id": null
277
- }
278
- ],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "Identifying binding terminology",
282
- "sec_num": "1.1"
283
- },
284
- {
285
- "text": "As the first step in the process, an existing program, MetaMap, (Aronson et al. 1994) attempts to map each simple noun phrase to a concept in the UMLS Metathesaurus. The semantic type for concepts corresponding to successfully mapped noun phrases is then checked against a small subset of UMLS semantic types referring to bindable entities, such as 'Amino Acid, Peptide, or Protein', 'Nucleotide Sequence', 'Carbohydrate', 'Cell', and 'Virus'. For concepts with a semantic type in this set, the corresponding noun phrase is considered to be a binding",
286
- "cite_spans": [
287
- {
288
- "start": 64,
289
- "end": 85,
290
- "text": "(Aronson et al. 1994)",
291
- "ref_id": "BIBREF3"
292
- }
293
- ],
294
- "ref_spans": [],
295
- "eq_spans": [],
296
- "section": "Identifying binding terminology",
297
- "sec_num": "1.1"
298
- },
299
- {
300
- "text": "The heads of noun phrases that do not map to a concept in the Metathesaurus are tested against a small set of general \"binding words,\" which often indicate that the noun phrase in which they appear is a binding term. The set of binding words includes such nouns as cleft, groove, membrane, ligand, motif, receptor, domain, element, and molecule.",
301
- "cite_spans": [],
302
- "ref_spans": [],
303
- "eq_spans": [],
304
- "section": "term.",
305
- "sec_num": null
306
- },
307
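Taken together, the two cascade steps just described (Metathesaurus semantic typing first, then the general binding-word list) amount to a short decision procedure. In the sketch below, `concept_lookup` is a stand-in for MetaMap access, and the function and variable names are illustrative assumptions only.

```python
BINDABLE_TYPES = {"Amino Acid, Peptide, or Protein", "Nucleotide Sequence",
                  "Carbohydrate", "Cell", "Virus"}
BINDING_WORDS = {"cleft", "groove", "membrane", "ligand", "motif",
                 "receptor", "domain", "element", "molecule"}

def is_binding_term(np_text, head, concept_lookup):
    """Try the Metathesaurus mapping first; fall back to the binding-word list."""
    concept = concept_lookup(np_text)     # stand-in for MetaMap; None = no mapping
    if concept is not None:
        return concept["semantic_type"] in BINDABLE_TYPES
    return head.lower() in BINDING_WORDS

# Toy knowledge base standing in for the Metathesaurus:
kb = {"auxin response factor 1": {"semantic_type": "Amino Acid, Peptide, or Protein"}}
lookup = lambda np: kb.get(np.lower())
print(is_binding_term("Auxin Response Factor 1", "1", lookup))          # True, by semantic type
print(is_binding_term("the fibronectin receptor", "receptor", lookup))  # True, by binding word
print(is_binding_term("high resolution", "resolution", lookup))         # False
```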
- {
308
- "text": "The head of a noun phrase that did not submit to the preceding steps is examined to see whether it adheres to the morphologic shape of a normal English word. In this context such a word is often an acronym not defined locally and indicates the presence of a binding term (Fukuda et al. 1998) . A normal English word has at least one vowel and no digits, and a text token that contains at least one letter and is not a norreal English word functions as a binding word in this context. The final step in identifying binding terms is to join contiguous simple noun phrases qualifying as binding terms into a single macro-noun phrase. Rindflesch et al. (1999) use the term \"macro-noun phrase\" to refer to structures that include reduced relative clauses (commonly introduced by prepositions or participles) as well as appositives. Two binding terms joined by a form of be are also treated as though they formed a macro-noun phrase, as in Jel42 is an IgG which binds ...",
309
- "cite_spans": [
310
- {
311
- "start": 271,
312
- "end": 291,
313
- "text": "(Fukuda et al. 1998)",
314
- "ref_id": "BIBREF9"
315
- },
316
- {
317
- "start": 631,
318
- "end": 655,
319
- "text": "Rindflesch et al. (1999)",
320
- "ref_id": "BIBREF19"
321
- }
322
- ],
323
- "ref_spans": [],
324
- "eq_spans": [],
325
- "section": "term.",
326
- "sec_num": null
327
- },
328
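The Morphology Shape Rule stated above reduces to two regular-expression tests: a "normal English word" has at least one vowel and no digits, and any other letter-bearing token counts as a binding word. A minimal sketch (the function names are ours, not ARBITER's):

```python
import re

def is_normal_english_word(token):
    # at least one vowel and no digits
    return re.search(r"[aeiouAEIOU]", token) is not None and not re.search(r"\d", token)

def shape_rule_binding_word(token):
    # a token containing at least one letter that is not a normal English word
    return re.search(r"[A-Za-z]", token) is not None and not is_normal_english_word(token)

assert shape_rule_binding_word("HPr")        # no vowel: likely an undefined acronym
assert shape_rule_binding_word("Jel42")      # contains digits
assert not shape_rule_binding_word("protein")
```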
- {
329
- "text": "The results of identifying binding terms (and thus potential binding arguments) are given in (4) for the sentence in (3). In (4) evidence supporting identification as a binding term is given in braces. Note that in the underspecified syntactic analysis, prepositional phrases are treated as (simple) noun phrases that have a preposition as their first member.",
330
- "cite_spans": [],
331
- "ref_spans": [],
332
- "eq_spans": [],
333
- "section": "term.",
334
- "sec_num": null
335
- },
336
- {
337
- "text": "(3) Je142 is an IgG which binds to the small bacterial protein, HPr and the structure of the complex is known at high resolution.",
338
- "cite_spans": [],
339
- "ref_spans": [],
340
- "eq_spans": [],
341
- "section": "term.",
342
- "sec_num": null
343
- },
344
- {
345
- "text": "(4) [binding_term([ head(Je142) ",
346
- "cite_spans": [
347
- {
348
- "start": 4,
349
- "end": 31,
350
- "text": "[binding_term([ head(Je142)",
351
- "ref_id": null
352
- }
353
- ],
354
- "ref_spans": [],
355
- "eq_spans": [],
356
- "section": "term.",
357
- "sec_num": null
358
- },
359
- {
360
- "text": "Before addressing the strategy for determining the arguments of binding predications, we discuss the general treatment of macro-noun phrases during the second part of the processing. Although ARBITER attempts to recover complete macro-noun phrases during the first phase, only the most specific (and biologically useful) part of a macro-noun phrase is recovered during the extraction of binding predications. Terms referring to specific molecules are more useful than those referring to general classes of bindable entities, such as receptor, ligand, protein, or molecule. The syntactic head of a macro-noun phrase (the first simple noun phrase in the list) is not always the most specific or most useful term in the construction.",
361
- "cite_spans": [],
362
- "ref_spans": [],
363
- "eq_spans": [],
364
- "section": "Identifying binding terms as arguments of relationships",
365
- "sec_num": "1.2"
366
- },
367
- {
368
- "text": "The Specificity Rule for determining the most specific part of the list of simple binding terms constituting a macro-noun phrase chooses the first simple term in the list which has either of the following two characteristics: a) The head was identified by the Morphology Shape Rule. b) The noun phrase maps to a UMLS concept having one of the following semantic types: 'Amino Acid, Peptide, or Protein', 'Nucleic Acid, Nucleoside, or Nucleotide', 'Nucleotide Sequence', 'Immunologic Factor', or 'Gene or Genome'. For example, in (5), the second simple term, TNF-alpha promoter, maps to the Metathesaurus with semantic type 'Nucleotide Sequence' and is thus considered to be the most specific term in this complex-noun phrase. In identifying binding terms as arguments of a complete binding predication, as indicated above, we examine only those binding relations cued by some form of the verb bind (bind, binds, bound, and binding) . The list of minimal syntactic phrases constituting the partial parse of the input sentence is examined from left to right; for each occurrence of a form of binds, the two binding terms serving as arguments are then sought. (During the tagging process, we force bind, binds, and bound to be labeled as \"verb,\" and binding as \"noun.\") A partial analysis of negation and coordination is undertaken by ARBITER, but anaphora resolution and a syntactic treatment of relativization are not attempted. With the added constraint that a binding argument must have been identified as a binding term based on the domain knowledge resources used, the partial syntactic analysis available to ARBITER supports the accurate identification of a large number of binding predications asserted in the research literature.",
369
- "cite_spans": [
370
- {
371
- "start": 898,
372
- "end": 931,
373
- "text": "(bind, binds, bound, and binding)",
374
- "ref_id": null
375
- }
376
- ],
377
- "ref_spans": [],
378
- "eq_spans": [],
379
- "section": "l_qt~",
380
- "sec_num": null
381
- },
382
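Read procedurally, the Specificity Rule is a first-match scan over the simple terms of a macro-noun phrase. A minimal sketch, assuming each simple term is a record carrying a `by_shape_rule` flag and an optional `semantic_type` (the field names are ours):

```python
SPECIFIC_TYPES = {"Amino Acid, Peptide, or Protein",
                  "Nucleic Acid, Nucleoside, or Nucleotide",
                  "Nucleotide Sequence", "Immunologic Factor", "Gene or Genome"}

def most_specific(macro_np):
    for term in macro_np:                    # scan the simple terms left to right
        if term.get("by_shape_rule") or term.get("semantic_type") in SPECIFIC_TYPES:
            return term
    return macro_np[0]                       # default to the syntactic head

example5 = [{"text": "transcriptionally active kappaB motifs"},
            {"text": "in the TNF-alpha promoter", "semantic_type": "Nucleotide Sequence"},
            {"text": "in normal cells"}]
print(most_specific(example5)["text"])       # -> 'in the TNF-alpha promoter'
```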
- {
383
- "text": "It is convenient to categorize binding predications into two classes depending on which form of bind cues the predication: a) binding and b) bind, binds, and bound. In our test collection (discussed below), about half of the binding re-lationships asserted in the text are cued by the gerundive or participial form binding. In this syntactic predication, the resources available from the underspecified syntactic parse serve quite well as the basis for correctly identifying the arguments of the binding relationship.",
384
- "cite_spans": [],
385
- "ref_spans": [],
386
- "eq_spans": [],
387
- "section": "Arguments of binding",
388
- "sec_num": "1.2.1"
389
- },
390
- {
391
- "text": "The ",
392
- "cite_spans": [],
393
- "ref_spans": [],
394
- "eq_spans": [],
395
- "section": "Arguments of binding",
396
- "sec_num": "1.2.1"
397
- },
398
- {
399
- "text": "The arguments of forms of bind other than binding invariably occur on either side of the cuing verb form. The default strategy for identifying both arguments in these instances is to choose the closest binding term on either side of the verb. In the cases we have investigated, this strategy works often enough to be useful for the surface object. However, due to predicate coordination as well as relativization, such a strategy often fails to identify correctly the surface subject of bind (binds or bound) when more than one binding term precedes the verb. We therefore use the strategy summarized in (7) for recovering the surface subject in such instances.",
400
- "cite_spans": [],
401
- "ref_spans": [],
402
- "eq_spans": [],
403
- "section": "Arguments of bind",
404
- "sec_num": "1.2.2"
405
- },
406
- {
407
- "text": "(7) When more than one binding term precedes a form of bind other than binding, choose the most specific of these binding terms as the surface subject of the predication.",
408
- "cite_spans": [],
409
- "ref_spans": [],
410
- "eq_spans": [],
411
- "section": "Arguments of bind",
412
- "sec_num": "1.2.2"
413
- },
414
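Combining the default closest-term strategy with rule (7) gives a small selection procedure. The sketch below assumes the sentence is available as a list of phrase records with an `is_binding_term` flag and reuses a `most_specific` scan like the one shown earlier; all names are illustrative.

```python
def extract_binds_arguments(phrases, verb_idx, most_specific):
    """phrases: phrase records in sentence order; verb_idx: position of bind/binds/bound."""
    left = [p for i, p in enumerate(phrases) if i < verb_idx and p["is_binding_term"]]
    right = [p for i, p in enumerate(phrases) if i > verb_idx and p["is_binding_term"]]
    if not left or not right:
        return None                               # no complete predication
    subject = left[-1] if len(left) == 1 else most_specific(left)   # rule (7)
    return subject, right[0]                      # closest binding term on the right
```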
- {
415
- "text": "\"Most specific\" is determined (recursively) for a series of binding terms in the same way that the most specific part of a complex binding term is determined.",
416
- "cite_spans": [],
417
- "ref_spans": [],
418
- "eq_spans": [],
419
- "section": "Arguments of bind",
420
- "sec_num": "1.2.2"
421
- },
422
- {
423
- "text": "The input text (8) provides an example of a binding predication cued by binds in which the arguments appear (immediately) on either side of the cuing verb. The two macro-noun phrases serving as potential arguments are underlined.",
424
- "cite_spans": [],
425
- "ref_spans": [],
426
- "eq_spans": [],
427
- "section": "Arguments of bind",
428
- "sec_num": "1.2.2"
429
- },
430
- {
431
- "text": "(8) A transcription factor, Auxin Response Factor 1, that binds to tl!e sequence TGTCTC in auxin response elements was cloned from Arabidopsis by using a yeast one-hybrid system.",
432
- "cite_spans": [],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "Arguments of bind",
436
- "sec_num": "1.2.2"
437
- },
438
- {
439
- "text": "(9) <auxin response factor 1> BINDS <sequence tgtctc>",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "Arguments of bind",
444
- "sec_num": "1.2.2"
445
- },
446
- {
447
- "text": "In the extracted binding relationship in (9), the Specificity Rule chooses Auxin Response Factor 1 from the first macro-noun phrase because it maps to the UMLS Metathesaurus with semantic type 'Amino Acid, Peptide, or Protein'. In the second argument, the sequence TGTCTC has a head that submits to the Morphology Shape Rule and hence is considered to be more specific than auxin response elements.",
448
- "cite_spans": [],
449
- "ref_spans": [],
450
- "eq_spans": [],
451
- "section": "Arguments of bind",
452
- "sec_num": "1.2.2"
453
- },
454
- {
455
- "text": "In (10), the Specificity Rule applies correctly to select the surface subject of the binding predication when multiple binding terms appear to the left of the verb.",
456
- "cite_spans": [],
457
- "ref_spans": [],
458
- "eq_spans": [],
459
- "section": "Arguments of bind",
460
- "sec_num": "1.2.2"
461
- },
462
- {
463
- "text": "(10) Phosphatidylinositol transfer protein has a single lipid-binding site that can reversibly bind phosphatidylinositol and phosphatidylcholine and transfer these lipids between membrane compartments in vitro.",
464
- "cite_spans": [],
465
- "ref_spans": [],
466
- "eq_spans": [],
467
- "section": "Arguments of bind",
468
- "sec_num": "1.2.2"
469
- },
470
- {
471
- "text": "<phosphatidylinositol transfer protein> BINDS <phosphatidylcholine> <phosphatidylinositol transfer protein>",
472
- "cite_spans": [],
473
- "ref_spans": [],
474
- "eq_spans": [],
475
- "section": "Arguments of bind",
476
- "sec_num": "1.2.2"
477
- },
478
- {
479
- "text": "Both Phosphatidylinositol transfer protein and a single lipid-binding site occur to the left of bind and have been identified as binding terms by the first phase of processing. However, Phosphatidylinositol transfer protein maps to the corresponding Metathesaurus concept with semantic type 'Amino Acid, Peptide, or Protein, thus causing it to be more specific than a single lipidbinding site. The second predication listed in (10) was correctly extracted due to coordination processing.",
480
- "cite_spans": [],
481
- "ref_spans": [],
482
- "eq_spans": [],
483
- "section": "BINDS <phosphatidylinositol>",
484
- "sec_num": null
485
- },
486
- {
487
- "text": "ARBITER pursues limited coordination identification in the spirit of Agarwal and Boggess (1992) and Rindflesch (1995) . Only binding terms are considered as candidates for coordination. For each conjunction encountered, the phrase immediately to the right is examined; if it is a binding term, all contiguous binding terms occurring immediately to the left of the conjunct are considered to be coordinate with the right conjunct. Coordination inside the simple noun phrase is not considered, and therefore structures such as The TCR alpha beta or -gamma delta chains are not recognized. Nonetheless, as indicated in (11), this limited approach to noun phrase coordination is often effective.",
488
- "cite_spans": [
489
- {
490
- "start": 100,
491
- "end": 117,
492
- "text": "Rindflesch (1995)",
493
- "ref_id": "BIBREF18"
494
- }
495
- ],
496
- "ref_spans": [],
497
- "eq_spans": [],
498
- "section": "BINDS <phosphatidylinositol>",
499
- "sec_num": null
500
- },
501
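The coordination step just described can be made concrete in a few lines; the sketch assumes the same phrase records as before, with `conj_idx` pointing at the conjunction.

```python
def coordinate_terms(phrases, conj_idx):
    """If the phrase right of the conjunction is a binding term, gather it together with
    all contiguous binding terms immediately to the left of the conjunction."""
    if conj_idx + 1 >= len(phrases) or not phrases[conj_idx + 1]["is_binding_term"]:
        return []
    conjuncts = [phrases[conj_idx + 1]]
    i = conj_idx - 1
    while i >= 0 and phrases[i]["is_binding_term"]:
        conjuncts.append(phrases[i])
        i -= 1
    return conjuncts

ps = [{"t": "fibronectin", "is_binding_term": True},
      {"t": "collagen type I", "is_binding_term": True},
      {"t": "and", "is_binding_term": False},
      {"t": "a laminin 5/6 complex", "is_binding_term": True}]
print([p["t"] for p in coordinate_terms(ps, 2)])
# -> ['a laminin 5/6 complex', 'collagen type I', 'fibronectin']
```

Pairing each subject conjunct with each object conjunct in this way is what yields the six predications listed in example (11) below.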
- {
502
- "text": "(11) Purified recombinant NC 1, like authentic NC 1, also bound specifically to fibronectin, collagen type I, and a laminin 5/6 complex. <authentic ncl> BINDS <laminin 5 / 6 complex> <authentic ncl > BINDS <collagen type i> <authentic ncl> BINDS <fibronectin> <purified recombinant ncl > BINDS <laminin 5 / 6 complex> <purified recombinant ncl> BINDS <collagen type i> <purified recombinant ncl > BINDS <fibronectin> Although the particular underspecified syntactic analysis used in the identification of binding predications in the biomedical research literature is limited in several important ways, it appears adequate to enable this project with a useful level of effectiveness, and this is supported by evaluation.",
503
- "cite_spans": [],
504
- "ref_spans": [],
505
- "eq_spans": [],
506
- "section": "BINDS <phosphatidylinositol>",
507
- "sec_num": null
508
- },
509
- {
510
- "text": "In order to determine ARBITER's effectiveness, the program was formally evaluated against a gold standard of MEDLINE citations in which the binding predications asserted were marked by hand. A search of MEDLINE limited to one month (June 1997) and based on the text words ((bind, binds, binding, or bound) and (protein or proteins)) retrieved 116 citations with 1,141 sentences; of these, 346 contained some form of the verb bind. 260 binding predications were identified in the binding sentences. (The binding sentences also contained 2,025 simple noun phrases, 1,179 of which were marked as being binding terms.)",
511
- "cite_spans": [
512
- {
513
- "start": 272,
514
- "end": 305,
515
- "text": "((bind, binds, binding, or bound)",
516
- "ref_id": null
517
- }
518
- ],
519
- "ref_spans": [],
520
- "eq_spans": [],
521
- "section": "Evaluation",
522
- "sec_num": "2"
523
- },
524
- {
525
- "text": "In processing this test collection, ARBITER extracted 181 binding predications, 132 of which were correct. Since ARBITER missed 128 marked binding predications (false negatives) and incorrectly identified 49 such relationships, recall and precision as measures of effectiveness are 51% and 73%, respectively.",
526
- "cite_spans": [],
527
- "ref_spans": [],
528
- "eq_spans": [],
529
- "section": "Evaluation",
530
- "sec_num": "2"
531
- },
532
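As a quick arithmetic check of the figures just reported: 132 correct extractions plus 49 false positives gives the 181 total extracted, and 132 correct plus 128 misses gives the 260 gold predications.

```python
tp, fp, fn = 132, 49, 128
precision = tp / (tp + fp)   # 132 / 181 = 0.729...
recall = tp / (tp + fn)      # 132 / 260 = 0.507...
print(f"precision = {precision:.0%}, recall = {recall:.0%}")   # 73%, 51%
```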
- {
533
- "text": "In comparing ARBITER's output against that marked in the gold standard, fairly stringent matching criteria were used. A binding predication extracted from a particular sentence by ARBITER had to appear in that same sentence in the gold standard (not just the same citation) in order to be counted as correct. Further, in the gold standard, only the most specific component of a macro-noun phrase was marked as being the correct argument for a particular binding predication. If ARBITER retrieved any other part of a macro-noun phrase in identifying the arguments of that predication, it was assessed as an error.",
534
- "cite_spans": [],
535
- "ref_spans": [],
536
- "eq_spans": [],
537
- "section": "Evaluation",
538
- "sec_num": "2"
539
- },
540
- {
541
- "text": "A large number of ARBITER errors are due to two phenomena: difficulties in correctly identifying binding terms during the first phase of processing and syntactic complexity confounding argument identification during the second phase. An example of the first error type is seen in (12), where the failure to identify ran as a binding term caused ARBITER to miss the correct binding predication asserted in this sentence (indicated by \"-FN->\").",
542
- "cite_spans": [],
543
- "ref_spans": [],
544
- "eq_spans": [],
545
- "section": "Evaluation",
546
- "sec_num": "2"
547
- },
548
- {
549
- "text": "(12) Requirement of guanosine triphosphatebound ran for signal-mediated nuclear protein export.",
550
- "cite_spans": [],
551
- "ref_spans": [],
552
- "eq_spans": [],
553
- "section": "Evaluation",
554
- "sec_num": "2"
555
- },
556
- {
557
- "text": "-FN-> <guanosine triphosphate> BINDS <Ran> -FP-> < guanosine triphosphate> BINDS <signal -mediate nuclear protein export>",
558
- "cite_spans": [],
559
- "ref_spans": [],
560
- "eq_spans": [],
561
- "section": "Evaluation",
562
- "sec_num": "2"
563
- },
564
- {
565
- "text": "This error then led to the false positive error (\"-FP->\") when the program wrongly interpreted the next noun phrase in the sentence (signalmediated nuclear protein export) as the second argument in this predication. The interaction of coordination and negation in (13) caused ARBITER to partially misinterpret the binding predications in this sentence. Although some of the coordination in (13) was processed properly, resulting in the relationships listed, the negated coordination associated with the noun phrase visual arrestin was not interpreted correctly, and thus ARBITER failed to identify the predication marked as a false negative.",
566
- "cite_spans": [],
567
- "ref_spans": [],
568
- "eq_spans": [],
569
- "section": "Evaluation",
570
- "sec_num": "2"
571
- },
572
- {
573
- "text": "As an initial application of ARBITER we ran the program on 491,356 MEDLINE citations, which were retrieved using the same search strategy responsible for the gold standard. During this run, 331,777 sentences in 192,997 citations produced 419,782 total binding assertions. Extrapolating from the gold standard evaluation, we assume that this is about half of the total binding predications asserted in the citations processed and that somewhat less than three quarters of those extracted are correct. The initial list of 419,982 binding triples represents what ARBITER determined was asserted in the text being processed. Many of these assertions, such as those in (14), while correct, are too general to be useful. 14<receptors> BINDS <Peptides> <Erythrocytes> BINDS <Antibodies> Further processing on ARBITER raw output extracted specific protein names and genomic structures and reduced the number of such binding predications to 345,706. From these more specific binding predication, we began the construction of a database containing binding relations asserted in the literature. More detailed discussion of this database can be found in (Rajan et al. in prep); however, here we give an initial description of its characteristics.",
574
- "cite_spans": [],
575
- "ref_spans": [],
576
- "eq_spans": [],
577
- "section": "Application",
578
- "sec_num": "3"
579
- },
580
- {
581
- "text": "We submitted the 345,706 more specific ARBITER binding predications to a search in GenBank (Benson et al. 1998) and determined that 106,193 referred to a GenBank entry. The number of Genbank entries with at least one binding assertion is 11,617. Preliminary results indicate that the database we are constructing will have some of the following characteristics:",
582
- "cite_spans": [
583
- {
584
- "start": 91,
585
- "end": 111,
586
- "text": "(Benson et al. 1998)",
587
- "ref_id": null
588
- }
589
- ],
590
- "ref_spans": [],
591
- "eq_spans": [],
592
- "section": "Application",
593
- "sec_num": "3"
594
- },
595
- {
596
- "text": "\u2022 10,769 bindings between two distinct Genbank entries (5,569 unique) \u2022 875 more binding assertions found between an entry and a specific DNA sequence",
597
- "cite_spans": [],
598
- "ref_spans": [],
599
- "eq_spans": [],
600
- "section": "Application",
601
- "sec_num": "3"
602
- },
603
- {
604
- "text": "\u2022 27,345 bindings between a Genbank entry and a UMLS Metathesaurus concept \u2022 5,569 unique relationships among pairs of entries (involving 11,617 unique entries)",
605
- "cite_spans": [],
606
- "ref_spans": [],
607
- "eq_spans": [],
608
- "section": "Application",
609
- "sec_num": "3"
610
- },
611
- {
612
- "text": "The cooperation of structured domain knowledge and underspecified syntactic analysis enables the extraction of macromolecular binding relationships from the research literature. Although our implementation is domain-specific, the underlying principles are amenable to broader applicability. ARBITER makes a distinction between first labeling binding terms and then identifying certain of these terms as arguments in a binding predication. The first phase of this processing is dependent on biomedical domain knowledge accessible from the UMLS. Applying the techniques we propose in other areas would require at least a minimum of semantic classification of the concepts involved. General, automated techniques that could supply this requirement are becoming increasingly available (Morin and Jacquemin 1999, for example).",
613
- "cite_spans": [],
614
- "ref_spans": [],
615
- "eq_spans": [],
616
- "section": "Conclusion",
617
- "sec_num": null
618
- },
619
- {
620
- "text": "Although we concentrated on the inflectional forms of a single verb, the principles we invoke to support argument identification during the second phase of processing apply generally to English predication encoding strategies (with a minimum of effort necessary to address prepositional cuing of gerundive arguments for specific verbs). The approach to noun phrase coordination also applies generally, so long as hypernymic classification is available for the heads of the potential conjuncts.",
621
- "cite_spans": [],
622
- "ref_spans": [],
623
- "eq_spans": [],
624
- "section": "Conclusion",
625
- "sec_num": null
626
- }
627
- ],
628
- "back_matter": [
629
- {
630
- "text": "We are grateful to John Wilbur for assistance with accessing GenBank, to Alan Aronson for modifications to MetaMap, and to James Mork for providing the distributed system that supported the processing of MEDLINE citations.",
631
- "cite_spans": [],
632
- "ref_spans": [],
633
- "eq_spans": [],
634
- "section": "Acknowledgements",
635
- "sec_num": null
636
- }
637
- ],
638
- "bib_entries": {
639
- "BIBREF0": {
640
- "ref_id": "b0",
641
- "title": "A simple but useful approach to conjunct identification",
642
- "authors": [
643
- {
644
- "first": "R",
645
- "middle": [],
646
- "last": "Agaxwal",
647
- "suffix": ""
648
- },
649
- {
650
- "first": "L",
651
- "middle": [],
652
- "last": "Boggess",
653
- "suffix": ""
654
- }
655
- ],
656
- "year": 1992,
657
- "venue": "Proceedings of the 30th Annual Meeting of the Association for Computational Linguistics",
658
- "volume": "",
659
- "issue": "",
660
- "pages": "15--21",
661
- "other_ids": {},
662
- "num": null,
663
- "urls": [],
664
- "raw_text": "Agaxwal R. and Boggess L. (1992) A simple but use- ful approach to conjunct identification. Proceed- ings of the 30th Annual Meeting of the Associa- tion for Computational Linguistics, pp. 15-21.",
665
- "links": null
666
- },
667
- "BIBREF1": {
668
- "ref_id": "b1",
669
- "title": "Incremental finite-state parsing",
670
- "authors": [
671
- {
672
- "first": "S",
673
- "middle": [],
674
- "last": "Ait-Mokhtar",
675
- "suffix": ""
676
- },
677
- {
678
- "first": "Chanod J.-P",
679
- "middle": [],
680
- "last": "",
681
- "suffix": ""
682
- }
683
- ],
684
- "year": 1997,
685
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
686
- "volume": "",
687
- "issue": "",
688
- "pages": "72--81",
689
- "other_ids": {},
690
- "num": null,
691
- "urls": [],
692
- "raw_text": "Ait-Mokhtar S. and Chanod J.-P. (1997) Incremental finite-state parsing. Proceedings of the Fifth Con- ference on Applied Natural Language Processing, pp. 72-9.",
693
- "links": null
694
- },
695
- "BIBREF2": {
696
- "ref_id": "b2",
697
- "title": "Tutorial on building information extraction systems",
698
- "authors": [
699
- {
700
- "first": "D",
701
- "middle": [
702
- "E"
703
- ],
704
- "last": "Appelt",
705
- "suffix": ""
706
- },
707
- {
708
- "first": "D",
709
- "middle": [],
710
- "last": "Israel",
711
- "suffix": ""
712
- }
713
- ],
714
- "year": 1997,
715
- "venue": "Fifth Conference on Applied Natural Language Processing",
716
- "volume": "",
717
- "issue": "",
718
- "pages": "",
719
- "other_ids": {},
720
- "num": null,
721
- "urls": [],
722
- "raw_text": "Appelt D. E. and Israel D. (1997) Tutorial on build- ing information extraction systems. Fifth Confer- ence on Applied Natural Language Processing.",
723
- "links": null
724
- },
725
- "BIBREF3": {
726
- "ref_id": "b3",
727
- "title": "Exploiting a large thesaurus for information retrieval",
728
- "authors": [
729
- {
730
- "first": "A",
731
- "middle": [
732
- "R"
733
- ],
734
- "last": "Aronson",
735
- "suffix": ""
736
- },
737
- {
738
- "first": "T",
739
- "middle": [
740
- "C"
741
- ],
742
- "last": "Rindflesch",
743
- "suffix": ""
744
- },
745
- {
746
- "first": "A",
747
- "middle": [
748
- "C"
749
- ],
750
- "last": "Browne",
751
- "suffix": ""
752
- }
753
- ],
754
- "year": 1994,
755
- "venue": "Proceedings of RIAO 94",
756
- "volume": "",
757
- "issue": "",
758
- "pages": "197--216",
759
- "other_ids": {},
760
- "num": null,
761
- "urls": [],
762
- "raw_text": "Aronson A. R., Rindflesch T. C., and Browne A. C. (1994) Exploiting a large thesaurus for informa- tion retrieval. Proceedings of RIAO 94, pp. 197- 216.",
763
- "links": null
764
- },
765
- "BIBREF5": {
766
- "ref_id": "b5",
767
- "title": "Automatic extraction of biological information from scientific text: protein-protein interactions",
768
- "authors": [
769
- {
770
- "first": "C",
771
- "middle": [],
772
- "last": "Blaschke",
773
- "suffix": ""
774
- },
775
- {
776
- "first": "M",
777
- "middle": [
778
- "A"
779
- ],
780
- "last": "Andrade",
781
- "suffix": ""
782
- },
783
- {
784
- "first": "C",
785
- "middle": [],
786
- "last": "Ouzounis",
787
- "suffix": ""
788
- },
789
- {
790
- "first": "Valencia",
791
- "middle": [
792
- "A"
793
- ],
794
- "last": "",
795
- "suffix": ""
796
- }
797
- ],
798
- "year": 1999,
799
- "venue": "Intelligent Systems for Molecular Biology (ISMB)",
800
- "volume": "7",
801
- "issue": "",
802
- "pages": "60--67",
803
- "other_ids": {},
804
- "num": null,
805
- "urls": [],
806
- "raw_text": "Blaschke C., Andrade M. A., Ouzounis C., and Va- lencia A. (1999) Automatic extraction of biological information from scientific text: protein-protein interactions. Intelligent Systems for Molecular Bi- ology (ISMB), 7, pp. 60-7.",
807
- "links": null
808
- },
809
- "BIBREF6": {
810
- "ref_id": "b6",
811
- "title": "A stochastic parts program and noun phrase parser for unrestricted text",
812
- "authors": [
813
- {
814
- "first": "K",
815
- "middle": [
816
- "W"
817
- ],
818
- "last": "Church",
819
- "suffix": ""
820
- }
821
- ],
822
- "year": 1988,
823
- "venue": "Proceedings of the Second Conference on Applied Natural Language Processing",
824
- "volume": "",
825
- "issue": "",
826
- "pages": "136--143",
827
- "other_ids": {},
828
- "num": null,
829
- "urls": [],
830
- "raw_text": "Church K. W. (1988) A stochastic parts program and noun phrase parser for unrestricted text. Proceed- ings of the Second Conference on Applied Natural Language Processing, pp. 136-143.",
831
- "links": null
832
- },
833
- "BIBREF7": {
834
- "ref_id": "b7",
835
- "title": "Constructing biological knowledge bases by extracting information from text sources. Intelligent Systems for Molecular",
836
- "authors": [
837
- {
838
- "first": "M",
839
- "middle": [],
840
- "last": "Craven",
841
- "suffix": ""
842
- },
843
- {
844
- "first": "J",
845
- "middle": [],
846
- "last": "Kumlien",
847
- "suffix": ""
848
- }
849
- ],
850
- "year": 1999,
851
- "venue": "Biology",
852
- "volume": "7",
853
- "issue": "",
854
- "pages": "77--86",
855
- "other_ids": {},
856
- "num": null,
857
- "urls": [],
858
- "raw_text": "Craven M. and Kumlien J. (1999) Constructing bio- logical knowledge bases by extracting information from text sources. Intelligent Systems for Molecu- lar Biology (ISMB), 7, pp. 77-86.",
859
- "links": null
860
- },
861
- "BIBREF8": {
862
- "ref_id": "b8",
863
- "title": "A practical part-of-speech tagger",
864
- "authors": [
865
- {
866
- "first": "D",
867
- "middle": [
868
- "R"
869
- ],
870
- "last": "Cutting",
871
- "suffix": ""
872
- },
873
- {
874
- "first": "J",
875
- "middle": [],
876
- "last": "Kupiec",
877
- "suffix": ""
878
- },
879
- {
880
- "first": "J",
881
- "middle": [
882
- "O"
883
- ],
884
- "last": "Pedersen",
885
- "suffix": ""
886
- },
887
- {
888
- "first": "P",
889
- "middle": [],
890
- "last": "Sibun",
891
- "suffix": ""
892
- }
893
- ],
894
- "year": 1992,
895
- "venue": "Proceedings of the Third Conference on Applied Natural Language Processing",
896
- "volume": "",
897
- "issue": "",
898
- "pages": "",
899
- "other_ids": {},
900
- "num": null,
901
- "urls": [],
902
- "raw_text": "Cutting D. R., Kupiec J., Pedersen J. O., and Sibun P. (1992) A practical part-of-speech tagger. Pro- ceedings of the Third Conference on Applied Natu- ral Language Processing.",
903
- "links": null
904
- },
905
- "BIBREF9": {
906
- "ref_id": "b9",
907
- "title": "Toward information extraction: Identifying protein names from biological papers",
908
- "authors": [
909
- {
910
- "first": "F",
911
- "middle": [],
912
- "last": "Fukuda",
913
- "suffix": ""
914
- },
915
- {
916
- "first": "T",
917
- "middle": [],
918
- "last": "Tsunoda",
919
- "suffix": ""
920
- },
921
- {
922
- "first": "A",
923
- "middle": [],
924
- "last": "Tamura",
925
- "suffix": ""
926
- },
927
- {
928
- "first": "T",
929
- "middle": [],
930
- "last": "Takagi",
931
- "suffix": ""
932
- }
933
- ],
934
- "year": 1998,
935
- "venue": "Pacific Symposium on Biocomputing (PSB)",
936
- "volume": "3",
937
- "issue": "",
938
- "pages": "705--721",
939
- "other_ids": {},
940
- "num": null,
941
- "urls": [],
942
- "raw_text": "Fukuda F., Tsunoda T., Tamura A., and Takagi T. (1998) Toward information extraction: Identifying protein names from biological papers. Pacific Symposium on Biocomputing (PSB), 3, pp. 705- 16.",
943
- "links": null
944
- },
945
- "BIBREF10": {
946
- "ref_id": "b10",
947
- "title": "Untangling text data mining",
948
- "authors": [
949
- {
950
- "first": "M",
951
- "middle": [
952
- "A"
953
- ],
954
- "last": "Hearst",
955
- "suffix": ""
956
- }
957
- ],
958
- "year": 1999,
959
- "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics",
960
- "volume": "",
961
- "issue": "",
962
- "pages": "3--10",
963
- "other_ids": {},
964
- "num": null,
965
- "urls": [],
966
- "raw_text": "Hearst M. A. (1999) Untangling text data mining. Proceedings of the 37th Annual Meeting of the As- sociation for Computational Linguistics, pp. 3-10.",
967
- "links": null
968
- },
969
- "BIBREF11": {
970
- "ref_id": "b11",
971
- "title": "Deterministic parsing of syntactic non-fluencies",
972
- "authors": [
973
- {
974
- "first": "D",
975
- "middle": [],
976
- "last": "Hindle",
977
- "suffix": ""
978
- }
979
- ],
980
- "year": 1983,
981
- "venue": "Proceedings of the 21st Annual Meeting of the Association for Computational Linguistics",
982
- "volume": "",
983
- "issue": "",
984
- "pages": "123--131",
985
- "other_ids": {},
986
- "num": null,
987
- "urls": [],
988
- "raw_text": "Hindle D. (1983) Deterministic parsing of syntactic non-fluencies. Proceedings of the 21st Annual Meeting of the Association for Computational Lin- guistics, pp. 123-8.",
989
- "links": null
990
- },
991
- "BIBREF12": {
992
- "ref_id": "b12",
993
- "title": "The Unified Medical language System: An informatics research collaboration",
994
- "authors": [
995
- {
996
- "first": "B",
997
- "middle": [
998
- "L"
999
- ],
1000
- "last": "Humphreys",
1001
- "suffix": ""
1002
- },
1003
- {
1004
- "first": "D",
1005
- "middle": [
1006
- "A B"
1007
- ],
1008
- "last": "Lindberg",
1009
- "suffix": ""
1010
- },
1011
- {
1012
- "first": "H",
1013
- "middle": [
1014
- "M"
1015
- ],
1016
- "last": "Schoolman",
1017
- "suffix": ""
1018
- },
1019
- {
1020
- "first": "G",
1021
- "middle": [
1022
- "O"
1023
- ],
1024
- "last": "Barnett",
1025
- "suffix": ""
1026
- }
1027
- ],
1028
- "year": 1998,
1029
- "venue": "Journal of the American Medical Informatics Association",
1030
- "volume": "",
1031
- "issue": "1",
1032
- "pages": "1--13",
1033
- "other_ids": {},
1034
- "num": null,
1035
- "urls": [],
1036
- "raw_text": "Humphreys B. L., Lindberg D. A. B., Schoolman H. M., and Barnett G. O. (1998) The Unified Medical language System: An informatics research collabo- ration. Journal of the American Medical Informat- ics Association, 5/1, pp. 1-13.",
1037
- "links": null
1038
- },
1039
- "BIBREF13": {
1040
- "ref_id": "b13",
1041
- "title": "Lexical methods for managing variation in biomedical terminologies",
1042
- "authors": [
1043
- {
1044
- "first": "A",
1045
- "middle": [
1046
- "T"
1047
- ],
1048
- "last": "Mccray",
1049
- "suffix": ""
1050
- },
1051
- {
1052
- "first": "S",
1053
- "middle": [],
1054
- "last": "Srinivasan",
1055
- "suffix": ""
1056
- },
1057
- {
1058
- "first": "A",
1059
- "middle": [
1060
- "C"
1061
- ],
1062
- "last": "Browne",
1063
- "suffix": ""
1064
- }
1065
- ],
1066
- "year": 1994,
1067
- "venue": "Proceedings of the 18th Annual Symposium on Computer Applications in Medical Care",
1068
- "volume": "",
1069
- "issue": "",
1070
- "pages": "235--244",
1071
- "other_ids": {},
1072
- "num": null,
1073
- "urls": [],
1074
- "raw_text": "McCray A. T., Srinivasan S., and Browne A. C. (1994) Lexical methods for managing variation in biomedical terminologies. Proceedings of the 18th Annual Symposium on Computer Applications in Medical Care, pp. 235-9.",
1075
- "links": null
1076
- },
1077
- "BIBREF14": {
1078
- "ref_id": "b14",
1079
- "title": "Robust partial parsing through incremental, multi-algorithm processing",
1080
- "authors": [
1081
- {
1082
- "first": "D",
1083
- "middle": [
1084
- "D"
1085
- ],
1086
- "last": "Mcdonald",
1087
- "suffix": ""
1088
- }
1089
- ],
1090
- "year": 1992,
1091
- "venue": "",
1092
- "volume": "",
1093
- "issue": "",
1094
- "pages": "83--99",
1095
- "other_ids": {},
1096
- "num": null,
1097
- "urls": [],
1098
- "raw_text": "McDonald D. D. (1992) Robust partial parsing through incremental, multi-algorithm processing. In \"Text-Based Intelligent Systems,\" P. S. Jacobs, ed., pp. 83-99.",
1099
- "links": null
1100
- },
1101
- "BIBREF15": {
1102
- "ref_id": "b15",
1103
- "title": "Projecting corpus-based semantic links on a thesaurus",
1104
- "authors": [
1105
- {
1106
- "first": "E",
1107
- "middle": [],
1108
- "last": "Morin",
1109
- "suffix": ""
1110
- },
1111
- {
1112
- "first": "C",
1113
- "middle": [],
1114
- "last": "Jacquemin",
1115
- "suffix": ""
1116
- }
1117
- ],
1118
- "year": null,
1119
- "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics",
1120
- "volume": "",
1121
- "issue": "",
1122
- "pages": "389--96",
1123
- "other_ids": {},
1124
- "num": null,
1125
- "urls": [],
1126
- "raw_text": "Morin E. and Jacquemin C. Projecting corpus-based semantic links on a thesaurus. Proceedings of the 37th Annual Meeting of the Association for Com- putational Linguistics, pp. 389-96.",
1127
- "links": null
1128
- },
1129
- "BIBREF16": {
1130
- "ref_id": "b16",
1131
- "title": "MUC-7. Message Understanding Conference Proceedings",
1132
- "authors": [],
1133
- "year": null,
1134
- "venue": "",
1135
- "volume": "",
1136
- "issue": "",
1137
- "pages": "",
1138
- "other_ids": {},
1139
- "num": null,
1140
- "urls": [],
1141
- "raw_text": "MUC-7. Message Understanding Conference Pro- ceedings, http://www.muc.saic.com.",
1142
- "links": null
1143
- },
1144
- "BIBREF17": {
1145
- "ref_id": "b17",
1146
- "title": "prep.) Mining MEDLINE",
1147
- "authors": [
1148
- {
1149
- "first": "J",
1150
- "middle": [
1151
- "V"
1152
- ],
1153
- "last": "Rajan",
1154
- "suffix": ""
1155
- },
1156
- {
1157
- "first": "L",
1158
- "middle": [],
1159
- "last": "Hunter",
1160
- "suffix": ""
1161
- },
1162
- {
1163
- "first": "T",
1164
- "middle": [
1165
- "C"
1166
- ],
1167
- "last": "Rindflesch",
1168
- "suffix": ""
1169
- }
1170
- ],
1171
- "year": null,
1172
- "venue": "",
1173
- "volume": "",
1174
- "issue": "",
1175
- "pages": "",
1176
- "other_ids": {},
1177
- "num": null,
1178
- "urls": [],
1179
- "raw_text": "Rajan J. V., Hunter L., and Rindflesch T. C. (In prep.) Mining MEDLINE.",
1180
- "links": null
1181
- },
1182
- "BIBREF18": {
1183
- "ref_id": "b18",
1184
- "title": "Integrating natural language processing and biomedical domain knowledge for increased information retrieval effectiveness. Proceedings of the 5th Annual Dual-Use Technologies and Applications Conference",
1185
- "authors": [
1186
- {
1187
- "first": "T",
1188
- "middle": [
1189
- "C"
1190
- ],
1191
- "last": "Rindflesch",
1192
- "suffix": ""
1193
- }
1194
- ],
1195
- "year": 1995,
1196
- "venue": "",
1197
- "volume": "",
1198
- "issue": "",
1199
- "pages": "260--265",
1200
- "other_ids": {},
1201
- "num": null,
1202
- "urls": [],
1203
- "raw_text": "Rindflesch T. C. (1995) Integrating natural language processing and biomedical domain knowledge for increased information retrieval effectiveness. Pro- ceedings of the 5th Annual Dual-Use Technologies and Applications Conference, pp. 260-5.",
1204
- "links": null
1205
- },
1206
- "BIBREF19": {
1207
- "ref_id": "b19",
1208
- "title": "Mining molecular binding terminology from biomedical text",
1209
- "authors": [
1210
- {
1211
- "first": "T",
1212
- "middle": [
1213
- "C"
1214
- ],
1215
- "last": "Rindflesch",
1216
- "suffix": ""
1217
- },
1218
- {
1219
- "first": "L",
1220
- "middle": [],
1221
- "last": "Hunter",
1222
- "suffix": ""
1223
- },
1224
- {
1225
- "first": "A",
1226
- "middle": [
1227
- "R"
1228
- ],
1229
- "last": "Aronson",
1230
- "suffix": ""
1231
- }
1232
- ],
1233
- "year": 1999,
1234
- "venue": "Proceedings of the AMIA Annual Symposium",
1235
- "volume": "",
1236
- "issue": "",
1237
- "pages": "127--131",
1238
- "other_ids": {},
1239
- "num": null,
1240
- "urls": [],
1241
- "raw_text": "Rindflesch T. C., Hunter L., and Aronson A. R. (1999) Mining molecular binding terminology from biomedical text. Proceedings of the AMIA Annual Symposium, pp. 127-131.",
1242
- "links": null
1243
- },
1244
- "BIBREF20": {
1245
- "ref_id": "b20",
1246
- "title": "EDGAR: Extraction of drug, s, genes and relations from the biomedical literature",
1247
- "authors": [
1248
- {
1249
- "first": "T",
1250
- "middle": [
1251
- "C"
1252
- ],
1253
- "last": "Rindflesch",
1254
- "suffix": ""
1255
- },
1256
- {
1257
- "first": "L",
1258
- "middle": [],
1259
- "last": "Tanabe",
1260
- "suffix": ""
1261
- },
1262
- {
1263
- "first": "J",
1264
- "middle": [
1265
- "N"
1266
- ],
1267
- "last": "Weinstein",
1268
- "suffix": ""
1269
- },
1270
- {
1271
- "first": "L",
1272
- "middle": [],
1273
- "last": "Hunter",
1274
- "suffix": ""
1275
- }
1276
- ],
1277
- "year": 2000,
1278
- "venue": "Pacific Symposium on Biocomputing (PSB)",
1279
- "volume": "5",
1280
- "issue": "",
1281
- "pages": "514--539",
1282
- "other_ids": {},
1283
- "num": null,
1284
- "urls": [],
1285
- "raw_text": "Rindflesch T. C., Tanabe L., Weinstein J. N., and Hunter L. (2000) EDGAR: Extraction of drug, s, genes and relations from the biomedical literature. Pacific Symposium on Biocomputing (PSB), 5, pp. 514-25.",
1286
- "links": null
1287
- },
1288
- "BIBREF21": {
1289
- "ref_id": "b21",
1290
- "title": "Barrier word method for detecting molecular biology multiple word terms",
1291
- "authors": [
1292
- {
1293
- "first": "K",
1294
- "middle": [
1295
- "W F"
1296
- ],
1297
- "last": "Tersmette",
1298
- "suffix": ""
1299
- },
1300
- {
1301
- "first": "A",
1302
- "middle": [
1303
- "F"
1304
- ],
1305
- "last": "Scott",
1306
- "suffix": ""
1307
- },
1308
- {
1309
- "first": "G",
1310
- "middle": [
1311
- "W"
1312
- ],
1313
- "last": "Moore",
1314
- "suffix": ""
1315
- },
1316
- {
1317
- "first": "N",
1318
- "middle": [
1319
- "W"
1320
- ],
1321
- "last": "Matheson",
1322
- "suffix": ""
1323
- },
1324
- {
1325
- "first": "R",
1326
- "middle": [
1327
- "E"
1328
- ],
1329
- "last": "Miller",
1330
- "suffix": ""
1331
- }
1332
- ],
1333
- "year": 1988,
1334
- "venue": "Proceedings of the 12th Annual Symposium on Computer Applications in Medical Care",
1335
- "volume": "",
1336
- "issue": "",
1337
- "pages": "207--218",
1338
- "other_ids": {},
1339
- "num": null,
1340
- "urls": [],
1341
- "raw_text": "Tersmette K. W. F., Scott A. F., Moore G.W., Mathe- son N. W., and Miller R. E. (1988) Barrier word method for detecting molecular biology multiple word terms. Proceedings of the 12th Annual Sym- posium on Computer Applications in Medical Care, pp. 207-11.",
1342
- "links": null
1343
- },
1344
- "BIBREF22": {
1345
- "ref_id": "b22",
1346
- "title": "The Seventh Text Retrieval Conference",
1347
- "authors": [
1348
- {
1349
- "first": "E",
1350
- "middle": [
1351
- "M"
1352
- ],
1353
- "last": "Voorhees",
1354
- "suffix": ""
1355
- },
1356
- {
1357
- "first": "D",
1358
- "middle": [
1359
- "K"
1360
- ],
1361
- "last": "Harman",
1362
- "suffix": ""
1363
- }
1364
- ],
1365
- "year": 1998,
1366
- "venue": "",
1367
- "volume": "",
1368
- "issue": "",
1369
- "pages": "",
1370
- "other_ids": {},
1371
- "num": null,
1372
- "urls": [],
1373
- "raw_text": "Voorhees E. M. and Harman D. K. (1998) The Sev- enth Text Retrieval Conference.",
1374
- "links": null
1375
- },
1376
- "BIBREF23": {
1377
- "ref_id": "b23",
1378
- "title": "Developing a hybrid NP parser",
1379
- "authors": [
1380
- {
1381
- "first": "A",
1382
- "middle": [],
1383
- "last": "Vourtilainen",
1384
- "suffix": ""
1385
- },
1386
- {
1387
- "first": "L",
1388
- "middle": [],
1389
- "last": "Padro",
1390
- "suffix": ""
1391
- }
1392
- ],
1393
- "year": 1997,
1394
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
1395
- "volume": "",
1396
- "issue": "",
1397
- "pages": "80--87",
1398
- "other_ids": {},
1399
- "num": null,
1400
- "urls": [],
1401
- "raw_text": "Vourtilainen A. and Padro L. (1997) Developing a hybrid NP parser. Proceedings of the Fifth Confer- ence on Applied Natural Language Processing, pp. 80-7.",
1402
- "links": null
1403
- },
1404
- "BIBREF24": {
1405
- "ref_id": "b24",
1406
- "title": "Disambiguation of proper names in text",
1407
- "authors": [
1408
- {
1409
- "first": "N",
1410
- "middle": [],
1411
- "last": "Wacholder",
1412
- "suffix": ""
1413
- },
1414
- {
1415
- "first": "Y",
1416
- "middle": [],
1417
- "last": "Ravin",
1418
- "suffix": ""
1419
- },
1420
- {
1421
- "first": "M",
1422
- "middle": [],
1423
- "last": "Choi",
1424
- "suffix": ""
1425
- }
1426
- ],
1427
- "year": 1997,
1428
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
1429
- "volume": "",
1430
- "issue": "",
1431
- "pages": "202--208",
1432
- "other_ids": {},
1433
- "num": null,
1434
- "urls": [],
1435
- "raw_text": "Wacholder N., Ravin Y., and Choi M. (1997) Disam- biguation of proper names in text. Proceedings of the Fifth Conference on Applied Natural Language Processing, pp. 202-208.",
1436
- "links": null
1437
- },
1438
- "BIBREF25": {
1439
- "ref_id": "b25",
1440
- "title": "Coping with ambiguity and unknown words through probabilistic models",
1441
- "authors": [
1442
- {
1443
- "first": "R",
1444
- "middle": [],
1445
- "last": "Weischedel",
1446
- "suffix": ""
1447
- },
1448
- {
1449
- "first": "M",
1450
- "middle": [],
1451
- "last": "Meteer",
1452
- "suffix": ""
1453
- },
1454
- {
1455
- "first": "R",
1456
- "middle": [],
1457
- "last": "Schwartz",
1458
- "suffix": ""
1459
- },
1460
- {
1461
- "first": "L",
1462
- "middle": [],
1463
- "last": "Ramshaw",
1464
- "suffix": ""
1465
- },
1466
- {
1467
- "first": "Palmucci",
1468
- "middle": [
1469
- "J"
1470
- ],
1471
- "last": "",
1472
- "suffix": ""
1473
- }
1474
- ],
1475
- "year": 1993,
1476
- "venue": "Computational Linguistics",
1477
- "volume": "19",
1478
- "issue": "2",
1479
- "pages": "359--382",
1480
- "other_ids": {},
1481
- "num": null,
1482
- "urls": [],
1483
- "raw_text": "Weischedel R., Meteer M., Schwartz R., Ramshaw L., and Palmucci J. (1993) Coping with ambiguity and unknown words through probabilistic models. Computational Linguistics, 19/2, pp. 359-382.",
1484
- "links": null
1485
- },
1486
- "BIBREF26": {
1487
- "ref_id": "b26",
1488
- "title": "Fast statistical parsing of noun phrases for document indexing",
1489
- "authors": [
1490
- {
1491
- "first": "C",
1492
- "middle": [],
1493
- "last": "Zhai",
1494
- "suffix": ""
1495
- }
1496
- ],
1497
- "year": 1997,
1498
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
1499
- "volume": "",
1500
- "issue": "",
1501
- "pages": "312--343",
1502
- "other_ids": {},
1503
- "num": null,
1504
- "urls": [],
1505
- "raw_text": "Zhai C. (1997) Fast statistical parsing of noun phrases for document indexing. Proceedings of the Fifth Conference on Applied Natural Language Processing, pp. 312-31.",
1506
- "links": null
1507
- }
1508
- },
1509
- "ref_entries": {
1510
- "FIGREF0": {
1511
- "type_str": "figure",
1512
- "num": null,
1513
- "uris": null,
1514
- "text": "(5) binding_term( [transcriptionally active kappaB motifs], [in the TNF-alpha promoter], [in normal cells])"
1515
- },
1516
- "FIGREF1": {
1517
- "type_str": "figure",
1518
- "num": null,
1519
- "uris": null,
1520
- "text": "The nonvisual arrestins, beta-arrestin and arrestin3, but not visual arrestin, bind specifically to a glutathione S-transferase-clathrin terminal domain fusion protein."
1521
- }
1522
- }
1523
- }
1524
- }
Full_text_JSON/prefixA/json/A00/A00-1027.json DELETED
@@ -1,1184 +0,0 @@
1
- {
2
- "paper_id": "A00-1027",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:03.329225Z"
6
- },
7
- "title": "Compound Noun Segmentation Based on Lexical Data Extracted from Corpus*",
8
- "authors": [
9
- {
10
- "first": "Juntae",
11
- "middle": [],
12
- "last": "Yoon",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Pennsylvania",
17
- "location": {
18
- "addrLine": "3401 Walnut St., Suite 400A",
19
- "postCode": "19104-6228",
20
- "settlement": "Philadelphia",
21
- "region": "PA",
22
- "country": "USA"
23
- }
24
- },
25
- "email": "[email protected]"
26
- }
27
- ],
28
- "year": "",
29
- "venue": null,
30
- "identifiers": {},
31
- "abstract": "Compound noun analysis is one of the crucial problems in Korean language processing because a series of nouns in Korean may appear without white space in real texts, which makes it difficult to identify the morphological constituents. This paper presents an effective method of Korean compound noun segmentation based on lexical data extracted from corpus. The segmentation is done by two steps: First, it is based on manually constructed built-in dictionary for segmentation whose data were extracted from 30 million word corpus. Second, a segmentation algorithm using statistical data is proposed, where simple nouns and their frequencies are also extracted from corpus. The analysis is executed based on CYK tabular parsing and min-max operation. By experiments, its accuracy is about 97.29%, which turns out to be very effective. * This work was supported by a KOSEF's postdoctoral fellowship grant. retrieval, and obtaining better translation in machine translation. For example, suppose that a compound noun 'seol'agsan-gugrib-gongwon(Seol'ag Mountain National Park)' appear in documents. A user might want to retrieve documents about 'seol'agsan(Seol'ag Mountain)', and then it is likely that the documents with seol'agsan-gugrib-gongwon' are also the ones in his interest. Therefore, it should be exactly segmented before indexing in order for the documents to be retrieved with the query",
32
- "pdf_parse": {
33
- "paper_id": "A00-1027",
34
- "_pdf_hash": "",
35
- "abstract": [
36
- {
37
- "text": "Compound noun analysis is one of the crucial problems in Korean language processing because a series of nouns in Korean may appear without white space in real texts, which makes it difficult to identify the morphological constituents. This paper presents an effective method of Korean compound noun segmentation based on lexical data extracted from corpus. The segmentation is done by two steps: First, it is based on manually constructed built-in dictionary for segmentation whose data were extracted from 30 million word corpus. Second, a segmentation algorithm using statistical data is proposed, where simple nouns and their frequencies are also extracted from corpus. The analysis is executed based on CYK tabular parsing and min-max operation. By experiments, its accuracy is about 97.29%, which turns out to be very effective. * This work was supported by a KOSEF's postdoctoral fellowship grant. retrieval, and obtaining better translation in machine translation. For example, suppose that a compound noun 'seol'agsan-gugrib-gongwon(Seol'ag Mountain National Park)' appear in documents. A user might want to retrieve documents about 'seol'agsan(Seol'ag Mountain)', and then it is likely that the documents with seol'agsan-gugrib-gongwon' are also the ones in his interest. Therefore, it should be exactly segmented before indexing in order for the documents to be retrieved with the query",
38
- "cite_spans": [],
39
- "ref_spans": [],
40
- "eq_spans": [],
41
- "section": "Abstract",
42
- "sec_num": null
43
- }
44
- ],
45
- "body_text": [
46
- {
47
- "text": "Morphological analysis is crucial for processing the agglutinative language like Korean since words in such languages have lots of morphological variants. A sentence is represented by a sequence of eojeols which are the syntactic unit~ delimited by spacing characters in Korean. Unlike in English, an eojeol is not one word but composed of a series of words (content words and functional words). In particular, since an eojeol can often contain more than one noun, we cannot get proper interpretation of the sentence or phrase without its accurate segmentation.",
48
- "cite_spans": [],
49
- "ref_spans": [],
50
- "eq_spans": [],
51
- "section": "Introduction",
52
- "sec_num": "1"
53
- },
54
- {
55
- "text": "The problem in compound noun segmentation is that it is not possible to register all compound nouns in the dictionary since nouns are in the open set of words as well as the number of them is very large. Thus, they must be treated as unseen words without a segmentation process. Furthermore, accurate compound noun segmentation plays an important role in the application system. Compound noun segmentation is necessarily required for improving recall and precision in Korean information 'seol'agsan'. Also, to translate 'seol'agsan-gugribgongwon' to Seol'ag Mountain National Park, the constituents should be identified first through the process of segmentation.",
56
- "cite_spans": [],
57
- "ref_spans": [],
58
- "eq_spans": [],
59
- "section": "Introduction",
60
- "sec_num": "1"
61
- },
62
- {
63
- "text": "This paper presents two methods for segmentation of compound nouns. First, we extract compound nouns from a large size of corpus, manually divide them into simple nouns and construct the hand built segmentation dictionary with them. The dictionary includes compound nouns which are frequently used and need exceptional process. The number of data are about 100,000.",
64
- "cite_spans": [],
65
- "ref_spans": [],
66
- "eq_spans": [],
67
- "section": "Introduction",
68
- "sec_num": "1"
69
- },
70
- {
71
- "text": "Second, the segmentation algorithm is applied if the compound noun does not exist in the built-in dictionary. Basically, the segmenter is based on frequency of individual nouns extracted from corpus. However, the problem is that it is difficult to distinguish proper noun and common noun since there is no clue like capital letters in Korean. Thus, just a large amount of lexical knowledge does not make good results if it contains incorrect data and also it is not appropriate to use frequencies obtained by automatically tagging large corpus. Moreover, sufficient lexical data cannot be acquired from small amounts of tagged corpus.",
72
- "cite_spans": [],
73
- "ref_spans": [],
74
- "eq_spans": [],
75
- "section": "Introduction",
76
- "sec_num": "1"
77
- },
78
- {
79
- "text": "In this paper, we propose a method to get simple nouns and their frequencies from frequently occurring eojeols using repetitiveness of natural language. The amount of eojeols investigated is manually tractable and frequently used nouns extracted from them are crucial for compound noun segmentation. Furthermore, we propose rain-max composition to divide a sequence of syllables, which would be proven to be an effective method by experiments.",
80
- "cite_spans": [],
81
- "ref_spans": [],
82
- "eq_spans": [],
83
- "section": "Introduction",
84
- "sec_num": "1"
85
- },
86
- {
87
- "text": "To briefly show the reason that we select the operation, let us consider the following example. Suppose that a compound noun be composed of four syllables 'sl s2s3s4 '. There are several possibilities of segmentation in the sequence of syllables, where we consider the following possibilities (Sl/S2S3S4) and (sls2/s3s4). Assume that 'sl' is a frequently appearing word in texts whereas 's2s3s4' is a rarely occurring sequence of syllables as a word. On the other hand 'sis2' and 's3s4' occurs frequently but although they don't occur as frequently as 'sl'. In this case, the more likely segmentation would be (sls2/s3s4). It means that a sequence of syllables should not be divided into frequently occurring one and rarely occurring one. In this sense, min-max is the appropriate operation for the selection. In other words, rain value is selected between two sequences of syllables, and then max is taken from min values selected. To apply the operation repetitively, we use the CYK tabular parsing style algorithm.",
88
- "cite_spans": [],
89
- "ref_spans": [],
90
- "eq_spans": [],
91
- "section": "Introduction",
92
- "sec_num": "1"
93
- },
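To make the min-max rationale concrete, here is a minimal sketch (not from the paper; the likelihood numbers are invented for illustration) of how the operation prefers the split that avoids the rare sequence:

```python
# Hypothetical word likelihoods: s1 is very frequent, s2s3s4 is rare as a
# word, while s1s2 and s3s4 are both reasonably frequent.
word = {"s1": 0.010, "s2s3s4": 0.00001, "s1s2": 0.004, "s3s4": 0.003}

candidates = [("s1", "s2s3s4"), ("s1s2", "s3s4")]

# Score each split by its weakest part (min), then keep the split whose
# weakest part is strongest (max): min-max composition.
best = max(candidates, key=lambda parts: min(word[p] for p in parts))
print(best)  # ('s1s2', 's3s4'): the rarely occurring part is avoided
```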
94
- {
95
- "text": "Since the compound noun consists of a series of nouns, the probability model using transition among parts of speech is not helpful, and rather lexical information is required for the compound noun segmentation. Our segmentation algorithm is based on a large collection of lexical information that consists of two kinds of data: One is the hand built segmentation dictionary (HBSD) and the other is the simple noun dictionary for segmentation (SND).",
96
- "cite_spans": [],
97
- "ref_spans": [],
98
- "eq_spans": [],
99
- "section": "Lexical Data Acquisition",
100
- "sec_num": "2"
101
- },
102
- {
103
- "text": "The first phase of compound noun segmentation uses the built-in dictionary (HBSD). The advantage of using the built-in dictionary is that the segmentation could (1) be very accurate by hand-made data and (2) become more efficient. In Korean compound noun, one syllable noun is sometimes highly ambiguous between suffix and noun, but human can easily identify them using semantic knowledge. For example, one syllable noun 'ssi' in Korean might be used either as a suffix or as a noun which means 'Mr/Ms' or 'seed' respectively. Without any semantic information, the best way to distinguish them is to record all the compound noun examples containing the meaning of seed in the dictionary since the number of compound nouns containing a meaning of 'seed' is even smaller. Besides, we can treat general spacing errors using the dictionary. By the spacing rule for Korean, there should be one content word except noun in an eojeol, but it turns out that one or more content words of short length sometimes appear without space in real texts, which causes the lexical ambiguities. It makes the system inefficient to deal with all these words on the phase of basic morphological analysis. To construct the dictionary, compound nouns axe extracted from corpus and manually elaborated. First, the morphological analyzer analyzes 30 million eojeol corpus using only simple noun dictionary, and the failed results are candidates for compound noun. After postpositions, if any, are removed from the compound noun candidates of the failure eojeols, the candidates axe modified and analyzed by hand. In addition, a collection of compound nouns of KAIST (Korea Advanced Institute of Science & Technology) is added to the dictionary in order to supplement them. The number of entries contained in the built-in dictionary is about 100,000. Table 1 shows some examples in the built-in dictionary. _The italic characters such as 'n' or 'x' in analysis information (right column) of the table is used to make distinction between noun and suffix.",
104
- "cite_spans": [],
105
- "ref_spans": [
106
- {
107
- "start": 1824,
108
- "end": 1831,
109
- "text": "Table 1",
110
- "ref_id": "TABREF0"
111
- }
112
- ],
113
- "eq_spans": [],
114
- "section": "Hand-Built Segmentation Dictionary",
115
- "sec_num": "2.1"
116
- },
117
- {
118
- "text": "As we said earlier, it is impossible for all compound nouns to be registered in the dictionary, and thus the built-in dictionary cannot cover all compound nouns even though it gives more accurate results. We need some good segmentation model for compound noun, therefore.",
119
- "cite_spans": [],
120
- "ref_spans": [],
121
- "eq_spans": [],
122
- "section": "Extraction of Lexical Information for Segmentation from Corpus",
123
- "sec_num": "2.2"
124
- },
125
- {
126
- "text": "In compound noun segmentation, the thing that we pay attention to was that lexical information is crucial for segmenting noun compounds. Since a compound noun consists only of a sequence of nouns i.e. (noun)+, the transition probability of parts of speech is no use. Namely, the frequency of each noun plays highly important role in compound noun segmentation. Besides, since the parameter space is huge, we cannot extract enough lexicai information from hundreds of thousands of POS tagged corpus 1 even if accurate lexical information can be extracted from annotated corpus. Thus, a large size of corpus should be used to extract proper frequencies of nouns. However, it is difficult to look at a large size of corpus and to assign analyses to it, which makes it difficult to estimate the frequency distribution of words. Therefore, we need another approach for obtaining frequencies of nouns. It must be noted here that each noun in compound nouns could be easily segmented by human in many cases because it has a prominent figure in the sense that it is a frequently used word and so familiar with him. In other words, nouns prominent in documents can be defined as frequently occurred ones, which we call distinct nouns. Compound nouns contains these distinct nouns in many cases, which makes it easier to segment them and to identify their constituents. Empirically, it is well-known that too many words in the dictionary have a bad influence on morphological analysis in Korean. It is because rarely used nouns result in oversegmentation if they are included in compound noun segmentation dictionary. Therefore, it is necessary to select distinct nouns, which leads us to use a part of corpus instead of entire corpus that consists of frequently used ones in the corpus.",
127
- "cite_spans": [],
128
- "ref_spans": [],
129
- "eq_spans": [],
130
- "section": "Extraction of Lexical Information for Segmentation from Corpus",
131
- "sec_num": "2.2"
132
- },
133
- {
134
- "text": "First, we examined distribution of eojeols in corpus in order to make the subset of corpus to extract lexical frequencies of nouns. The notable thing in our experiment is that the number of eojeols in corpus is increased in proportion to the size of corpus, but a small portion of eojeols takes most parts of the whole corpus. For instance, 70% of the corpus consists of just 60 thousand types of eojeols which take 7.5 million of frequency from 10 million eojeol corpus and 20.5 million from 30 million eojeols. The lowest frequency of the 60,000 eojeols is 49 in 30 million eojeol corpus. We decided to take 60,000 eojeols which are manually tractable and compose most parts of corpus ( Figure 1 ).",
135
- "cite_spans": [],
136
- "ref_spans": [
137
- {
138
- "start": 689,
139
- "end": 697,
140
- "text": "Figure 1",
141
- "ref_id": "FIGREF1"
142
- }
143
- ],
144
- "eq_spans": [],
145
- "section": "Extraction of Lexical Information for Segmentation from Corpus",
146
- "sec_num": "2.2"
147
- },
148
- {
149
- "text": "Second, we made morphological analyses for the 60,000 eojeols by hand. Since Korean is an agglutinative language, an eojeol is represented by a sequence of content words and functional words as mentioned before. Especially, content words and functional words often have different distribution of syllables. In addition, inflectional endings for predicate and postpositions for nominals also have quite different distribution for syllables. Hence we can distinguish the constituents of eojeols in many cases. Of course, there are also many cases in which the result of morphological analysis has ambiguities. For example, an eojeol 'na-neun' in Korean has ambiguity of 'na/N+neun/P', 'na/PN+neun/P' and 'nal/V+neun/E'. In this example, the parts of speech N, PN, P, V and E mean noun, pronoun, postposition, verb and ending, respectively. On the other hand, many eojeols which are analyzed as having ambiguities by a morphological analyzer are actually not ambiguous. For instance, 'ga-geora' (go/imperative) has ambiguities by most morphological analyzer among 'ga/V+geora/E' and 'ga/N+i/C+geora/E' (C is copula), but it is actually not ambiguous. Such morphological ambiguity is caused by overgeneration of the morphological analyzer since the analyzer uses less detailed rules for robustness of the system. Therefore, if we examine and correct the results scrupulously, many ambiguities can be removed through the process.",
150
- "cite_spans": [],
151
- "ref_spans": [],
152
- "eq_spans": [],
153
- "section": "Extraction of Lexical Information for Segmentation from Corpus",
154
- "sec_num": "2.2"
155
- },
156
- {
157
- "text": "As the result of the manual process, only 15% of 60,000 eojeols remain ambiguous at the mid-level of part of speech classification 2. Then, we extracted simple nouns and their frequencies from the data. Despite of manual correction, there must be ambiguities left for the reason mentioned above. There may be some methods to distribute frequencies in case of ambiguous words, but we simply assign the equal distribution to them. For instance, gage has two possibilities of analysis i.e. 'gage/N' and 'galV+ge/E', and its frequency is 2263, in which the noun 'gage' is assigned 1132 as its frequency. Table 2 shows examples of manually corrected morphological analyses of eojeols containing a noun 'gage' and their frequencies. We call the nouns extracted in such a way a set of distinct nouns.",
158
- "cite_spans": [],
159
- "ref_spans": [
160
- {
161
- "start": 600,
162
- "end": 607,
163
- "text": "Table 2",
164
- "ref_id": "TABREF2"
165
- }
166
- ],
167
- "eq_spans": [],
168
- "section": "Extraction of Lexical Information for Segmentation from Corpus",
169
- "sec_num": "2.2"
170
- },
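The equal-distribution step can be sketched as follows (the analysis strings are simplified and the helper names are invented; the figures are the 'gage' counts from Table 2):

```python
from collections import defaultdict

# Manually corrected analyses per eojeol, with the eojeol's corpus frequency.
# 'gage' is ambiguous between the noun 'gage/N' and the verb form 'ga/V+ge/E'.
eojeol_analyses = {"gage": (["gage/N", "ga/V+ge/E"], 2263)}

noun_freq = defaultdict(float)
for analyses, freq in eojeol_analyses.values():
    share = freq / len(analyses)            # equal split over the analyses
    for analysis in analyses:
        morph, tag = analysis.split("+")[0].split("/")
        if tag == "N":                      # count only nominal readings
            noun_freq[morph] += share

print(round(noun_freq["gage"]))  # 1132, matching the figure in the text
```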
171
- {
172
- "text": "In addition, we supplement the dictionary with other nouns not appeared in the words obtained by the method mentioned above. First, nouns of more than three syllables are rare in real texts in Korean, as shown in Lee and Ahn (1996) . Their experiments proved that syllable based bigram indexing model makes much better result than other n-gram model such as trigram and quadragram in Korean IR. It follows that two syllable nouns take an overwhelming majority in nouns. Thus, there are not many such nouns in the simple nouns extracted by the manually corrected nouns (a set of distinct nouns). In particular, since many nouns of more 2At the mid-level of part of speech classification, for example, endings and postpositions are represented just by one tag e.g. E and P. To identify the sentential or clausal type (subordinate or declarative) in Korean, the ending should be subclassified for syntactic analysis more detail which can be done by statistical process. It is beyond the subject of this paper. ",
173
- "cite_spans": [
174
- {
175
- "start": 213,
176
- "end": 231,
177
- "text": "Lee and Ahn (1996)",
178
- "ref_id": "BIBREF5"
179
- }
180
- ],
181
- "ref_spans": [],
182
- "eq_spans": [],
183
- "section": "Extraction of Lexical Information for Segmentation from Corpus",
184
- "sec_num": "2.2"
185
- },
186
- {
187
- "text": "To simply describe the basic idea of our compound noun segmentation, we first consider a compound noun to be segmented into only two nouns. Given a compound noun, it is segmented by the possibility that a sequence of syllables inside it forms a word. The possibility that a sequence of syllables forms a word is measured by the following formula.",
188
- "cite_spans": [],
189
- "ref_spans": [],
190
- "eq_spans": [],
191
- "section": "Basic Idea",
192
- "sec_num": "3.1"
193
- },
194
- {
195
- "text": "Word (si,... sj) -fq(si,.., sj) Iq~",
196
- "cite_spans": [
197
- {
198
- "start": 5,
199
- "end": 31,
200
- "text": "(si,... sj) -fq(si,.., sj)",
201
- "ref_id": null
202
- }
203
- ],
204
- "ref_spans": [],
205
- "eq_spans": [],
206
- "section": "Basic Idea",
207
- "sec_num": "3.1"
208
- },
209
- {
210
- "text": "In the formula, fq (s~,...sj) is the frequency of the syllable si...sj, which is obtained from SND constructed on the stages of lexical data extraction.",
211
- "cite_spans": [
212
- {
213
- "start": 19,
214
- "end": 29,
215
- "text": "(s~,...sj)",
216
- "ref_id": null
217
- }
218
- ],
219
- "ref_spans": [],
220
- "eq_spans": [],
221
- "section": "Basic Idea",
222
- "sec_num": "3.1"
223
- },
224
- {
225
- "text": "And, fqN is the total sum of frequencies of simple nouns. Colloquially, the equation 1estimates how much the given sequence of syllables are likely to be word. If a sequence of syllables in the set of distinct nouns is included in a compound noun, it is more probable that it is divided around the syllables. If a compound noun consists of, for any combination of syllables, sequences of syllables in the set of supplementary nouns, the boundary of segmentation is somewhat fuzzy. Besides, if a given sequence of syllables is not found in SND, it is not probable that it is a noun.",
226
- "cite_spans": [],
227
- "ref_spans": [],
228
- "eq_spans": [],
229
- "section": "Basic Idea",
230
- "sec_num": "3.1"
231
- },
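Equation (1) translates directly into code; a minimal sketch (the names `snd` and `fq_n` are introduced here, and the counts are invented):

```python
def word_likelihood(syllables: str, snd: dict, fq_n: int) -> float:
    """Word(si ... sj) = fq(si ... sj) / fqN.

    Sequences absent from the SND get 0.0, reflecting that an unseen
    sequence of syllables is unlikely to be a noun.
    """
    return snd.get(syllables, 0) / fq_n

# Toy usage:
snd = {"hag-gyo": 120, "saeng-hwal": 95, "hag": 40}
fq_n = sum(snd.values())
print(word_likelihood("hag-gyo", snd, fq_n))   # 120/255, about 0.47
```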
232
- {
233
- "text": "Consider a compound noun 'hag\u00b0gyo-saenghwal(school life)'. In case that segmentation of syllables is made into two, there would be four possibilities of segmentation for the example as follows:",
234
- "cite_spans": [],
235
- "ref_spans": [],
236
- "eq_spans": [],
237
- "section": "Basic Idea",
238
- "sec_num": "3.1"
239
- },
240
- {
241
- "text": "1. hag 9yo-saeng-hwal 2.",
242
- "cite_spans": [],
243
- "ref_spans": [],
244
- "eq_spans": [],
245
- "section": "Basic Idea",
246
- "sec_num": "3.1"
247
- },
248
- {
249
- "text": "hag-gyo saeng-hwal 3.",
250
- "cite_spans": [],
251
- "ref_spans": [],
252
- "eq_spans": [],
253
- "section": "Basic Idea",
254
- "sec_num": "3.1"
255
- },
256
- {
257
- "text": "hag-gyo-saeng hwal 4. hag-gyo-saeng-hwal \u00a2",
258
- "cite_spans": [],
259
- "ref_spans": [],
260
- "eq_spans": [],
261
- "section": "Basic Idea",
262
- "sec_num": "3.1"
263
- },
264
- {
265
- "text": "As we mentioned earlier, it is desirable that the eojeol is segmented in the position where each sequence of syllables to be divided occurs frequently enough in training data. As the length of a sequence of syllables is shorter in Korean, it occurs more frequently. That is, the shorter part usually have higher frequency than the other (longer) part when we divide syllables into two. Moreover, if the other part is the syllables that we rarely see in texts, then the part would not be a word. In the first of the above example, hag is a sequence of syllable appearing frequently, but gyo-saeng-hwa! is not. Actually, gyosaeng-hwal is not a word. On the other hand, both hag-gyo and saeng-hwal are frequently occurring syllables, and actually they are all words. Put another way, if it is unlikely that one sequence of syllables is a word, then it is more likely that the entire syllables are not segmented. The min-max composition is a suitable operation for this case. Therefore, we first take the minimum value from the function Word for each possibility of segmentation, and then we choose the maximum from the selected minimums. Also, the argument taking the maximum is selected as the most likely segmentation result.",
266
- "cite_spans": [],
267
- "ref_spans": [],
268
- "eq_spans": [],
269
- "section": "Basic Idea",
270
- "sec_num": "3.1"
271
- },
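The selection over the four candidate segmentations can then be sketched as follows (likelihood values invented; the dictionary lookup stands in for the Word function of Equation (1)):

```python
# Toy likelihoods: sequences that are not words get 0.0 by default.
Word = {"hag": 0.006, "hag-gyo": 0.004, "saeng-hwal": 0.003,
        "hwal": 0.001}.get

candidates = [
    ("hag", "gyo-saeng-hwal"),
    ("hag-gyo", "saeng-hwal"),
    ("hag-gyo-saeng", "hwal"),
    ("hag-gyo-saeng-hwal",),      # the unsegmented alternative
]

# First the min over each candidate's parts, then the max over candidates.
best = max(candidates, key=lambda parts: min(Word(p, 0.0) for p in parts))
print(best)  # ('hag-gyo', 'saeng-hwal')
```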
272
- {
273
- "text": "Here, Word(si... sj) is assigned the frequency of the syllables si... sj from the dictionary SND. Besides, if two minimums are equal, the entire syllable such as hag-gyo-saeng-hwal, if compared, is preferred, the values of the other sequence of syllables are compared or the dominant pattern has the priority.",
274
- "cite_spans": [],
275
- "ref_spans": [],
276
- "eq_spans": [],
277
- "section": "Basic Idea",
278
- "sec_num": "3.1"
279
- },
280
- {
281
- "text": "In this section, we generalize the word segmentation algorithm based on data obtained by the training method described in the previous section. In this case, we can hardly regard the sequence of syllable 'hag-gyo' as the combination of two words 'hag' and 'gyo'. The algorithm can be applied recursively from individual syllable to the entire syllable of the compound noun. The segmentation algorithm is effectively implemented by borrowing the CYK parsing method. Since we use the bottom-up strategy, the execution looks like composition rather than segmentation. After all possible segmentation of syllables being checked, the final result is put in the top of the table. When a compound noun is composed of n syllables, i.e. sis2.., s,~, the composition is started from each si (i = 1... n). Thus, the possibility that the individual syllable forms a word is recorded in the cell of the first row.",
282
- "cite_spans": [],
283
- "ref_spans": [],
284
- "eq_spans": [],
285
- "section": "Segmentation Algorithm",
286
- "sec_num": "3.2"
287
- },
288
- {
289
- "text": "Here, Ci,j is an element of CYK table where the segment result of the syllables sj,...j+i-1 is stored (Figure 2) . For instance, the segmentation result such that ar g max(min ( W ord( s l ) , Word(s2)), Word(s1 s2)) is stored in C1,2. What is interesting here is that the procedure follows the dynamic programming.",
290
- "cite_spans": [
291
- {
292
- "start": 176,
293
- "end": 190,
294
- "text": "( W ord( s l )",
295
- "ref_id": null
296
- }
297
- ],
298
- "ref_spans": [
299
- {
300
- "start": 102,
301
- "end": 112,
302
- "text": "(Figure 2)",
303
- "ref_id": null
304
- }
305
- ],
306
- "eq_spans": [],
307
- "section": "Segmentation Algorithm",
308
- "sec_num": "3.2"
309
- },
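A sketch of this table in code (names and values invented): cells keyed by (length, start) hold the best value and analysis found so far, with C2,1 computed exactly as described:

```python
# cell[(i, j)] = (best value, best analysis) for the i syllables starting at j
W = {"hag": 0.006, "gyo": 0.002, "hag-gyo": 0.004}.get
cell = {
    (1, 1): (W("hag", 0.0), "hag"),    # first row: individual syllables
    (1, 2): (W("gyo", 0.0), "gyo"),
}

# C2,1 = argmax(min(Word(s1), Word(s2)), Word(s1s2))
split = (min(cell[(1, 1)][0], cell[(1, 2)][0]), "hag | gyo")
whole = (W("hag-gyo", 0.0), "hag-gyo")
cell[(2, 1)] = max(split, whole)
print(cell[(2, 1)])  # (0.004, 'hag-gyo'): kept as one word
```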
310
- {
311
- "text": "Thus, each cell C~,j has the most probable segmentation result for a series of syllables sj ..... j+i-1-Namely, C1,2 and C2,3 have the most likely segmentation of sis2 and s2s3 respectively. When the segmentation of sls2s3 is about to be checked, min(value (C2,1), value(C1,3) ), Table min (value(Cl,1),value(C2,2)) and Word(sls2s3) are compared to determine the segmentation for the syllables, because all Ci,j have the most likely segmentation.",
312
- "cite_spans": [
313
- {
314
- "start": 322,
315
- "end": 334,
316
- "text": "Word(sls2s3)",
317
- "ref_id": null
318
- }
319
- ],
320
- "ref_spans": [
321
- {
322
- "start": 257,
323
- "end": 276,
324
- "text": "(C2,1), value(C1,3)",
325
- "ref_id": "FIGREF1"
326
- },
327
- {
328
- "start": 280,
329
- "end": 291,
330
- "text": "Table min",
331
- "ref_id": null
332
- }
333
- ],
334
- "eq_spans": [],
335
- "section": "Segmentation Algorithm",
336
- "sec_num": "3.2"
337
- },
338
- {
339
- "text": "Here, value (Ci,j) represents the possibility value of Ci,j.",
340
- "cite_spans": [
341
- {
342
- "start": 12,
343
- "end": 18,
344
- "text": "(Ci,j)",
345
- "ref_id": null
346
- }
347
- ],
348
- "ref_spans": [],
349
- "eq_spans": [],
350
- "section": "Segmentation Algorithm",
351
- "sec_num": "3.2"
352
- },
353
- {
354
- "text": "Then, we can describe the segmentation algorithm as follows:",
355
- "cite_spans": [],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "Segmentation Algorithm",
359
- "sec_num": "3.2"
360
- },
361
- {
362
- "text": "When it is about to make the segmentation of syllables s~... sj, the segmentation results of less length of syllables like si...sj-1, S~+l... sj and so forth would be already stored in the table. In order to make analysis of si... s j, we combine two shorter length of analyses and the word generation possibilities are computed and checked.",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Segmentation Algorithm",
367
- "sec_num": "3.2"
368
- },
369
- {
370
- "text": "To make it easy to explain the algorithm, let us take an example compound noun 'hag-gyo-saeng-hwa~ (school life) which is segmented with 'haggyo' (school) and 'saenghwar (life) (Figure 3) . When it comes up to cell C4,1, we have to make the most probable segmentation for 'hag-gyo-saeng-hwal' i.e. SlS2S3S4. There are three kinds of sequences of syllables, i.e. sl in CI,1, sis2 in C2,1 and SlS2S3 in C3,1 that can construct the word consisting of 8182s384 which would be put in Ca,1. For instance, the word sls2s3s4 (hag-gyo-saeng-hwal) is made with Sl (hag) combined with sus3s4 (gyo-saeng-hwal). Likewise, it might be made by sis2 combined with s3s4 and sls2s3 combined with s4. Since each cell has the most probable result and its value, it is simple to find the best segmentation for each syllables. In addition, four cases, including the whole sequences of syllables, are compared to make segmentation of SlS2SaS4 as follows:",
371
- "cite_spans": [],
372
- "ref_spans": [
373
- {
374
- "start": 177,
375
- "end": 187,
376
- "text": "(Figure 3)",
377
- "ref_id": null
378
- }
379
- ],
380
- "eq_spans": [],
381
- "section": "Segmentation Algorithm",
382
- "sec_num": "3.2"
383
- },
384
- {
385
- "text": "1. rain (value(C3,1) , value(C3,4)) 2. min(value(C2,1), value(C2,3)) 3. min(value( Cl,1), value(C3,2)) 4. Word(SlS2SaS4) = Word (hag-gyo-saeng-hwal) Again, the most probable segmentation result is put in C4,1 with the likelihood value for its segmentation. We call it MLS (Most Likely Segmentation) ",
386
- "cite_spans": [
387
- {
388
- "start": 8,
389
- "end": 20,
390
- "text": "(value(C3,1)",
391
- "ref_id": null
392
- },
393
- {
394
- "start": 128,
395
- "end": 148,
396
- "text": "(hag-gyo-saeng-hwal)",
397
- "ref_id": null
398
- }
399
- ],
400
- "ref_spans": [],
401
- "eq_spans": [],
402
- "section": "Segmentation Algorithm",
403
- "sec_num": "3.2"
404
- },
405
- {
406
- "text": "arg max(min(w(hag),w(gyo)),w(hag-gyo))",
407
- "cite_spans": [],
408
- "ref_spans": [],
409
- "eq_spans": [],
410
- "section": ".__",
411
- "sec_num": null
412
- },
413
- {
414
- "text": "Figure 3: State of table when analyzing 'hag-gyosaeng-hwal'. Here, w(si . . . sj) = value (Cij) which is found in the following way:",
415
- "cite_spans": [
416
- {
417
- "start": 90,
418
- "end": 95,
419
- "text": "(Cij)",
420
- "ref_id": null
421
- }
422
- ],
423
- "ref_spans": [],
424
- "eq_spans": [],
425
- "section": ".__",
426
- "sec_num": null
427
- },
428
- {
429
- "text": "MLS(C4,z) = ar g max (rain(value(C3,1) , value( C3,a ) ), rain(value(G2,1), value(C2,3)), rain(value(C1,1), value(C3,2)), Word(sls2s3sa))",
430
- "cite_spans": [
431
- {
432
- "start": 21,
433
- "end": 38,
434
- "text": "(rain(value(C3,1)",
435
- "ref_id": null
436
- }
437
- ],
438
- "ref_spans": [],
439
- "eq_spans": [],
440
- "section": ".__",
441
- "sec_num": null
442
- },
443
- {
444
- "text": "From the four cases, the maximum value and the segmentation result are selected and recorded in C4,1. To generalize it, the algorithm is described as shown in Figure 4 .",
445
- "cite_spans": [],
446
- "ref_spans": [
447
- {
448
- "start": 159,
449
- "end": 167,
450
- "text": "Figure 4",
451
- "ref_id": null
452
- }
453
- ],
454
- "eq_spans": [],
455
- "section": ".__",
456
- "sec_num": null
457
- },
458
- {
459
- "text": "The algorithm is straightforward. Let Word and MLS be the likelihood of being a noun and the most likely segmentation for a sequence of syllables. In the initialization step, each cell of the table is assigned Word value for a sequence of syllables sj ... sj+i+l using its frequency if it is found in SND. In other words, if the value of Word for the sequence in each cell is greater than zero, the syllables might be as a noun a part of a compound noun and so the value is recorded as MLS. It could be substituted by more likely one in the segmentation process.",
460
- "cite_spans": [],
461
- "ref_spans": [],
462
- "eq_spans": [],
463
- "section": ".__",
464
- "sec_num": null
465
- },
466
- {
467
- "text": "In order to make it efficient, the segmentation result is put as MLS instead of the syllables in case the sequence of syllables exists in the HBND. The minimum of each Word for constituents of the result as Word is recorded. Then, the segmenter compares possible analyses to make a larger one as shown in Figure 4 . Whenever Word of the entire syllables is less than that of segmented one, the syllables and value are replaced with the segmented result and its value. For instance, sl + s2 and its likelihood substitutes C2,1 if min(Word(sl), Word(s2)) > Word(sis2). When the entire syllables from the first to nth syllable are processed, C,~,x has the segmentation result.",
468
- "cite_spans": [],
469
- "ref_spans": [
470
- {
471
- "start": 305,
472
- "end": 313,
473
- "text": "Figure 4",
474
- "ref_id": null
475
- }
476
- ],
477
- "eq_spans": [],
478
- "section": ".__",
479
- "sec_num": null
480
- },
481
- {
482
- "text": "The overall complexity of the algorithm follows that of CYK parsing, O(n3).",
483
- "cite_spans": [],
484
- "ref_spans": [],
485
- "eq_spans": [],
486
- "section": ".__",
487
- "sec_num": null
488
- },
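Pulling the pieces together, the whole procedure can be sketched compactly (this is not the paper's Figure 4 verbatim; the dictionary and counts are invented, and romanized strings stand in for Hangul syllables). The table is filled bottom-up, and a span's analysis is replaced whenever some binary split has a higher min-score:

```python
def segment(syllables, snd, fq_n):
    """CYK-style min-max segmentation of a compound noun (sketch).

    syllables: list of syllables s1 .. sn
    snd:       simple-noun frequencies (the SND)
    fq_n:      total frequency mass of simple nouns
    Returns (value, parts) for the whole input, i.e. the content of Cn,1.
    """
    n = len(syllables)

    def word(i, j):  # Word(sj ... sj+i-1), Equation (1)
        return snd.get("".join(syllables[j - 1:j + i - 1]), 0) / fq_n

    cell = {}                            # cell[(i, j)] = (value, parts)
    for i in range(1, n + 1):            # span length, bottom-up
        for j in range(1, n - i + 2):    # start position
            # initialization: the whole span taken as one (possible) noun
            best = (word(i, j), ["".join(syllables[j - 1:j + i - 1])])
            for k in range(1, i):        # every binary split of the span
                left, right = cell[(k, j)], cell[(i - k, j + k)]
                cand = (min(left[0], right[0]), left[1] + right[1])
                if cand[0] > best[0]:    # strict >: ties keep the whole span
                    best = cand
            cell[(i, j)] = best
    return cell[(n, 1)]

# Toy run with invented counts; expected split: haggyo | saenghwal
snd = {"haggyo": 120, "saenghwal": 95, "hag": 40, "hwal": 3}
print(segment(["hag", "gyo", "saeng", "hwal"], snd, sum(snd.values())))
```

The strict `>` in the update implements the tie-breaking rule stated earlier: when a split scores no better than the unsegmented span, the whole sequence is preferred.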
489
- {
490
- "text": "For the final result, we should take into consideration several issues which are related with the syllables that left unsegmented. There are several reasons that the given string remains unsegmented: 'geon-chug-sa' and 'si-heom', which have the meanings of authorized architect and examination. In this case, the unknown noun is caused by the suffix such as 'sa' because the suffix derives many words.",
491
- "cite_spans": [
492
- {
493
- "start": 200,
494
- "end": 229,
495
- "text": "'geon-chug-sa' and 'si-heom',",
496
- "ref_id": null
497
- }
498
- ],
499
- "ref_spans": [],
500
- "eq_spans": [],
501
- "section": "Default Analysis and Tuning",
502
- "sec_num": "3.3"
503
- },
504
- {
505
- "text": "However, it is known that it is very difficult to treat the kinds of suffixes since the suffix like 'sa' is a very frequently used character in Korean and thus prone to make oversegmentation if included in basic morphological analysis.",
506
- "cite_spans": [],
507
- "ref_spans": [],
508
- "eq_spans": [],
509
- "section": "Default Analysis and Tuning",
510
- "sec_num": "3.3"
511
- },
512
- {
513
- "text": "2. The string might consist of a proper noun alad a noun representing a position or geometric information. For instance, a compound noun 'kimdae-jung-dae-tong-ryeong' is composed of 'kimdae-jung' and 'dae-tong-ryeong' where the former is personal name and the latter means president respectively.",
514
- "cite_spans": [],
515
- "ref_spans": [],
516
- "eq_spans": [],
517
- "section": "Default Analysis and Tuning",
518
- "sec_num": "3.3"
519
- },
520
- {
521
- "text": "3. The string might be a proper noun itself. For example, 'willi'amseu' is a transliterated word for foreign name 'Williams' and 'hong-gil-dong'",
522
- "cite_spans": [],
523
- "ref_spans": [],
524
- "eq_spans": [],
525
- "section": "Default Analysis and Tuning",
526
- "sec_num": "3.3"
527
- },
528
- {
529
- "text": "is a personal name in Korean. Generally, since it has a different sequence of syllables from in a general Korean word, it often remains unsegmented.",
530
- "cite_spans": [],
531
- "ref_spans": [],
532
- "eq_spans": [],
533
- "section": "Default Analysis and Tuning",
534
- "sec_num": "3.3"
535
- },
536
- {
537
- "text": "If the basic segmentation is failed, three procedures would be executed for solving three problems above. For the first issue, we use the set of distinct nouns. That is, the offset pointer is stored in the initialization step as well as frequency of each noun in compound noun is recorded in the table. Attention should be paid to non-frequent sequence of syllables (ones in the set of supplementary nouns) in the default segmentation because it could be found in any proper noun such as personal names, place names, etc or transliterated words. It is known that the performance drops if all nouns in the compound noun segmentation dictionary are considered for default segmentation. We save the pointer to the boundary only when a noun in distinct set appears. For the above example 'geon-chug-sa-si-heom', the default segmentation would be 'geon-chug-sa' and 'si-heom' since 'si-heom' is in the set of distinct nouns and the pointer is set before 'si-heom' (Figure 5 ).",
538
- "cite_spans": [],
539
- "ref_spans": [
540
- {
541
- "start": 959,
542
- "end": 968,
543
- "text": "(Figure 5",
544
- "ref_id": null
545
- }
546
- ],
547
- "eq_spans": [],
548
- "section": "Default Analysis and Tuning",
549
- "sec_num": "3.3"
550
- },
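The pointer-based fallback can be sketched as follows (function and variable names invented; romanized strings stand in for syllables): a boundary is saved only where a noun from the distinct set is recognized, and everything between saved boundaries, such as a potential proper noun, is left intact:

```python
def default_segment(text, distinct_nouns):
    """Fallback segmentation: split only around distinct nouns (sketch)."""
    max_len = max(map(len, distinct_nouns))
    boundaries, i = [], 0
    while i < len(text):
        for k in range(max_len, 0, -1):       # longest match first
            if text[i:i + k] in distinct_nouns:
                if i > 0:
                    boundaries.append(i)       # pointer before the noun
                i += k
                if i < len(text):
                    boundaries.append(i)       # pointer after the noun
                break
        else:
            i += 1                             # no match: advance one unit
    parts, prev = [], 0
    for b in boundaries + [len(text)]:
        if b > prev:
            parts.append(text[prev:b])
        prev = b
    return parts

# 'siheom' is a distinct noun; 'geonchugsa' stays as one unknown chunk:
print(default_segment("geonchugsasiheom", {"siheom"}))
# ['geonchugsa', 'siheom']
```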
551
- {
552
- "text": "For the test of compound noun segmentation, we first extracted compound noun from ETRI POS tagged corpus 3. By the processing, 1774 types of compound nouns were extracted, which was used as a gold standard test set. We evaluated our system by two methods: (1) the precision and recall rate, and (2) segmentation accuracy per compound noun which we refer to as SA. They are defined respectively as follows: What influences on the Korean IR system is whether words are appropriately segmented or not. The precision and recall estimate how appropriate the segmentation results are. They are 98.04% and 97.80% respectively, which shows that our algorithm is very effective (Table 3) .",
553
- "cite_spans": [],
554
- "ref_spans": [
555
- {
556
- "start": 669,
557
- "end": 678,
558
- "text": "(Table 3)",
559
- "ref_id": "TABREF6"
560
- }
561
- ],
562
- "eq_spans": [],
563
- "section": "Default Analysis and Tuning",
564
- "sec_num": "3.3"
565
- },
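The three measures can be computed as in the following sketch (the data layout is invented; duplicate constituents within one compound noun are ignored for brevity):

```python
def evaluate(proposed, gold):
    """proposed, gold: one list of constituent nouns per compound noun."""
    correct = sum(len(set(p) & set(g)) for p, g in zip(proposed, gold))
    precision = correct / sum(len(p) for p in proposed)
    recall = correct / sum(len(g) for g in gold)
    sa = sum(p == g for p, g in zip(proposed, gold)) / len(gold)
    return precision, recall, sa

gold     = [["hag-gyo", "saeng-hwal"], ["geon-chug-sa", "si-heom"]]
proposed = [["hag-gyo", "saeng-hwal"], ["geon", "chug-sa", "si-heom"]]
print(evaluate(proposed, gold))  # (0.6, 0.75, 0.5)
```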
566
- {
567
- "text": "SA reflects how accurate the segmentation is for a compound noun at all. We compared two methods:",
568
- "cite_spans": [],
569
- "ref_spans": [],
570
- "eq_spans": [],
571
- "section": "Default Analysis and Tuning",
572
- "sec_num": "3.3"
573
- },
574
- {
575
- "text": "(1) using only the segmentation algorithm with default analysis which is a baseline of our system and so is needed to estimate the accuracy of the algorithm. (2) using both the built-in dictionary and the segmentation algorithm which reflects system accuracy as a whole. As shown in Table 4 , the baseline performance using only distinct nouns and the algorithm is about 94.3% and fairly good. From the results, we can find that the distinct nouns has great impact on compound noun segmentation. Also, the overall segmentation accuracy for the gold standard is about 97.29% which is a very good result for the application system. In addition, it shows that the built-in dictionary supplements the algorithm which results in better segmentation.",
576
- "cite_spans": [],
577
- "ref_spans": [
578
- {
579
- "start": 283,
580
- "end": 290,
581
- "text": "Table 4",
582
- "ref_id": "TABREF7"
583
- }
584
- ],
585
- "eq_spans": [],
586
- "section": "Default Analysis and Tuning",
587
- "sec_num": "3.3"
588
- },
589
- {
590
- "text": "Lastly, we compare our system with the previous work by (Yun et al. , 1997) . It is impossible that we directly compare our result with theirs, since the test set is different. It was reported that the accuracy given in the paper is about 95.6%. When comparing the performance only in terms of the accuracy, our system outperforms theirs. Embeded in the morphological analyzer, the compound noun segmentater is currently being used for some projects on MT and IE which are worked in several institutes and it turns out that the system is very effective. In this paper, we presented the new method for Korean compound noun segmentation. First, we proposed the lexical acquisition for compound noun analysis, which consists of the manually constructed segmentation dictionary (HBSD) and the dictionary for applying the segmentation algorithm (SND). The hand-built segmentation dictionary was made manually for compound nouns extracted from corpus. The simple noun dictionary is based on very frequently occurring nouns which are called distinct nouns because they are clues for identifying constituents of compound nouns. Second, the compound noun was segmented based on the modification of CYK tabular parsing and min-max composition, which was proven to be the very effective method by experiments. The bottom up approach using min-max operation guarantees the most likely segmentation, being applied in the same way as dynamic programming. With our new method, the result for segmentation is as accurate as 97.29%. Especially, the algorithm made results good enough and the builtin dictionary supplemented the algorithm. Consequently, the methodology is promising and the segmentation system would be helpful for the application system such as machine translation and information retrieval.",
591
- "cite_spans": [
592
- {
593
- "start": 56,
594
- "end": 75,
595
- "text": "(Yun et al. , 1997)",
596
- "ref_id": "BIBREF13"
597
- }
598
- ],
599
- "ref_spans": [],
600
- "eq_spans": [],
601
- "section": "Default Analysis and Tuning",
602
- "sec_num": "3.3"
603
- },
604
- {
605
- "text": "~It is the size of POS tagged corpus currently publicized by ETRI (Electronics and Telecommunications Research Institute) project.",
606
- "cite_spans": [],
607
- "ref_spans": [],
608
- "eq_spans": [],
609
- "section": "",
610
- "sec_num": null
611
- },
612
- {
613
- "text": "Experimental Results",
614
- "cite_spans": [],
615
- "ref_spans": [],
616
- "eq_spans": [],
617
- "section": "",
618
- "sec_num": null
619
- }
620
- ],
621
- "back_matter": [
622
- {
623
- "text": "We thank Prof. Mansuk Song at Yonsei Univ. and Prof. Key-Sun Choi at KAIST to provide data for experiments.",
624
- "cite_spans": [],
625
- "ref_spans": [],
626
- "eq_spans": [],
627
- "section": "Acknowledgement",
628
- "sec_num": "6"
629
- },
630
- {
631
- "text": "/* initialization step */ for i----1 to n do for j=l to n-i+l do value (Ci,j) ",
632
- "cite_spans": [
633
- {
634
- "start": 71,
635
- "end": 77,
636
- "text": "(Ci,j)",
637
- "ref_id": null
638
- }
639
- ],
640
- "ref_spans": [],
641
- "eq_spans": [],
642
- "section": "annex",
643
- "sec_num": null
644
- },
645
- {
646
- "text": "If this procedure is failed, the sequence of syllables is checked whether it might be proper noun or not. Since proper noun in Korean could have a kind of nominal suffix such as 'daetongryeong(president)' or 'ssi(Mr/Ms)' as mentioned above, we can identify it by detaching the nominal suffixes. If there does not exist any nominal suffix, then the entire syllables would be regarded just as the transliterated foreign word or a proper noun like personal or place name.",
647
- "cite_spans": [],
648
- "ref_spans": [],
649
- "eq_spans": [],
650
- "section": ": The segmentation algorithm",
651
- "sec_num": null
652
- }
653
- ],
654
- "bib_entries": {
655
- "BIBREF0": {
656
- "ref_id": "b0",
657
- "title": "Generalized Unknown Morpheme Guessing for Hybrid POS Tagging of Korean",
658
- "authors": [
659
- {
660
- "first": "J",
661
- "middle": [],
662
- "last": "Cha",
663
- "suffix": ""
664
- },
665
- {
666
- "first": "G",
667
- "middle": [],
668
- "last": "Lee",
669
- "suffix": ""
670
- },
671
- {
672
- "first": "J",
673
- "middle": [],
674
- "last": "Lee",
675
- "suffix": ""
676
- }
677
- ],
678
- "year": 1998,
679
- "venue": "Proceedings of the 6th Workshop on Very Large Corpora",
680
- "volume": "",
681
- "issue": "",
682
- "pages": "",
683
- "other_ids": {},
684
- "num": null,
685
- "urls": [],
686
- "raw_text": "Cha, J., Lee, G. and Lee, J. 1998. Generalized Un- known Morpheme Guessing for Hybrid POS Tag- ging of Korean. In Proceedings of the 6th Work- shop on Very Large Corpora.",
687
- "links": null
688
- },
689
- "BIBREF1": {
690
- "ref_id": "b1",
691
- "title": "KAIST Tree Bank Project for Korean: Present and Future Development",
692
- "authors": [
693
- {
694
- "first": "K",
695
- "middle": [
696
- "S"
697
- ],
698
- "last": "Choi",
699
- "suffix": ""
700
- },
701
- {
702
- "first": "Y",
703
- "middle": [
704
- "S"
705
- ],
706
- "last": "Han",
707
- "suffix": ""
708
- },
709
- {
710
- "first": "Y",
711
- "middle": [
712
- "G"
713
- ],
714
- "last": "Han",
715
- "suffix": ""
716
- },
717
- {
718
- "first": "O",
719
- "middle": [
720
- "W"
721
- ],
722
- "last": "Kwon",
723
- "suffix": ""
724
- }
725
- ],
726
- "year": 1994,
727
- "venue": "Proceedings of the International Workshop on Sharable Natural Language Resources",
728
- "volume": "",
729
- "issue": "",
730
- "pages": "",
731
- "other_ids": {},
732
- "num": null,
733
- "urls": [],
734
- "raw_text": "Choi, K. S., Han, Y. S., Han, Y. G., and Kwon, O. W. 1994. KAIST Tree Bank Project for Korean: Present and Future Development. In Proceedings of the International Workshop on Sharable Natu- ral Language Resources.",
735
- "links": null
736
- },
737
- "BIBREF2": {
738
- "ref_id": "b2",
739
- "title": "Spelling Correction Using Context",
740
- "authors": [
741
- {
742
- "first": "M",
743
- "middle": [
744
- "A"
745
- ],
746
- "last": "Elmi",
747
- "suffix": ""
748
- },
749
- {
750
- "first": "M",
751
- "middle": [],
752
- "last": "Evens",
753
- "suffix": ""
754
- }
755
- ],
756
- "year": 1998,
757
- "venue": "Proceedings",
758
- "volume": "",
759
- "issue": "",
760
- "pages": "",
761
- "other_ids": {},
762
- "num": null,
763
- "urls": [],
764
- "raw_text": "Elmi, M. A. and Evens, M. 1998. Spelling Cor- rection Using Context. In Proceedings o] COL- ING/A CL 98",
765
- "links": null
766
- },
767
- "BIBREF3": {
768
- "ref_id": "b3",
769
- "title": "Introduction to Automata Theory, Languages, and Computation",
770
- "authors": [
771
- {
772
- "first": "J",
773
- "middle": [
774
- "E"
775
- ],
776
- "last": "Hopcroft",
777
- "suffix": ""
778
- },
779
- {
780
- "first": "J",
781
- "middle": [
782
- "D"
783
- ],
784
- "last": "Ullman",
785
- "suffix": ""
786
- }
787
- ],
788
- "year": 1979,
789
- "venue": "",
790
- "volume": "",
791
- "issue": "",
792
- "pages": "",
793
- "other_ids": {},
794
- "num": null,
795
- "urls": [],
796
- "raw_text": "Hopcroft, J. E. and Ullman, J. D. 1979. Introduc- tion to Automata Theory, Languages, and Com- putation.",
797
- "links": null
798
- },
799
- "BIBREF4": {
800
- "ref_id": "b4",
801
- "title": "Identifying Unknown Words in Chinese Corpora",
802
- "authors": [
803
- {
804
- "first": "W",
805
- "middle": [],
806
- "last": "Jin",
807
- "suffix": ""
808
- },
809
- {
810
- "first": "L",
811
- "middle": [],
812
- "last": "Chen",
813
- "suffix": ""
814
- }
815
- ],
816
- "year": 1995,
817
- "venue": "Proceedings of NL-PRS 95",
818
- "volume": "",
819
- "issue": "",
820
- "pages": "",
821
- "other_ids": {},
822
- "num": null,
823
- "urls": [],
824
- "raw_text": "Jin, W. and Chen, L. 1995. Identifying Unknown Words in Chinese Corpora In Proceedings of NL- PRS 95",
825
- "links": null
826
- },
827
- "BIBREF5": {
828
- "ref_id": "b5",
829
- "title": "Using n-grams for Korean Text Retrieval",
830
- "authors": [
831
- {
832
- "first": "J",
833
- "middle": [
834
- "H"
835
- ],
836
- "last": "Lee",
837
- "suffix": ""
838
- },
839
- {
840
- "first": "J",
841
- "middle": [
842
- "S"
843
- ],
844
- "last": "Ahn",
845
- "suffix": ""
846
- }
847
- ],
848
- "year": 1996,
849
- "venue": "Proceedings of 19th",
850
- "volume": "",
851
- "issue": "",
852
- "pages": "",
853
- "other_ids": {},
854
- "num": null,
855
- "urls": [],
856
- "raw_text": "Lee, J. H. and Ahn, J. S. 1996. Using n-grams for Korean Text Retrieval. In Proceedings of 19th",
857
- "links": null
858
- },
859
- "BIBREF6": {
860
- "ref_id": "b6",
861
- "title": "Annual International A CM SIGIR Conference on Research and Development in Information Retrieval",
862
- "authors": [],
863
- "year": null,
864
- "venue": "",
865
- "volume": "",
866
- "issue": "",
867
- "pages": "",
868
- "other_ids": {},
869
- "num": null,
870
- "urls": [],
871
- "raw_text": "Annual International A CM SIGIR Conference on Research and Development in Information Re- trieval",
872
- "links": null
873
- },
874
- "BIBREF7": {
875
- "ref_id": "b7",
876
- "title": "Study and Implementation of Nondictionary Chinese Segmentation",
877
- "authors": [
878
- {
879
- "first": "J",
880
- "middle": [],
881
- "last": "Li",
882
- "suffix": ""
883
- },
884
- {
885
- "first": "K",
886
- "middle": [],
887
- "last": "Wang",
888
- "suffix": ""
889
- }
890
- ],
891
- "year": 1995,
892
- "venue": "Proceedings of NLPRS 95",
893
- "volume": "",
894
- "issue": "",
895
- "pages": "",
896
- "other_ids": {},
897
- "num": null,
898
- "urls": [],
899
- "raw_text": "Li, J. and Wang, K. 1995. Study and Implementa- tion of Nondictionary Chinese Segmentation. In Proceedings of NLPRS 95",
900
- "links": null
901
- },
902
- "BIBREF8": {
903
- "ref_id": "b8",
904
- "title": "A New Method of N-gram Statistics for Large Number of N and Automatic Extraction of Words and Phrases from Large Text Data of Japanese",
905
- "authors": [
906
- {
907
- "first": "M",
908
- "middle": [],
909
- "last": "Nagao",
910
- "suffix": ""
911
- },
912
- {
913
- "first": "S",
914
- "middle": [],
915
- "last": "Mori",
916
- "suffix": ""
917
- }
918
- ],
919
- "year": 1994,
920
- "venue": "Proceedings of COLING 94",
921
- "volume": "",
922
- "issue": "",
923
- "pages": "",
924
- "other_ids": {},
925
- "num": null,
926
- "urls": [],
927
- "raw_text": "Nagao, M. and Mori, S. 1994. A New Method of N-gram Statistics for Large Number of N and Au- tomatic Extraction of Words and Phrases from Large Text Data of Japanese. In Proceedings of COLING 94",
928
- "links": null
929
- },
930
- "BIBREF9": {
931
- "ref_id": "b9",
932
- "title": "Recognizing Korean Unknown Words by Comparatively Analyzing Example Words",
933
- "authors": [
934
- {
935
- "first": "B",
936
- "middle": [],
937
- "last": "Park",
938
- "suffix": ""
939
- },
940
- {
941
- "first": "R",
942
- "middle": [],
943
- "last": "Hwang",
944
- "suffix": ""
945
- },
946
- {
947
- "first": "Y",
948
- "middle": [
949
- "S"
950
- ],
951
- "last": "Rim",
952
- "suffix": ""
953
- },
954
- {
955
- "first": "H",
956
- "middle": [
957
- "C"
958
- ],
959
- "last": "",
960
- "suffix": ""
961
- }
962
- ],
963
- "year": 1997,
964
- "venue": "Proceedings o] ICCPOL 97",
965
- "volume": "",
966
- "issue": "",
967
- "pages": "",
968
- "other_ids": {},
969
- "num": null,
970
- "urls": [],
971
- "raw_text": "Park, B, R., Hwang, Y. S. and Rim, H. C. 1997. Recognizing Korean Unknown Words by Compar- atively Analyzing Example Words. In Proceedings o] ICCPOL 97",
972
- "links": null
973
- },
974
- "BIBREF10": {
975
- "ref_id": "b10",
976
- "title": "A Stochastic Finite-State Wordsegmentation Algorithm for Chinese",
977
- "authors": [
978
- {
979
- "first": "R",
980
- "middle": [
981
- "W"
982
- ],
983
- "last": "Sproat",
984
- "suffix": ""
985
- },
986
- {
987
- "first": "W",
988
- "middle": [],
989
- "last": "Shih",
990
- "suffix": ""
991
- },
992
- {
993
- "first": "W",
994
- "middle": [],
995
- "last": "Gale",
996
- "suffix": ""
997
- },
998
- {
999
- "first": "N",
1000
- "middle": [],
1001
- "last": "Chang",
1002
- "suffix": ""
1003
- }
1004
- ],
1005
- "year": 1994,
1006
- "venue": "Proceedings of the 32nd Annual Meeting",
1007
- "volume": "",
1008
- "issue": "",
1009
- "pages": "",
1010
- "other_ids": {},
1011
- "num": null,
1012
- "urls": [],
1013
- "raw_text": "Sproat, R. W., Shih, W., Gale, W. and Chang, N. 1994. A Stochastic Finite-State Word- segmentation Algorithm for Chinese. In Proceed- ings of the 32nd Annual Meeting o] ACL",
1014
- "links": null
1015
- },
1016
- "BIBREF11": {
1017
- "ref_id": "b11",
1018
- "title": "Information Retrieval Based on Compound Noun Analysis for Exact Term Extraction",
1019
- "authors": [
1020
- {
1021
- "first": "J",
1022
- "middle": [],
1023
- "last": "Yoon",
1024
- "suffix": ""
1025
- },
1026
- {
1027
- "first": "B",
1028
- "middle": [],
1029
- "last": "Kang",
1030
- "suffix": ""
1031
- },
1032
- {
1033
- "first": "K",
1034
- "middle": [
1035
- "S"
1036
- ],
1037
- "last": "Choi",
1038
- "suffix": ""
1039
- }
1040
- ],
1041
- "year": 1999,
1042
- "venue": "",
1043
- "volume": "",
1044
- "issue": "",
1045
- "pages": "",
1046
- "other_ids": {},
1047
- "num": null,
1048
- "urls": [],
1049
- "raw_text": "Yoon, J., Kang, B. and Choi, K. S. 1999. Informa- tion Retrieval Based on Compound Noun Analysis for Exact Term Extraction. Submitted in Journal of Computer Processing of Orientla Language.",
1050
- "links": null
1051
- },
1052
- "BIBREF12": {
1053
- "ref_id": "b12",
1054
- "title": "Word Segmentation Based on Estimation of Words from Examples",
1055
- "authors": [
1056
- {
1057
- "first": "J",
1058
- "middle": [],
1059
- "last": "Yoon",
1060
- "suffix": ""
1061
- },
1062
- {
1063
- "first": "W",
1064
- "middle": [],
1065
- "last": "Lee",
1066
- "suffix": ""
1067
- },
1068
- {
1069
- "first": "K",
1070
- "middle": [
1071
- "S"
1072
- ],
1073
- "last": "Choi",
1074
- "suffix": ""
1075
- }
1076
- ],
1077
- "year": 1999,
1078
- "venue": "",
1079
- "volume": "",
1080
- "issue": "",
1081
- "pages": "",
1082
- "other_ids": {},
1083
- "num": null,
1084
- "urls": [],
1085
- "raw_text": "Yoon, J., Lee, W. and Choi, K. S. 1999. Word Seg- mentation Based on Estimation of Words from Examples. Technical Report.",
1086
- "links": null
1087
- },
1088
- "BIBREF13": {
1089
- "ref_id": "b13",
1090
- "title": "Segmenting Korean Compound Nouns Using Statistical Information and a Preference Rules",
1091
- "authors": [
1092
- {
1093
- "first": "B",
1094
- "middle": [
1095
- "H"
1096
- ],
1097
- "last": "Yun",
1098
- "suffix": ""
1099
- },
1100
- {
1101
- "first": "M",
1102
- "middle": [
1103
- "C"
1104
- ],
1105
- "last": "Cho",
1106
- "suffix": ""
1107
- },
1108
- {
1109
- "first": "H",
1110
- "middle": [
1111
- "C"
1112
- ],
1113
- "last": "Rim",
1114
- "suffix": ""
1115
- }
1116
- ],
1117
- "year": 1997,
1118
- "venue": "Proceedings of PACLING",
1119
- "volume": "",
1120
- "issue": "",
1121
- "pages": "",
1122
- "other_ids": {},
1123
- "num": null,
1124
- "urls": [],
1125
- "raw_text": "Yun, B. H., Cho, M. C. and Rim, H. C. 1997. Seg- menting Korean Compound Nouns Using Statis- tical Information and a Preference Rules. In Pro- ceedings of PACLING.",
1126
- "links": null
1127
- }
1128
- },
1129
- "ref_entries": {
1130
- "FIGREF0": {
1131
- "uris": null,
1132
- "text": ")-t-nssi(seed) chuggu(foot ball)+tim(team)",
1133
- "num": null,
1134
- "type_str": "figure"
1135
- },
1136
- "FIGREF1": {
1137
- "uris": null,
1138
- "text": "Distribution of eojeols in Korean corpus",
1139
- "num": null,
1140
- "type_str": "figure"
1141
- },
1142
- "FIGREF2": {
1143
- "uris": null,
1144
- "text": "Figure 2: Composition Table",
1145
- "num": null,
1146
- "type_str": "figure"
1147
- },
1148
- "FIGREF4": {
1149
- "uris": null,
1150
- "text": "Precision = number of correct constituents in proposed segment results total number o] constituents in proposed segment results Recall = number of correct constituents in proposed segment results total number of constituents in compoundnouns SA = number of correctly segmented compound nouns total number of compoundnouns3The corpus was constructed by the ETRI (Electronics and Telecommunications Research Institute) project for standardization of natural language processing technology and the corpus presented consists of about 270,000 eojeols at present.",
1151
- "num": null,
1152
- "type_str": "figure"
1153
- },
1154
- "TABREF0": {
1155
- "text": "",
1156
- "num": null,
1157
- "type_str": "table",
1158
- "html": null,
1159
- "content": "<table><tr><td>: Examples of compound noun and analysis</td></tr><tr><td>information in built-in dictionary</td></tr></table>"
1160
- },
1161
- "TABREF2": {
1162
- "text": "Example of extraction of distinct nouns. Here N, V, P and E mean tag for noun, verb, postposition and ending and '@' is marked for representation of ambiguous analysis",
1163
- "num": null,
1164
- "type_str": "table",
1165
- "html": null,
1166
- "content": "<table><tr><td/><td/><td>And the SND for</td></tr><tr><td colspan=\"3\">compound noun segmentation is composed of a set</td></tr><tr><td colspan=\"3\">of distinct nouns and a set of supplementary nouns.</td></tr><tr><td colspan=\"3\">The number of simple nouns for compound noun seg-</td></tr><tr><td colspan=\"3\">mentation is about 50,000.</td></tr><tr><td>3</td><td>Compound</td><td>Word Segmentation</td></tr><tr><td/><td>Algorithm</td><td/></tr></table>"
1167
- },
1168
- "TABREF6": {
1169
- "text": "",
1170
- "num": null,
1171
- "type_str": "table",
1172
- "html": null,
1173
- "content": "<table><tr><td colspan=\"3\">: Result 1: Precision and recall rate</td></tr><tr><td/><td>SA</td><td/></tr><tr><td/><td>Whole System</td><td>Baseline</td></tr><tr><td>Number of correct constituents</td><td>1726]1774</td><td>1673/1774</td></tr><tr><td>Rate</td><td>97.29</td><td>94.30</td></tr></table>"
1174
- },
1175
- "TABREF7": {
1176
- "text": "",
1177
- "num": null,
1178
- "type_str": "table",
1179
- "html": null,
1180
- "content": "<table><tr><td/><td>: Result 2: Segmentation accuracy for Compound Noun</td></tr><tr><td>5</td><td>Conclusions</td></tr></table>"
1181
- }
1182
- }
1183
- }
1184
- }
Full_text_JSON/prefixA/json/A00/A00-1028.json DELETED
@@ -1,905 +0,0 @@
1
- {
2
- "paper_id": "A00-1028",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:50.980990Z"
6
- },
7
- "title": "Experiments with Corpus-based LFG Specialization",
8
- "authors": [
9
- {
10
- "first": "Nicola",
11
- "middle": [],
12
- "last": "Cancedda",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Xerox Research Centre Europe",
17
- "location": {
18
- "addrLine": "6, chemin de Maupertuis",
19
- "postCode": "38240",
20
- "settlement": "Meylan",
21
- "country": "France"
22
- }
23
- },
24
- "email": ""
25
- },
26
- {
27
- "first": "Christer",
28
- "middle": [],
29
- "last": "Samuelsson",
30
- "suffix": "",
31
- "affiliation": {
32
- "laboratory": "",
33
- "institution": "Xerox Research Centre Europe",
34
- "location": {
35
- "addrLine": "6, chemin de Maupertuis",
36
- "postCode": "38240",
37
- "settlement": "Meylan",
38
- "country": "France"
39
- }
40
- },
41
- "email": "[email protected]"
42
- }
43
- ],
44
- "year": "",
45
- "venue": null,
46
- "identifiers": {},
47
- "abstract": "Sophisticated grammar formalisms, such as LFG, allow concisely capturing complex linguistic phenomena. The powerful operators provided by such formalisms can however introduce spurious ambiguity, making parsing inefficient. A simple form of corpus-based grammar pruning is evaluated experimentally on two wide-coverage grammars, one Engiish and one French. Speedups of up to a factor 6 were obtained, at a cost in grammatical coverage of about 13%. A two-stage architecture allows achieving significant speedups without introducing additional parse failures.",
48
- "pdf_parse": {
49
- "paper_id": "A00-1028",
50
- "_pdf_hash": "",
51
- "abstract": [
52
- {
53
- "text": "Sophisticated grammar formalisms, such as LFG, allow concisely capturing complex linguistic phenomena. The powerful operators provided by such formalisms can however introduce spurious ambiguity, making parsing inefficient. A simple form of corpus-based grammar pruning is evaluated experimentally on two wide-coverage grammars, one Engiish and one French. Speedups of up to a factor 6 were obtained, at a cost in grammatical coverage of about 13%. A two-stage architecture allows achieving significant speedups without introducing additional parse failures.",
54
- "cite_spans": [],
55
- "ref_spans": [],
56
- "eq_spans": [],
57
- "section": "Abstract",
58
- "sec_num": null
59
- }
60
- ],
61
- "body_text": [
62
- {
63
- "text": "Expressive grammar formalisms allow grammar developers to capture complex linguistic generalizations concisely and elegantly, thus greatly facilitating grammar development and maintenance. (Carrol, 1994) found that the empirical performance when parsing with unification-based grammars is nowhere near the theoretical worst-case complexity. Nonetheless, directly parsing with such grammars, in the form they were developed, can be very inefficient. For this reason, grammars are typically compiled into representations that allow faster parsing. This does however not solve the potential problem of the grammars overgenerating considerably, thus allowing large amounts of spurious ambiguity. Indeed, a current trend in high-coverage parsing, especially when employing a statistical model of language, see, e.g., (Collins 97), is to allow the grammar to massively overgenerate and instead disambiguate by statistical means during or after parsing. If the benefits resulting from more concise grammatical descriptions are to outweigh the costs of spurious ambiguity, the latter must be brought down.",
64
- "cite_spans": [
65
- {
66
- "start": 189,
67
- "end": 203,
68
- "text": "(Carrol, 1994)",
69
- "ref_id": "BIBREF4"
70
- }
71
- ],
72
- "ref_spans": [],
73
- "eq_spans": [],
74
- "section": "Introduction",
75
- "sec_num": "1"
76
- },
77
- {
78
- "text": "In such a situation, corpus-based compilation techniques can drastically improve parsing performance without burdening the grammar developer. The initial, and much seminal work in this area was been carried out by Rayner and coworkers, see (Rayner 1988) , (Samuelsson and Rayner 91) and (Rayner and Carter 1996) . In the current article, we apply similar ideas to Lexical Functional Grammar (LFG) in the incarnation of the Xerox Linguistic Environment (XLE). The goal is to investigate to what extent corpus-based compilation techniques can reduce overgeneration and spurious ambiguity, and increase parsing efficiency, without jeopardizing coverage. The rest of the article is organized as follows: Section 2 presents the relevant aspects of the LFG formalism and the pruning strategy employed, Section 3 describes the experimental setup, Section 4 reports the experimental results and Section 5 relates this to other work.",
79
- "cite_spans": [
80
- {
81
- "start": 240,
82
- "end": 253,
83
- "text": "(Rayner 1988)",
84
- "ref_id": "BIBREF8"
85
- },
86
- {
87
- "start": 256,
88
- "end": 282,
89
- "text": "(Samuelsson and Rayner 91)",
90
- "ref_id": null
91
- },
92
- {
93
- "start": 287,
94
- "end": 311,
95
- "text": "(Rayner and Carter 1996)",
96
- "ref_id": "BIBREF7"
97
- }
98
- ],
99
- "ref_spans": [],
100
- "eq_spans": [],
101
- "section": "Introduction",
102
- "sec_num": "1"
103
- },
104
- {
105
- "text": "The LFG formalism (Kaplan and Bresnan, 1982) allows the right-hand sides (RHS) of grammar rules to consist of a regular expression over grammar symbols. This makes it more appropriate to refer to the grammar rules as rule schemata, since each RHS can potentially be expanded into a (possibly infinite) number of distinct sequences of grammar symbols, each corresponding to a traditional phrase-structure rule. As can easily be imagined, the use of regularexpression operators such as Kleene-star and complementation may introduce a considerable amount of spurious ambiguity. Moreover, the LFG formalism provides operators which --although not increasing its theoretical expressive power --allow rules to be written more concisely. Examples of such operators are the ignore operator, which allows skipping any sequence of grammar symbols that matches a given pattern; the shuffle operator, which allows a set of grammar symbols to occur in any order; and the linear precedence operator, which allows partially specifying the order of grammar symbols. The pruning method we propose consists in eliminating complex operators from the grammar description by considering how they were actually instantiated when parsing a corpus. In LFGs, each rule scheme corresponds to a particular grammar symbol, since different expansions of the same symbol are expressed as alternatives in the regular expression on its RHS. We can define a specific path through the RHS of a rule scheme by the choices ~tf~ 211 made when matching it against some sequence of grammar symbols. Our training data allows us to derive, for each training example, the choices made at each rule expansion. By applying these choices to the rule scheme in isolation, we can derive a phrasestructure rule from it,.",
106
- "cite_spans": [
107
- {
108
- "start": 18,
109
- "end": 44,
110
- "text": "(Kaplan and Bresnan, 1982)",
111
- "ref_id": "BIBREF5"
112
- }
113
- ],
114
- "ref_spans": [],
115
- "eq_spans": [],
116
- "section": "LFG and Grammar Pruning",
117
- "sec_num": "2"
118
- },
119
- {
120
- "text": "The grammar is specialized, or pruned, by retaining all and only those phrase-structure rules that correspond to a path taken through a rule scheme when expanding some node in some training example. Since the grammar formalism requires that each LHS occur only in one rule scheme in the grammar, extracted rules with the same LHS symbol are merged into a single rule scheme with a disjunction operator at its top level. For instance, if a rule scheme with the structure",
121
- "cite_spans": [],
122
- "ref_spans": [],
123
- "eq_spans": [],
124
- "section": "LFG and Grammar Pruning",
125
- "sec_num": "2"
126
- },
127
- {
128
- "text": "A ~ B*{CI D}",
129
- "cite_spans": [],
130
- "ref_spans": [],
131
- "eq_spans": [],
132
- "section": "LFG and Grammar Pruning",
133
- "sec_num": "2"
134
- },
135
- {
136
- "text": "is expanded in the training data only in the following ways",
137
- "cite_spans": [],
138
- "ref_spans": [],
139
- "eq_spans": [],
140
- "section": "LFG and Grammar Pruning",
141
- "sec_num": "2"
142
- },
143
- {
144
- "text": "then it will be replaced by a rule scheme with the following structure",
145
- "cite_spans": [],
146
- "ref_spans": [],
147
- "eq_spans": [],
148
- "section": "A -> C A --+ BC A -+ BD",
149
- "sec_num": null
150
- },
151
- {
152
- "text": "A --+ {CIBC]BD}",
153
- "cite_spans": [],
154
- "ref_spans": [],
155
- "eq_spans": [],
156
- "section": "A -> C A --+ BC A -+ BD",
157
- "sec_num": null
158
- },
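
The specialization just described reduces, in effect, to keeping only the expansions observed per left-hand side. A minimal Python sketch of that step (hypothetical names and data format; an illustration of the idea, not the XLE implementation):

```python
from collections import defaultdict

def prune_grammar(observed_expansions):
    """Specialize a grammar: for each LHS, keep only the RHS sequences
    actually used in correct parses of the training corpus, merged into
    a single top-level disjunction."""
    specialized = {}
    for lhs, rhss in observed_expansions.items():
        specialized[lhs] = sorted(set(tuple(r) for r in rhss))
    return specialized

def format_rule(lhs, rhss):
    alts = " | ".join(" ".join(rhs) for rhs in rhss)
    return f"{lhs} -> {{ {alts} }}"

if __name__ == "__main__":
    # The running example: A -> B* { C | D } was expanded in the
    # treebank only as C, B C and B D.
    observed = defaultdict(list)
    observed["A"].extend([["C"], ["B", "C"], ["B", "D"], ["B", "C"]])
    for lhs, rhss in prune_grammar(observed).items():
        print(format_rule(lhs, rhss))  # A -> { B C | B D | C }
```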
159
- {
160
- "text": "The same approach is taken to replace all regularexpression operators, other than concatenation, with the actual sequences of grammar symbols that are matched against them. A more realistic example, taken from the actual data, is shown in Figure 1 : none of the optional alternative portions following the V is ever used in any correct parse in the corpus. Moreover, the ADVP preceding the V occurs only 0 or 1 times in correct parses. Like other unification-based formalisms, lexical functional grammars allow grammar rules to be annotated with sets of feature-based constraints, here called \"functional descriptions\", whose purpose is both to enforce additional constraints on rule applicability and to build an enriched predicate-argument structure called \"f-structure\", which, together with the parse tree, constitutes the output of the parsing process. As these constraints are maintained verbatim in the specialized version of the rule scheme, this poses no problem for this form of grammar pruning.",
161
- "cite_spans": [],
162
- "ref_spans": [
163
- {
164
- "start": 239,
165
- "end": 247,
166
- "text": "Figure 1",
167
- "ref_id": "FIGREF0"
168
- }
169
- ],
170
- "eq_spans": [],
171
- "section": "A -> C A --+ BC A -+ BD",
172
- "sec_num": null
173
- },
174
- {
175
- "text": "The experiments carried out to determine the effectiveness of corpus-based specialization were performed as illustrated in Figure 2 . Two broadcoverage LFG grammars were used, one for French and one for English, both of which were developed within the Pargram project (Butt et al., 1999) during several years time. The French grammar consists of 133 rule schemata, the English grammar of 8.5 rule schemata. Each gralmnar is equipped with a treebank, which was developed for other purposes than grammar specialization. Each treebank was produced by letting the system parse a corpus of technical documentation. Any sentence that did not obtain any parse was discarded. At this point, the French corpus was reduced to 960 sentences, and the English corpus to 970. The average sentence length was 9 for French and 8 for English. For each sentence, a human expert then selected the most appropriate analysis among those returned by the parser.",
176
- "cite_spans": [
177
- {
178
- "start": 268,
179
- "end": 287,
180
- "text": "(Butt et al., 1999)",
181
- "ref_id": "BIBREF3"
182
- }
183
- ],
184
- "ref_spans": [
185
- {
186
- "start": 123,
187
- "end": 131,
188
- "text": "Figure 2",
189
- "ref_id": "FIGREF1"
190
- }
191
- ],
192
- "eq_spans": [],
193
- "section": "Experimental Setup",
194
- "sec_num": "3"
195
- },
196
- {
197
- "text": "In the current experiments, each treebank was used to specialize the grammar it had been developed with. A set of 10-fold cross-validation experiments was carried out to measure several interesting quantities under different conditions. This means that, for each language, the corpus was randomly split into ten equal parts, and one tenth at a time was held out for testing while the remaining nine tenths were used to specialize the grammar, and the results were averaged over the ten runs.. For each grammar the average number of parses per sentence, the fraction of sentences which still received at least one parse (angparse) and the fraction of sentences for which the parse selected by the expert was still derived (coverage) were measured 1. The average CPU time required by parsing was also measured, and this was used to compute the speedup with respect to the original grammar.",
198
- "cite_spans": [],
199
- "ref_spans": [],
200
- "eq_spans": [],
201
- "section": "Experimental Setup",
202
- "sec_num": "3"
203
- },
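
The 10-fold protocol and the three reported measures can be made concrete with a short sketch. Here `specialize` and `parse` are hypothetical callables standing in for the grammar pruner and the parser; nothing in this fragment comes from the paper beyond the protocol itself:

```python
import random

def cross_validate(treebank, specialize, parse, folds=10, seed=0):
    """10-fold cross-validation of a grammar specializer, measuring
    parses per sentence, anyparse and coverage.  `treebank` is a list
    of (sentence, gold_analysis) pairs; `specialize` maps a training
    set to a grammar; `parse` maps (grammar, sentence) to analyses."""
    data = list(treebank)
    random.Random(seed).shuffle(data)
    chunks = [data[i::folds] for i in range(folds)]
    n_parses = anyparse = coverage = total = 0
    for i in range(folds):
        test = chunks[i]
        train = [ex for j, c in enumerate(chunks) if j != i for ex in c]
        grammar = specialize(train)
        for sentence, gold in test:
            analyses = parse(grammar, sentence)
            total += 1
            n_parses += len(analyses)
            anyparse += bool(analyses)   # at least one parse returned
            coverage += gold in analyses # expert-selected parse kept
    return {"parses/sentence": n_parses / total,
            "anyparse": anyparse / total,
            "coverage": coverage / total}
```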
204
- {
205
- "text": "The thus established results constitute one data point in the trade-off between ambiguity reduction on one side, which is in turn related to parsing speed, and loss in coverage on the other. In order to determine other points of this trade-off, the same set. of experiments was performed where speciMization was inhibited for certain rule schemata. In particular, for each grammar, the two rule schemata that received the largest number of distinct expansions in the corpora were determined. These proved to be those associated with the LHS symbols 'VPverb[main]' and 'NP' for the French grammar, and 'VPv' and 'NPadj' for the English one. 2 The experiments were repeated while inhibiting specialization of first the scheme with the most expansions, and then the two most expanded schemata.",
206
- "cite_spans": [],
207
- "ref_spans": [],
208
- "eq_spans": [],
209
- "section": "Experimental Setup",
210
- "sec_num": "3"
211
- },
212
- {
213
- "text": "Measures of coverage and speedup are important 1 As long as we are interested in preserving the f-structure assigned to sentences, this notion of coverage is stricter than necessary. The same f-structure can in fact be assigned by more than one parse, so that in some cases a sentence is considered out of coverage even if the specialized grammar assigns to it the correct f-structure.",
214
- "cite_spans": [],
215
- "ref_spans": [],
216
- "eq_spans": [],
217
- "section": "Experimental Setup",
218
- "sec_num": "3"
219
- },
220
- {
221
- "text": "2'VPv' and 'VPverb[main]' cover VPs headed by a main verb. 'NPadj' covers NPs with adjectives attached.",
222
- "cite_spans": [],
223
- "ref_spans": [],
224
- "eq_spans": [],
225
- "section": "Experimental Setup",
226
- "sec_num": "3"
227
- },
228
- {
229
- "text": "The original rule:",
230
- "cite_spans": [],
231
- "ref_spans": [],
232
- "eq_spans": [],
233
- "section": "Experimental Setup",
234
- "sec_num": "3"
235
- },
236
- {
237
- "text": "l/Pperfp --+ ADVP* SE (t ADJUNCT) ($ ADV_TYPE) = t,padv ~/r { @M_Head_Perfp I@M_Head_Passp } @( Anaph_Ctrl $) { AD VP+ SE ('~ ADJUNCT) ($ ADV_TYPE) = vpadv",
238
- "cite_spans": [],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "Experimental Setup",
242
- "sec_num": "3"
243
- },
244
- {
245
- "text": "is replaced by the following: indicators of what can be achieved with this form of grammar pruning. However, they could potentially be misleading, since failure times for uncovered sentences might be considerably lower than their parsing times, had they not been out of coverage. If the pruned grammar fails more frequently on sentences which take longer to parse, the measured speedup might be artificiMly high. This is easily realized, as simply removing the hardest sentences froln the corpus would cause a decrease ill the average parsing time, and thus result in a speedup, without any pruning at all. To factor out the contribution of uncovered sentences fi'om the results, the performance of a two-stage architecture analogous to that of (Samuelsson and Rayner, 1991) was silnulated, in which the pruned grammar is attempted \"A Sentence\" Parser with specialized grammar first, and the sentence is passed on to the original unpruned grammar whenever the pruned grammar fails to return a parse (see Figure 3) . The measured speedup of this simulated architecture, which preserves the anyparse measure of the original grammar, takes into account the contribution of uncovered sentences, as it penalizes sweeping difficult sentences under the carpet.",
246
- "cite_spans": [
247
- {
248
- "start": 745,
249
- "end": 774,
250
- "text": "(Samuelsson and Rayner, 1991)",
251
- "ref_id": "BIBREF10"
252
- }
253
- ],
254
- "ref_spans": [
255
- {
256
- "start": 1004,
257
- "end": 1013,
258
- "text": "Figure 3)",
259
- "ref_id": null
260
- }
261
- ],
262
- "eq_spans": [],
263
- "section": "Experimental Setup",
264
- "sec_num": "3"
265
- },
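
The simulated two-stage architecture is just a fallback wrapper; a sketch with hypothetical `parse_pruned` and `parse_full` callables, charging the sentence for both attempts when the pruned grammar fails, as the simulation requires:

```python
import time

def two_stage_parse(sentence, parse_pruned, parse_full):
    """Try the pruned grammar first and fall back to the original
    grammar on failure, so the anyparse measure of the original
    grammar is preserved.  Returns the analyses and the wall-clock
    time charged to the sentence, fallback included."""
    start = time.perf_counter()
    analyses = parse_pruned(sentence)
    if not analyses:  # pruned grammar failed: pay for both parses
        analyses = parse_full(sentence)
    return analyses, time.perf_counter() - start
```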
266
- {
267
- "text": "ADVP ,[.E (~ ADJUNCT) (.l. ADV_TYPE) = vpadv l/'Pperfp --+ @PPadjunct @PPcase_obl {@M.Head_Pevfp [@M..Head_Passp} @( Anaph_Ctrl ~ ) V { @M_Head_Perfp I@M_Head_Passp } @( Anaph_Ctrl ~)",
268
- "cite_spans": [],
269
- "ref_spans": [],
270
- "eq_spans": [],
271
- "section": "Experimental Setup",
272
- "sec_num": "3"
273
- },
274
- {
275
- "text": "The results of the experiments described in the section above are summarized in the table in Figure 4 . The upper part of the table refers to experiments with the French grammar, the lower part to experiments with the English grammar. For each language, the first line presents data gathered for the original grammar for comparison with the pruned grammars. The figures in the second line were collected by pruning the grammar based on the whole corpus, and then testing on the corpus itself. The grammars obtained in this way contain 516 and 388 disjuncts --corresponding to purely concatenative rules --for French and English respectively. Anyparse and coverage are not, of course, relevant in this case, but the statistics on parsing time are, especially the one on the maximum parsing time. For each iteration in the 10-fold cross-validation experiment, the maximum parsing time was retained, and those ten times were eventually averaged. If pruning tended to leave sentences which take long to parse uncovered, then we would observe a significant difference between the average over ma.ximum times on the grammar trained and tested on the same corpus (which parses all sentences, including the hardest), and the average over maximum times for grammars trained and tested on different sets. The fact that this does not seem to be the case indicates that pruning does not penalize difficult sentences. Note also that the average number of parses per sentence is significantly smaller than with the full grammar, of almost a factor of 9 in the case of the French graminar.",
276
- "cite_spans": [],
277
- "ref_spans": [
278
- {
279
- "start": 93,
280
- "end": 101,
281
- "text": "Figure 4",
282
- "ref_id": null
283
- }
284
- ],
285
- "eq_spans": [],
286
- "section": "Experimental Results",
287
- "sec_num": null
288
- },
289
- {
290
- "text": "The third line contains results for the fully pruned grammar.",
291
- "cite_spans": [],
292
- "ref_spans": [],
293
- "eq_spans": [],
294
- "section": "Experimental Results",
295
- "sec_num": null
296
- },
297
- {
298
- "text": "In the case of the French grammar a speedup of about 6 is obtained with a loss in coverage of 13%. The smaller speedup gained with the English grammar can be explained by the fact that here, the parsing times are lower in general, and that a non-negligible part of this time, especially that needed for morphological analysis, is unaffected by pruning. Even in the case of the English grammar, though, speedup is substantial (2.67). For both grammars, the reduction in the average maxinmm parsing time is particularly good, confirming our hypothesis that trimming the grammar by removing heavy constructs makes it considerably more efficient. A partially negative note comes from the average number of disjuncts in the prun.ed grainmars, which is 501 for French and 374 for English. Comparing this figures to the number of disjuncts in grammars pruned on the full corpus (516 and 388), we find that after training on nine tenths of the corpus, adding the last tenth still leads to an increase of 3-4% in the size of the resulting grammars. In other words, the marginal gain of further training examples is still significant after considering about 900 sentences, indicating that the training corpora are somewhat too small.",
299
- "cite_spans": [],
300
- "ref_spans": [],
301
- "eq_spans": [],
302
- "section": "Experimental Results",
303
- "sec_num": null
304
- },
305
- {
306
- "text": "The last two lines for each language show figures for grammars with pruning inhibited on the most variable and the two most variable symbols respectively. For both languages, inhibiting pruning on the most variable symbol has the expected effect of increasing both parsing time and coverage. Inhibiting pruning also on the second most variable symbol has ahnost no effect for French, and only a small effect for English.",
307
- "cite_spans": [],
308
- "ref_spans": [],
309
- "eq_spans": [],
310
- "section": "Experimental Results",
311
- "sec_num": null
312
- },
313
- {
314
- "text": "The table in Figure 5 summarizes the measures on the simulated two-stage architecture. For both languages the best trade-off, once the distribution of uncovered sentences has been taken into account, is achieved by the fully pruned grammars.",
315
- "cite_spans": [],
316
- "ref_spans": [
317
- {
318
- "start": 13,
319
- "end": 21,
320
- "text": "Figure 5",
321
- "ref_id": "FIGREF3"
322
- }
323
- ],
324
- "eq_spans": [],
325
- "section": "Experimental Results",
326
- "sec_num": null
327
- },
328
- {
329
- "text": "Related Work",
330
- "cite_spans": [],
331
- "ref_spans": [],
332
- "eq_spans": [],
333
- "section": "5",
334
- "sec_num": null
335
- },
336
- {
337
- "text": "The work presented in the current article is related to previous work on corpus-based grammar specialization as presented in (Rayner, 1988; Salnuelsson and Rayner, 1991; Rayner and Carter, 1996; Samuelsson, 1994; Srinivas a.nd Joshi, 1995; Neumann, 1997 The line of work described in (Rayner, 1988; Samuelsson and Rayner, 1991; Rayner and Carter, 1996; Samuelsson, 1994) deals with unificationbased grammars that already have a purelyconcatenative context-fi'ee backbone, and is more concerned with a different t~orm of specialization, consisting in the application of explanation-based learning (EBL). Here, the central idea is to collect the most frequently occurring subtrees in a treebank and use them as atomic units for parsing. The cited works differ mainly in the criteria adopted for selecting subtrees fi'om the treebank. In (Rayner, 1988; Samuelsson and Rayner, 1991; Rayner and Carter, 1996) these criteria are handcoded: all subtrees satisfying some properties are selected, and a new grammar rule is created by flattening each such subtree, i.e., by taking the root as lefl.-hand side and the yield as right-hand side, and in the process performing all unifications corresponding to the thus removed internal nodes. Experiments carried out on a corpus of 15,000 trees from the ATIS domain using a version of the SRI Core Language Engine resulted in a speedup of about 3.4 at a cost of 5% in gralmnati-cal coverage, which however was compensated by an increase in parsing accuracy.",
338
- "cite_spans": [
339
- {
340
- "start": 125,
341
- "end": 139,
342
- "text": "(Rayner, 1988;",
343
- "ref_id": "BIBREF8"
344
- },
345
- {
346
- "start": 140,
347
- "end": 169,
348
- "text": "Salnuelsson and Rayner, 1991;",
349
- "ref_id": null
350
- },
351
- {
352
- "start": 170,
353
- "end": 194,
354
- "text": "Rayner and Carter, 1996;",
355
- "ref_id": "BIBREF7"
356
- },
357
- {
358
- "start": 195,
359
- "end": 212,
360
- "text": "Samuelsson, 1994;",
361
- "ref_id": "BIBREF11"
362
- },
363
- {
364
- "start": 213,
365
- "end": 239,
366
- "text": "Srinivas a.nd Joshi, 1995;",
367
- "ref_id": null
368
- },
369
- {
370
- "start": 240,
371
- "end": 253,
372
- "text": "Neumann, 1997",
373
- "ref_id": "BIBREF6"
374
- },
375
- {
376
- "start": 284,
377
- "end": 298,
378
- "text": "(Rayner, 1988;",
379
- "ref_id": "BIBREF8"
380
- },
381
- {
382
- "start": 299,
383
- "end": 327,
384
- "text": "Samuelsson and Rayner, 1991;",
385
- "ref_id": "BIBREF10"
386
- },
387
- {
388
- "start": 328,
389
- "end": 352,
390
- "text": "Rayner and Carter, 1996;",
391
- "ref_id": "BIBREF7"
392
- },
393
- {
394
- "start": 353,
395
- "end": 370,
396
- "text": "Samuelsson, 1994)",
397
- "ref_id": "BIBREF11"
398
- },
399
- {
400
- "start": 835,
401
- "end": 849,
402
- "text": "(Rayner, 1988;",
403
- "ref_id": "BIBREF8"
404
- },
405
- {
406
- "start": 850,
407
- "end": 878,
408
- "text": "Samuelsson and Rayner, 1991;",
409
- "ref_id": "BIBREF10"
410
- },
411
- {
412
- "start": 879,
413
- "end": 903,
414
- "text": "Rayner and Carter, 1996)",
415
- "ref_id": "BIBREF7"
416
- }
417
- ],
418
- "ref_spans": [],
419
- "eq_spans": [],
420
- "section": "5",
421
- "sec_num": null
422
- },
423
- {
424
- "text": "Finding suitable tree-cutting criteria requires a considerable amount of work, and must be repeated for each new grammar and for each new domain to which the grammar is to be specialized. Samuelsson (Samuelsson, 1994) proposes a technique to automatically selects what subtrees to retain. The selection of appropriate subtrees is done by choosing a subset of nodes at which to cut trees. Cutnodes are determined by computing the entropy of each node, and selecting only those nodes whose entropy exceeds a given threshold. Intuitively, nodes with low entropy indicate locations in the trees where a given symbol was expanded using a predictable set of rules, at least most of the times, so that the loss of coverage that derives from ignoring the remaining cases is low. Nodes with high entropy, on the other hand, indicate positions in which there is a high uncertainty in what rule was used to expand the symbol, so that it is better to preserve all alternatives. Several schemas are proposed to compute entropies, each leading to a different trade-off be-tween coverage reduction and speedup. In general, results are not quite as good as those obtained using handcoded criteria, though of course the specialized grammar is obtained fully automatically, and thus with much less effort.",
425
- "cite_spans": [
426
- {
427
- "start": 199,
428
- "end": 217,
429
- "text": "(Samuelsson, 1994)",
430
- "ref_id": "BIBREF11"
431
- }
432
- ],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "5",
436
- "sec_num": null
437
- },
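
The entropy criterion is straightforward to state in code. A sketch, assuming expansion counts per nonterminal have already been collected from the treebank (the input format and the example threshold are our assumptions, not details from Samuelsson (1994)):

```python
import math
from collections import Counter

def cutnodes(expansion_counts, threshold):
    """Keep as a cutnode every nonterminal whose expansion entropy
    exceeds the threshold; predictable (low-entropy) nodes can be cut
    away cheaply, uncertain (high-entropy) ones keep all alternatives."""
    selected = set()
    for symbol, counts in expansion_counts.items():
        total = sum(counts.values())
        entropy = -sum((c / total) * math.log2(c / total)
                       for c in counts.values())
        if entropy > threshold:
            selected.add(symbol)
    return selected

counts = {"NP": Counter({"NP->Det N": 50, "NP->N": 45, "NP->NP PP": 40}),
          "S": Counter({"S->NP VP": 99, "S->VP": 1})}
print(cutnodes(counts, threshold=0.5))  # {'NP'}: the high-entropy node
```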
438
- {
439
- "text": "When ignoring issues related to the elimination of complex operators t\"1\"o111 the RHS of rule schemata, the grammar-pruning strategy described in the current article is equivalent to explanation-based learning where all nodes have been selected,as eutnodes. Conversely, EBL can be viewed as higher-order grammar pruning, removing not grammar rules, but gramlnar-rule combinations.",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "5",
444
- "sec_num": null
445
- },
446
- {
447
- "text": "Some of the work done on data-oriented parsing (DOP) (Bod, 1993; Bod and Scha, 1996; Bod and Kaplan, 1998; Sima'an, 1999) can also be considered related to our work, as it can be seen as a way to specialize in an gBL-like way the (initially unknown) grammar implicitly underlying a treebank. (Srinivas and aoshi, 1995) and (Neumann, 1997 ) apply EBL to speed up parsing with tree-adjoining grammars and sentence generation with HPSGs respectively, though they do so by introducing new components in their systems rather then by modifying the grammars they use.",
448
- "cite_spans": [
449
- {
450
- "start": 53,
451
- "end": 64,
452
- "text": "(Bod, 1993;",
453
- "ref_id": "BIBREF2"
454
- },
455
- {
456
- "start": 65,
457
- "end": 84,
458
- "text": "Bod and Scha, 1996;",
459
- "ref_id": "BIBREF1"
460
- },
461
- {
462
- "start": 85,
463
- "end": 106,
464
- "text": "Bod and Kaplan, 1998;",
465
- "ref_id": "BIBREF0"
466
- },
467
- {
468
- "start": 107,
469
- "end": 121,
470
- "text": "Sima'an, 1999)",
471
- "ref_id": "BIBREF12"
472
- },
473
- {
474
- "start": 292,
475
- "end": 318,
476
- "text": "(Srinivas and aoshi, 1995)",
477
- "ref_id": null
478
- },
479
- {
480
- "start": 323,
481
- "end": 337,
482
- "text": "(Neumann, 1997",
483
- "ref_id": "BIBREF6"
484
- }
485
- ],
486
- "ref_spans": [],
487
- "eq_spans": [],
488
- "section": "5",
489
- "sec_num": null
490
- },
491
- {
492
- "text": "Sophisticated grammar formalisms are very useful and convenient when designing high-coverage grammars for natural languages. Very expressive grammatical constructs can make the task of developing and maintaining such a large resource considerably easier. On the other hand, their use can result in a considerable increase in grammatical ambiguity. Gramnaar-compilation techniques based on grammar structure alone are insufficient remedies in those cases, as they cannot access the information required to determine which alternatives to retain and which alternatives to discard.",
493
- "cite_spans": [],
494
- "ref_spans": [],
495
- "eq_spans": [],
496
- "section": "Conclusions",
497
- "sec_num": "6"
498
- },
499
- {
500
- "text": "The current article demonstrates that a relatively simple pruning technique, employing the kind of reference corpus that is typically used for grammar development and thus often already available, can significantly improve parsing performance. On large lexical functional grammars, speedups of up to a factor 6 were observed, at the price of a. reduction in grammatical coverage of about 13%. A simple two-stage architecture was also proposed that preserves the anyparse measure of the original grammar, demonstrating that significant speedups can be obtained without increasing the number of parsing failures.",
501
- "cite_spans": [],
502
- "ref_spans": [],
503
- "eq_spans": [],
504
- "section": "Conclusions",
505
- "sec_num": "6"
506
- },
507
- {
508
- "text": "Future work includes extending the study of corpus-based grammar specialization from firstorder grammar pruning to higher-order grammar pruning, thus extending previous work on explanation-based learning for parsing, aad apply-ing it to the LFG fornaalism.",
509
- "cite_spans": [],
510
- "ref_spans": [],
511
- "eq_spans": [],
512
- "section": "Conclusions",
513
- "sec_num": "6"
514
- }
515
- ],
516
- "back_matter": [],
517
- "bib_entries": {
518
- "BIBREF0": {
519
- "ref_id": "b0",
520
- "title": "A probabilistic corpus-driven model for lexical-functional analysis",
521
- "authors": [
522
- {
523
- "first": "Rens",
524
- "middle": [],
525
- "last": "Bod",
526
- "suffix": ""
527
- },
528
- {
529
- "first": "Ronald",
530
- "middle": [],
531
- "last": "Kaplan",
532
- "suffix": ""
533
- }
534
- ],
535
- "year": 1998,
536
- "venue": "Proceedings of Coling-ACL-98",
537
- "volume": "",
538
- "issue": "",
539
- "pages": "",
540
- "other_ids": {},
541
- "num": null,
542
- "urls": [],
543
- "raw_text": "Rens Bod and Ronald Kaplan. 1998. A probabilistic corpus-driven model for lexical-functional analy- sis. In Proceedings of Coling-ACL-98, Montreal, Canada.",
544
- "links": null
545
- },
546
- "BIBREF1": {
547
- "ref_id": "b1",
548
- "title": "Data-oriented language processing: An overview",
549
- "authors": [
550
- {
551
- "first": "R",
552
- "middle": [],
553
- "last": "Bod",
554
- "suffix": ""
555
- },
556
- {
557
- "first": "R",
558
- "middle": [],
559
- "last": "Scha",
560
- "suffix": ""
561
- }
562
- ],
563
- "year": 1996,
564
- "venue": "",
565
- "volume": "",
566
- "issue": "",
567
- "pages": "",
568
- "other_ids": {},
569
- "num": null,
570
- "urls": [],
571
- "raw_text": "R. Bod and R. Scha. 1996. Data-oriented lan- guage processing: An overview. Technical report, ILLC, University of Amsterdam, Alnsterdam, The Netherlands.",
572
- "links": null
573
- },
574
- "BIBREF2": {
575
- "ref_id": "b2",
576
- "title": "Using an annotated corpus as a stochastic grammar",
577
- "authors": [
578
- {
579
- "first": "Rens",
580
- "middle": [],
581
- "last": "Bod",
582
- "suffix": ""
583
- }
584
- ],
585
- "year": 1993,
586
- "venue": "Proceedings of EACL-93",
587
- "volume": "",
588
- "issue": "",
589
- "pages": "",
590
- "other_ids": {},
591
- "num": null,
592
- "urls": [],
593
- "raw_text": "Rens Bod. 1993. Using an annotated corpus as a stochastic grammar. In Proceedings of EACL-93, Utrecht, The Netherlands.",
594
- "links": null
595
- },
596
- "BIBREF3": {
597
- "ref_id": "b3",
598
- "title": "A Grammar Writer's Cookbook. CSLI Publications",
599
- "authors": [
600
- {
601
- "first": "M",
602
- "middle": [],
603
- "last": "Butt",
604
- "suffix": ""
605
- },
606
- {
607
- "first": "T",
608
- "middle": [
609
- "H"
610
- ],
611
- "last": "King",
612
- "suffix": ""
613
- },
614
- {
615
- "first": "M",
616
- "middle": [
617
- "E"
618
- ],
619
- "last": "Nifio",
620
- "suffix": ""
621
- },
622
- {
623
- "first": "F",
624
- "middle": [],
625
- "last": "Segond",
626
- "suffix": ""
627
- }
628
- ],
629
- "year": 1999,
630
- "venue": "",
631
- "volume": "",
632
- "issue": "",
633
- "pages": "",
634
- "other_ids": {},
635
- "num": null,
636
- "urls": [],
637
- "raw_text": "M. Butt, T.H. King, M.E. Nifio, and F. Segond. 1999. A Grammar Writer's Cookbook. CSLI Pub- lications, Stanford, CA.",
638
- "links": null
639
- },
640
- "BIBREF4": {
641
- "ref_id": "b4",
642
- "title": "Relating complexity to practical performance in parsing with wide-coverage unification grammars",
643
- "authors": [
644
- {
645
- "first": "John",
646
- "middle": [],
647
- "last": "Carrol",
648
- "suffix": ""
649
- }
650
- ],
651
- "year": 1994,
652
- "venue": "Proceedings of (ACL '94)",
653
- "volume": "",
654
- "issue": "",
655
- "pages": "",
656
- "other_ids": {},
657
- "num": null,
658
- "urls": [],
659
- "raw_text": "John Carrol. 1994. Relating complexity to practical performance in parsing with wide-coverage uni- fication grammars. In Proceedings of (ACL '94), Las Cruces, New Mexico, June.",
660
- "links": null
661
- },
662
- "BIBREF5": {
663
- "ref_id": "b5",
664
- "title": "Lexicalfunctional grammar: A formal system for grammatical representation",
665
- "authors": [
666
- {
667
- "first": "Ronald",
668
- "middle": [],
669
- "last": "Kaplan",
670
- "suffix": ""
671
- },
672
- {
673
- "first": "Joan",
674
- "middle": [],
675
- "last": "Bresnan",
676
- "suffix": ""
677
- }
678
- ],
679
- "year": 1982,
680
- "venue": "",
681
- "volume": "",
682
- "issue": "",
683
- "pages": "173--281",
684
- "other_ids": {},
685
- "num": null,
686
- "urls": [],
687
- "raw_text": "Ronald Kaplan and Joan Bresnan. 1982. Lexical- functional grammar: A formal system for gram- matical representation. In Joan Bresnan, editor, The Mental Representation of Grammatical Rela- tions, pages 173-281. MIT Press.",
688
- "links": null
689
- },
690
- "BIBREF6": {
691
- "ref_id": "b6",
692
- "title": "Applying explanationbased learning to control and speeding-up natural language generation",
693
- "authors": [
694
- {
695
- "first": "G/Inter",
696
- "middle": [],
697
- "last": "Neumann",
698
- "suffix": ""
699
- }
700
- ],
701
- "year": 1997,
702
- "venue": "Proceedings of A CL-EACL-97",
703
- "volume": "",
704
- "issue": "",
705
- "pages": "",
706
- "other_ids": {},
707
- "num": null,
708
- "urls": [],
709
- "raw_text": "G/inter Neumann. 1997. Applying explanation- based learning to control and speeding-up natu- ral language generation. In Proceedings of A CL- EACL-97, Madrid, Spain.",
710
- "links": null
711
- },
712
- "BIBREF7": {
713
- "ref_id": "b7",
714
- "title": "Fast parsing using pruning and grammar specialization",
715
- "authors": [
716
- {
717
- "first": "Manny",
718
- "middle": [],
719
- "last": "Rayner",
720
- "suffix": ""
721
- },
722
- {
723
- "first": "David",
724
- "middle": [],
725
- "last": "Carter",
726
- "suffix": ""
727
- }
728
- ],
729
- "year": 1996,
730
- "venue": "Proceedings of the ACL-96",
731
- "volume": "",
732
- "issue": "",
733
- "pages": "",
734
- "other_ids": {},
735
- "num": null,
736
- "urls": [],
737
- "raw_text": "Manny Rayner and David Carter. 1996. Fast pars- ing using pruning and grammar specialization. In Proceedings of the ACL-96, Santa. Cruz, CA.",
738
- "links": null
739
- },
740
- "BIBREF8": {
741
- "ref_id": "b8",
742
- "title": "Applying explanation-based generalization to natural-language processing",
743
- "authors": [
744
- {
745
- "first": "Manny",
746
- "middle": [],
747
- "last": "Rayner",
748
- "suffix": ""
749
- }
750
- ],
751
- "year": 1988,
752
- "venue": "",
753
- "volume": "",
754
- "issue": "",
755
- "pages": "",
756
- "other_ids": {},
757
- "num": null,
758
- "urls": [],
759
- "raw_text": "Manny Rayner. 1988. Applying explanation-based generalization to natural-language processing.",
760
- "links": null
761
- },
762
- "BIBREF9": {
763
- "ref_id": "b9",
764
- "title": "Proceedings of the International Conference on Fifth Generation Computer Systems",
765
- "authors": [],
766
- "year": null,
767
- "venue": "",
768
- "volume": "",
769
- "issue": "",
770
- "pages": "",
771
- "other_ids": {},
772
- "num": null,
773
- "urls": [],
774
- "raw_text": "In Proceedings of the International Conference on Fifth Generation Computer Systems, Tokyo, Japan.",
775
- "links": null
776
- },
777
- "BIBREF10": {
778
- "ref_id": "b10",
779
- "title": "Quantitative evaluation of explanation-based learning as an optimization tool for a large-scale natural language system",
780
- "authors": [
781
- {
782
- "first": "Christer",
783
- "middle": [],
784
- "last": "Samuelsson",
785
- "suffix": ""
786
- },
787
- {
788
- "first": "Manny",
789
- "middle": [],
790
- "last": "Rayner",
791
- "suffix": ""
792
- }
793
- ],
794
- "year": 1991,
795
- "venue": "Proceedings of the IJCAI-91",
796
- "volume": "",
797
- "issue": "",
798
- "pages": "",
799
- "other_ids": {},
800
- "num": null,
801
- "urls": [],
802
- "raw_text": "Christer Samuelsson and Manny Rayner. 1991. Quantitative evaluation of explanation-based learning as an optimization tool for a large-scale natural language system. In Proceedings of the IJCAI-91, Sydney, Oz.",
803
- "links": null
804
- },
805
- "BIBREF11": {
806
- "ref_id": "b11",
807
- "title": "Grammar specialization through entropy thresholds",
808
- "authors": [
809
- {
810
- "first": "Christer",
811
- "middle": [],
812
- "last": "Samuelsson",
813
- "suffix": ""
814
- }
815
- ],
816
- "year": 1994,
817
- "venue": "Proceedings of the ACL-94",
818
- "volume": "",
819
- "issue": "",
820
- "pages": "",
821
- "other_ids": {},
822
- "num": null,
823
- "urls": [],
824
- "raw_text": "Christer Samuelsson. 1994. Grammar specialization through entropy thresholds. In Proceedings of the ACL-94, Las Cruces, New Mexico. Available as cmp-lg/9405022.",
825
- "links": null
826
- },
827
- "BIBREF12": {
828
- "ref_id": "b12",
829
- "title": "Institute for Logic, Language and Computation",
830
- "authors": [
831
- {
832
- "first": "",
833
- "middle": [],
834
- "last": "Khalil Sima'an",
835
- "suffix": ""
836
- }
837
- ],
838
- "year": 1999,
839
- "venue": "",
840
- "volume": "",
841
- "issue": "",
842
- "pages": "",
843
- "other_ids": {},
844
- "num": null,
845
- "urls": [],
846
- "raw_text": "Khalil Sima'an. 1999. Learning Efficient Dis- ambiguation. Ph.D. thesis, Institute for Logic, Language and Computation, Amsterdam, The Netherlands.",
847
- "links": null
848
- },
849
- "BIBREF13": {
850
- "ref_id": "b13",
851
- "title": "Some novel applications of explanation-based learning to parsing lexicalized tree-adjoining gramlnars",
852
- "authors": [
853
- {
854
- "first": "B",
855
- "middle": [],
856
- "last": "Srinivas",
857
- "suffix": ""
858
- },
859
- {
860
- "first": "A",
861
- "middle": [],
862
- "last": "Joshi",
863
- "suffix": ""
864
- }
865
- ],
866
- "year": 1995,
867
- "venue": "Proceedings of the ACL-95",
868
- "volume": "",
869
- "issue": "",
870
- "pages": "",
871
- "other_ids": {},
872
- "num": null,
873
- "urls": [],
874
- "raw_text": "B. Srinivas and A. Joshi. 1995. Some novel appli- cations of explanation-based learning to parsing lexicalized tree-adjoining gramlnars. In Proceed- ings of the ACL-95, Cambridge, MA.",
875
- "links": null
876
- }
877
- },
878
- "ref_entries": {
879
- "FIGREF0": {
880
- "text": "The pruning of a rule from the actual French grammar. The \"*\" and the \"+\" signs have the usual interpretation as in regular expressions. A sub-expression enclosed in parenthesis is optional. Alternative sub-expressions are enclosed in curly brackets and separated by the \"[\" sign. An \"@\" followed by an identifier is a macro expansion operator, and is eventually replaced by further functional descriptions.",
881
- "type_str": "figure",
882
- "uris": null,
883
- "num": null
884
- },
885
- "FIGREF1": {
886
- "text": "The setting for our experiments on grammar specialization.",
887
- "type_str": "figure",
888
- "uris": null,
889
- "num": null
890
- },
891
- "FIGREF2": {
892
- "text": "d + Time originalFigure 3: A schematic representation of the simulated two-stage coverage-preserving architecture.",
893
- "type_str": "figure",
894
- "uris": null,
895
- "num": null
896
- },
897
- "FIGREF3": {
898
- "text": "Results for the simulated two-stage architecture.",
899
- "type_str": "figure",
900
- "uris": null,
901
- "num": null
902
- }
903
- }
904
- }
905
- }
Full_text_JSON/prefixA/json/A00/A00-1029.json DELETED
@@ -1,821 +0,0 @@
1
- {
2
- "paper_id": "A00-1029",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:18.672629Z"
6
- },
7
- "title": "A Tool for Automated Revision of Grammars for NLP Systems",
8
- "authors": [
9
- {
10
- "first": "Nanda",
11
- "middle": [],
12
- "last": "Kambhatla",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "IBM T.J. Watson Research Center",
17
- "location": {
18
- "addrLine": "30 Saw Mill River Road",
19
- "postCode": "10532",
20
- "settlement": "Hawthorne",
21
- "region": "NY"
22
- }
23
- },
24
- "email": ""
25
- },
26
- {
27
- "first": "Wlodek",
28
- "middle": [],
29
- "last": "Zadrozny",
30
- "suffix": "",
31
- "affiliation": {
32
- "laboratory": "",
33
- "institution": "IBM T.J. Watson Research Center",
34
- "location": {
35
- "addrLine": "30 Saw Mill River Road",
36
- "postCode": "10532",
37
- "settlement": "Hawthorne",
38
- "region": "NY"
39
- }
40
- },
41
- "email": ""
42
- }
43
- ],
44
- "year": "",
45
- "venue": null,
46
- "identifiers": {},
47
- "abstract": "We present an algorithm and a tool for automatically revising grammars for natural language processing (NLP) systems to disallow specifically identified sentences or sets of sentences. We also outline an approach for automatically revising attribute value grammars using counterexamples. Developing grammars for NLP systems that are both general enough to accept most sentences about a domain, but constrained enough to disallow other sentences is very tedious. Our approach of revising grammars automatically using counterexamples greatly simplifies the development and revision of tightly constrained grammars. We have successfully used our tool to constrain over-generalizing grammars of speech understanding systems and obtained higher recognition accuracy.",
48
- "pdf_parse": {
49
- "paper_id": "A00-1029",
50
- "_pdf_hash": "",
51
- "abstract": [
52
- {
53
- "text": "We present an algorithm and a tool for automatically revising grammars for natural language processing (NLP) systems to disallow specifically identified sentences or sets of sentences. We also outline an approach for automatically revising attribute value grammars using counterexamples. Developing grammars for NLP systems that are both general enough to accept most sentences about a domain, but constrained enough to disallow other sentences is very tedious. Our approach of revising grammars automatically using counterexamples greatly simplifies the development and revision of tightly constrained grammars. We have successfully used our tool to constrain over-generalizing grammars of speech understanding systems and obtained higher recognition accuracy.",
54
- "cite_spans": [],
55
- "ref_spans": [],
56
- "eq_spans": [],
57
- "section": "Abstract",
58
- "sec_num": null
59
- }
60
- ],
61
- "body_text": [
62
- {
63
- "text": "Natural language processing systems often constrain the set of \"utterances\" from a user (spoken, typed in, etc.) to narrow down the possible syntactic and semantic resolutions of the utterance and reduce the number of misrecognitions and/or misunderstandings by the system. Such constraints on the allowed syntax and the inferred semantics are often expressed in the form of a \"grammar \"l, a set of Throughout this document, by using the word \"grammar\", we refer to a Context-Free Grammar that consists of a finite set of non-terminals, a finite set of terminals, a unique non-terminal called the start symbol, and a set of production rules of the form A-> a, where A is a non-terminal and a is a string of terminal or non-terminal symbols. The 'language' rules specifying the set of allowed utterances and possibly also specifying the semantics associated with these utterances. For instance, grammars are commonly used in speech understanding systems to specify both the set of allowed sentences and to specify \"tags\" to extract semantic entities (e.g. the \"amount\" of money).",
64
- "cite_spans": [],
65
- "ref_spans": [],
66
- "eq_spans": [],
67
- "section": "Introduction",
68
- "sec_num": "1"
69
- },
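
The footnote's definition of a grammar maps directly onto a small data structure; a toy sketch using the paper's later example symbols (not the grammar format of any particular speech recognizer):

```python
from dataclasses import dataclass, field

@dataclass
class CFG:
    """A context-free grammar exactly as the footnote defines it:
    non-terminals, terminals, a start symbol, and productions
    A -> alpha with alpha a string of terminals and non-terminals."""
    nonterminals: set
    terminals: set
    start: str
    productions: dict = field(default_factory=dict)  # lhs -> [RHS tuples]

    def add(self, lhs, *rhs):
        assert lhs in self.nonterminals
        self.productions.setdefault(lhs, []).append(tuple(rhs))

g = CFG({"<START>", "<V>", "<N>"},
        {"move", "transfer", "to", "operator"}, "<START>")
g.add("<START>", "<V>", "to", "<N>")
g.add("<V>", "move")
g.add("<V>", "transfer")
g.add("<N>", "operator")
```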
70
- {
71
- "text": "Constraining the number of sentences accepted by a grammar is essential for reducing misinterpretations of user queries by an NLP system. For instance, for speech understanding systems, if the grammar accepts a large number of sentences, then the likelihood of recognizing uttered sentences as random, irrelevant, or undesirable sentences is increased. For transaction processing systems, misrecognized words can lead to unintended transactions being processed. An effective constraining grammar can reduce transactional errors by limiting the number of sentence level errors. The problem of over-generalization of speech grammars and related issues is well discussed by Seneff (1992) .",
72
- "cite_spans": [
73
- {
74
- "start": 671,
75
- "end": 684,
76
- "text": "Seneff (1992)",
77
- "ref_id": "BIBREF7"
78
- }
79
- ],
80
- "ref_spans": [],
81
- "eq_spans": [],
82
- "section": "Introduction",
83
- "sec_num": "1"
84
- },
85
- {
86
- "text": "Thus, speech grammars must often balance the conflicting requirements of",
87
- "cite_spans": [],
88
- "ref_spans": [],
89
- "eq_spans": [],
90
- "section": "Introduction",
91
- "sec_num": "1"
92
- },
93
- {
94
- "text": "\u2022 accepting a wide variety of sentences to increase flexibility, and \u2022 accepting a small number of sentences to increase system accuracy and robustness. Developing tight grammars which trade-off these conflicting constraints is a tedious and accepted by a grammar is the set of all terminal strings that can be generated from the start symbol by successive application of the production rules. The grammar may optionally have semantic interpretation rules associated with each production rule (e.g. see (Allen 95) ). difficult process.",
95
- "cite_spans": [
96
- {
97
- "start": 503,
98
- "end": 513,
99
- "text": "(Allen 95)",
100
- "ref_id": null
101
- }
102
- ],
103
- "ref_spans": [],
104
- "eq_spans": [],
105
- "section": "Introduction",
106
- "sec_num": "1"
107
- },
108
- {
109
- "text": "Typically, grammars overgeneralize and accept too many sentences that are irrelevant or undesirable for a given application. We call such sentences \"counterexamples\". The problem is usually handled by revising the grammar manually to disallow such counter-examples. For instance, the sentence \"give me my last eighteen transactions\" may need to be excluded from a grammar for a speech understanding system, since the words \"eighteen\" and \"ATM\" are easily confused by the speech recogniser. However, \"five\" and \"ten\" should remain as possible modifiers of \"transactions\". Counter-examples can also be sets of sentences that need to be excluded from a grammar (specified by allowing the inclusion of non-terminals in counter-examples). For example, for a banking application that disallows money transfers to online accounts, we might wish to exclude the set of sentences \"transfer <AMOUNT> dollars to my online account\" from the grammar, where <AMOUNT> is a non-terminal in the grammar that maps to all possible ways of specifying amounts.",
110
- "cite_spans": [],
111
- "ref_spans": [],
112
- "eq_spans": [],
113
- "section": "Introduction",
114
- "sec_num": "1"
115
- },
116
- {
117
- "text": "In this paper, we are proposing techniques for automatically revising grammars using counterexamples. The grammar developer identifies counter-examples from among sentences (or sets of sentences) mis-recognized by the speech recognizer or from sentences randomly generated by a sentence generator using the original grammar. The grammar reviser modifies the original grammar to invalidate the counterexamples. The revised grammar can be fed back to the grammar reviser and whole process can be iterated several times until the resulting grammar is deemed satisfactory.",
118
- "cite_spans": [],
119
- "ref_spans": [],
120
- "eq_spans": [],
121
- "section": "Introduction",
122
- "sec_num": "1"
123
- },
124
- {
125
- "text": "Figure I .....................................",
126
- "cite_spans": [],
127
- "ref_spans": [],
128
- "eq_spans": [],
129
- "section": "Introduction",
130
- "sec_num": "1"
131
- },
132
- {
133
- "text": "In the next sections, we first describe our algorithm for revising grammars to disallow counter-examples. We also discuss algorithms to make the revised grammar compact using minimum description length (MDL) based grammar compaction techniques and extensions to our basic algorithm to handle grammars with recursion. We then present some results of applying our grammar reviser tool to constrain speech grammars of speech understanding systems. Finally, we present an approach for revising attribute value grammars using our technique and present our conclusions.",
134
- "cite_spans": [],
135
- "ref_spans": [],
136
- "eq_spans": [],
137
- "section": "Introduction",
138
- "sec_num": "1"
139
- },
140
- {
141
- "text": "In this section, we describe an algorithm (see Figure 1 ) for revising grammars that directly modifies the rules of the grammar to disallow counter-examples. For each counter-example 2, we generate the parse tree (representation of all the grammar rules needed to generate the sentence or set of sentences) and the grammar modifier modifies the production rules of the grammar to invalidate the counter-example. This process is repeated for each counter-example using the revised grammar from the previous iteration for generating the parse tree for the current counter-example. If a counter-example generates multiple parse trees, the above algorithm is repeated for each parse tree in turn.",
142
- "cite_spans": [],
143
- "ref_spans": [
144
- {
145
- "start": 47,
146
- "end": 55,
147
- "text": "Figure 1",
148
- "ref_id": null
149
- }
150
- ],
151
- "eq_spans": [],
152
- "section": "Automated Grammar Revision by rule modification",
153
- "sec_num": "2"
154
- },
155
- {
156
- "text": "We present the grammar modification algorithm below. For, we assume that the parse-tree(s) of the counter-example contain no recursion (i.e. the same production rule does not occur twice in any of the parse trees). In section 2.4, we present an approach for using the algorithm even when the parse-trees contain recursion. Thus, the algorithm is applicable for any context-free grammar. The grammar modification algorithm a Note that a counter-example can be a sentence such as \"move to operator\" or a set of sentences such as \"transfer <AMOUNT> to online account\". The latter is specified using non-terminals interspersed with words.",
157
- "cite_spans": [],
158
- "ref_spans": [],
159
- "eq_spans": [],
160
- "section": "Grammar modification algorithm",
161
- "sec_num": "2.1"
162
- },
163
- {
164
- "text": "for modifying the rules of a grammar to disallow a counter-example c (identified by a grammar developer) using a parse-tree for e proceeds as follows : .............................................................................................................................. i We illustrate the algorithm with an example. Figure 2 (a) shows a simple grammar. Suppose the sentence \"move to operator\" is a counterexample for an application. Figure 2 (b) shows the parse-tree for \"move to operator\". Since the parse tree contains the rule: <V> ::= \"move\", new rules are added to define non-terminals <V'> and <Vo>, where <V'> does not generate \"move\" and <Vo> generates only \"move\". Similarly, since the parse tree contains the rule: <N>::= \"operator\", the new rules: <N'>::= \"checking\" I \"savings\" I \"money\"; and <No>::= \"operator\", are added. For the non-terminal <PP>, the new rules: <PP'>::= \"to\" <N'>; and <PPo>::= \"to\" <No>, are added. Note that since <No> only generates the phrase \"operator\" which is part of the counter-example, <PPo> only generates the phrase \"to operator\" which is part of the counter-example. Also, <PP'> generates all phrases that <PP> generates except for the phrase \"to operator\". Finally, the rule: <<START>>::= <V> <PP> is modified using the newly created non-terminals <V'>, <Vo>, <PP'> and <PPo> such that the only sentences which are accepted by the grammar and begin with the phrase \"move\" do not end with the phrase \"to operator\", and also, the only sentences which are accepted by the grammar and end with the phrase \"to operator\" do not begin with the phrase \"move\". Figure 3 shows the final modified grammar that accepts all the sentences that the grammar in Figure 2 (a) accepts except for the sentence \"move to operator\". In Figure 3 , all the grammar rules that are new or modified are shown in bold and italics.",
165
- "cite_spans": [
166
- {
167
- "start": 152,
168
- "end": 280,
169
- "text": ".............................................................................................................................. i",
170
- "ref_id": null
171
- }
172
- ],
173
- "ref_spans": [
174
- {
175
- "start": 326,
176
- "end": 334,
177
- "text": "Figure 2",
178
- "ref_id": "FIGREF1"
179
- },
180
- {
181
- "start": 443,
182
- "end": 451,
183
- "text": "Figure 2",
184
- "ref_id": "FIGREF1"
185
- },
186
- {
187
- "start": 1609,
188
- "end": 1617,
189
- "text": "Figure 3",
190
- "ref_id": null
191
- },
192
- {
193
- "start": 1702,
194
- "end": 1710,
195
- "text": "Figure 2",
196
- "ref_id": "FIGREF1"
197
- },
198
- {
199
- "start": 1770,
200
- "end": 1778,
201
- "text": "Figure 3",
202
- "ref_id": null
203
- }
204
- ],
205
- "eq_spans": [],
206
- "section": "Grammar modification algorithm",
207
- "sec_num": "2.1"
208
- },
209
- {
210
- "text": "The above algorithm for grammar modification has a time complexity of O(m*2 k) rule creation (or modification) steps for removing a counterexample, where m is the number of production rules in the parse tree of the counter-example and k is the largest number of non-terminals on the right hand side of any of these production rules. Since grammars used for real applications rarely have more than a handful of non-terminals on the right hand side of production rules, this complexity is quite manageable.",
211
- "cite_spans": [],
212
- "ref_spans": [],
213
- "eq_spans": [],
214
- "section": "Grammar modification algorithm",
215
- "sec_num": "2.1"
216
- },
217
- {
218
- "text": "As seen in the example described above, the size of the grammar (number of production rules) can increase greatly by applying our algorithm successively for a number of counter-examples. However, we can remedy this by applying grammar induction algorithms based on minimum description length (MDL) (e.g. Grunwald (1996) and Zadrozny (1997) ) to combine rules and create a compact grammar that accepts the same language.",
219
- "cite_spans": [
220
- {
221
- "start": 304,
222
- "end": 319,
223
- "text": "Grunwald (1996)",
224
- "ref_id": null
225
- },
226
- {
227
- "start": 324,
228
- "end": 339,
229
- "text": "Zadrozny (1997)",
230
- "ref_id": "BIBREF8"
231
- }
232
- ],
233
- "ref_spans": [],
234
- "eq_spans": [],
235
- "section": "MDL based grammar induction",
236
- "sec_num": null
237
- },
238
- {
239
- "text": "The MDL principle (Rissanen (1982) ) selects that description (theory) of data, which minimizes the sum of the length, in bits, of the description of the theory, and the length, in bits, of data when encoded using the theory. In our case, the data is the set of possible word combinations and the theory is the grammar that specifies it. We are primarily interested in using the MDL principle to obtain (select) a compact grammar (the theory) from among a set of equivalent grammars. Since the set of possible word combinations (data) is the same for all grammars in consideration, we focus on the description length of the grammars itself, which we approximate by using a set of heuristics described in step 1 below.",
240
- "cite_spans": [
241
- {
242
- "start": 18,
243
- "end": 34,
244
- "text": "(Rissanen (1982)",
245
- "ref_id": "BIBREF6"
246
- }
247
- ],
248
- "ref_spans": [],
249
- "eq_spans": [],
250
- "section": "MDL based grammar induction",
251
- "sec_num": null
252
- },
253
- {
254
- "text": "We use the following modified version of Zadrozny's (1997) algorithm to generate a more compact grammar from the revised grammar using the MDL principle: 1. Compute the description length of the grammar, i.e. the total number of symbols needed to specify the grammar, where each non-terminal, \"::=\", and \"1\" are counted as one symbol. 2. Modify the current grammar by concatenating all possible pairs of nonterminals, and compute the description length of each such resultant grammar. For concatenating <NI> and <N2>, introduce the rule <N3>::= <NI> <N2>, search all other rules for consecutive occurrences of <NI> and <N2>, and replace such occurrences with <N3>. Note that this change results in an equivalent grammar (that accepts the same set of sentences as the original grammar). 3. Modify the current grammar by merging all possible pairs of non-terminals, and compute the description length of each such resultant grammar. For merging <N4> and <N5>, introduce the rule: <N6>::= <N4> [ <N5>, search for pairs of rules which differ only in one position such that for one of the rules, <N4> occurs in that position and the other rule, the <N5> occurs in the same position. Replace the pair of rules with a new rule that is exactly the same as either of the pairs of rules, except for the use of <N6> instead of <N3> or <N4>. Note that this change results in an equivalent grammar (that accepts the same set of sentences as the original grammar).",
255
- "cite_spans": [
256
- {
257
- "start": 41,
258
- "end": 58,
259
- "text": "Zadrozny's (1997)",
260
- "ref_id": "BIBREF8"
261
- }
262
- ],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "MDL based grammar induction",
266
- "sec_num": null
267
- },
268
- {
269
- "text": "4. Compute a table of description lengths of the grammars obtained by concatenating or merging all possible pairs of non-terminals of the initial grammar, as described above. Select the pair of non-terminals (if any) together with the action (concatenate or merge) that results in the least description length and execute the corresponding action. 5. Iterate steps 2, 3, and 4 until the description length does not decrease. No further modification is performed if the base description length of the grammar is lower than that resulting from merging or concatenating any pair of non-terminals.",
270
- "cite_spans": [],
271
- "ref_spans": [],
272
- "eq_spans": [],
273
- "section": "MDL based grammar induction",
274
- "sec_num": null
275
- },
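
Steps 1 and 2 can be sketched directly. The description-length count follows step 1; counting each terminal RHS symbol as one symbol too is our assumption, since the text enumerates only non-terminals, "::=" and "|":

```python
def description_length(grammar):
    """Step 1: total symbols needed to write the grammar down, counting
    each LHS, '::=', '|' separator and RHS symbol as one symbol."""
    length = 0
    for lhs, rhss in grammar.items():
        length += 2 + (len(rhss) - 1) + sum(len(r) for r in rhss)
    return length

def concatenate(grammar, n1, n2, new):
    """Step 2: add new ::= n1 n2 and rewrite every adjacent occurrence
    of n1 n2 as new, yielding an equivalent grammar."""
    out = {new: [(n1, n2)]}
    for lhs, rhss in grammar.items():
        rewritten = []
        for rhs in rhss:
            symbols, i = [], 0
            while i < len(rhs):
                if i + 1 < len(rhs) and (rhs[i], rhs[i + 1]) == (n1, n2):
                    symbols.append(new)
                    i += 2
                else:
                    symbols.append(rhs[i])
                    i += 1
            rewritten.append(tuple(symbols))
        out[lhs] = rewritten
    return out

g = {"<PP>": [("to", "<N>")], "<S>": [("<V>", "to", "<N>")]}
g2 = concatenate(g, "to", "<N>", "<TO_N>")
# Prints "9 -> 11": this rewrite does not shrink the grammar, so the
# greedy step 4 would reject it and try a different pair or a merge.
print(description_length(g), "->", description_length(g2))
```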
276
- {
277
- "text": "In variations of this algorithm, the selection of the pairs of non-terminals to concatenate or merge, can be based on; the syntactic categories of the corresponding terminals, the semantic categories of the corresponding terminals, and the frequency of occurrence of the nonterminals.",
278
- "cite_spans": [],
279
- "ref_spans": [],
280
- "eq_spans": [],
281
- "section": "MDL based grammar induction",
282
- "sec_num": null
283
- },
284
- {
285
- "text": "Using the algorithm described above in conjunction with the algorithm in section 2.1, we can obtain a compact grammar that is guaranteed to disallow the counter-examples.",
286
- "cite_spans": [],
287
- "ref_spans": [],
288
- "eq_spans": [],
289
- "section": "MDL based grammar induction",
290
- "sec_num": null
291
- },
292
- {
293
- "text": "We have built a graphical tool for revising grammars for NLP systems based on the algorithm described in sections 2.1 and 2.2 above. The tool takes as input an existing grammar and can randomly generate sentences accepted by the grammar including non-terminal strings and strings containing terminals and nonterminals (e.g. both \"move to operator\" and \"transfer <AMOUNT> to online account\" would be generated if they were accepted by the grammar). A grammar developer (a human) interacts with the tool and either inputs counterexamples selected from speech recognition error logs or selects counter-examples like the ones listed above. The grammar developer can then revise the grammar to disallow the counterexamples by pressing a button and then reduce the size of the resulting grammar using the algorithm in section 2.2 by pressing another button to obtain a compact grammar that does not accept any of the identified counterexamples. Typically, the grammar developer repeats the above cycle several times to obtain a tightly constrained grammar.",
294
- "cite_spans": [],
295
- "ref_spans": [],
296
- "eq_spans": [],
297
- "section": "Results for grammar revision for speech understanding systems",
298
- "sec_num": "2.3"
299
- },
300
- {
301
- "text": "We have successfully used the tool described above to greatly constrain overgeneralizing grammars for speech understanding systems that we built for telephony banking, stock trading and directory assistance (Zadrozny et al, 1998) . The speech recognition grammars for these systems accepted around fifty million sentences each. We successfully used the reviser tool to constrain these grammars by eliminating thousands of sentences and obtained around 20-30% improvement in sentence recognition accuracy. We conducted two user studies of our telephony banking system at different stages of development. The user studies were conducted eight months apart. During these eight months, we used a multi-pronged strategy of constraining grammars using the grammar revision algorithms described in this paper, improving the pronunciation models of some words and redesigning the prompts of the system to enable fast and easy error recovery by users. The combination of all these techniques resulted in improving the 'successful transaction in first try'3 rate from 43% to 71%, an improvement of 65%. The average number of wrong tries (turns of conversation) to get a successful answer was reduced from 2.1 to 0.5 tries. We did not conduct experiments to isolate the contribution of each factor towards this improvement in system performance.",
302
- "cite_spans": [
303
- {
304
- "start": 207,
305
- "end": 229,
306
- "text": "(Zadrozny et al, 1998)",
307
- "ref_id": "BIBREF0"
308
- }
309
- ],
310
- "ref_spans": [],
311
- "eq_spans": [],
312
- "section": "Results for grammar revision for speech understanding systems",
313
- "sec_num": "2.3"
314
- },
315
- {
316
- "text": "It is important to note here that we would probably have obtained this improvement in recognition accuracy even with a manual revision of the grammars. However, the main advantage in using our tool is the tremendous simplification of the whole process of revision for a grammar developer who now selects counter-examples with an interactive tool instead of manually revising the grammars.",
317
- "cite_spans": [],
318
- "ref_spans": [],
319
- "eq_spans": [],
320
- "section": "Results for grammar revision for speech understanding systems",
321
- "sec_num": "2.3"
322
- },
323
- {
324
- "text": "We now describe an extension of the algorithm in section 2.1 that can modify grammars with recursion to disallow a finite set of counterexamples. The example grammars shown above are regular grammars (i.e. equivalent finite state automatons exist). For regular grammars (and only for regular grammars), an alternative approach for eliminating counter-examples using standard automata theory is:",
325
- "cite_spans": [],
326
- "ref_spans": [],
327
- "eq_spans": [],
328
- "section": "Handling recursion in grammars",
329
- "sec_num": "2.4"
330
- },
331
- {
332
- "text": "\u2022 Compute the finite state automaton (FSA) G corresponding to the original grammar. \u2022 Compute the FSA C corresponding to the set of counter-examples. \u2022 Compute C', the complement of C with respect to the given alphabet. \u2022 Compute G', the intersection of G and C'. The FSA G' is equivalent to a revised grammar which disallows the counterexamples.",
333
- "cite_spans": [],
334
- "ref_spans": [],
335
- "eq_spans": [],
336
- "section": "Handling recursion in grammars",
337
- "sec_num": "2.4"
338
- },
339
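For the regular case, the complement-and-intersect construction above is standard automata theory. A compact sketch over complete DFAs represented as (states, alphabet, transitions, start, accepting) tuples (a toy encoding chosen for brevity; both machines are assumed to share the alphabet and have total transition functions):

```python
from itertools import product

def complement(dfa):
    """Flip the accepting states; requires a complete DFA."""
    states, sigma, delta, start, accept = dfa
    return (states, sigma, delta, start, states - accept)

def intersect(d1, d2):
    """Product construction: O(n*m) states, matching the complexity
    quoted in the text."""
    s1, sigma, t1, q1, f1 = d1
    s2, _, t2, q2, f2 = d2          # same alphabet assumed
    states = set(product(s1, s2))
    delta = {((a, b), c): (t1[a, c], t2[b, c])
             for (a, b) in states for c in sigma}
    accept = {(a, b) for (a, b) in states if a in f1 and b in f2}
    return (states, sigma, delta, (q1, q2), accept)

# Revised language: everything G accepts except the counter-examples C:
# revised = intersect(G, complement(C))
```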
- {
340
- "text": "The time complexity of the algorithm is O(n*m), where n and m are the number of states in the finite state automatons G and C respectively. This is comparable to the quadratic time complexity of our grammar revision algorithm presented in Section 2.1.",
341
- "cite_spans": [],
342
- "ref_spans": [],
343
- "eq_spans": [],
344
- "section": "Handling recursion in grammars",
345
- "sec_num": "2.4"
346
- },
347
- {
348
- "text": "However, the above algorithm for eliminating counter-examples only works for regular grammars. This is because context-free grammars are not closed under complementation and intersection. However we can use our algorithm for grammar modification (section 2.1) to handle any context-free grammar as follows: 1) As before, generate parse tree p for counter-example c for an initial grammar G. 2) If p contains a recursion (two or more repetitions of any production rule in the same parse tree), rewrite the initial grammar G as the equivalent grammar G', where the recursion is \"unrolled\" sufficiently many times (at least one more time than the number of repetitions of the recursive production rule in the parse tree). We explain the unrolling of recursion in greater detail below. If p does not contain any recursion, go to step 4. 3) Generate parse tree p' for the counter-example c for the rewritten grammar G'. Note that p' will no longer contain a recursive application of any production rules, though G' itself will still have recursion. 4) Use the algorithm described in section 2.1 to modify the grammar G' to eliminate the counter-example c using the parse tree p'.",
349
- "cite_spans": [],
350
- "ref_spans": [],
351
- "eq_spans": [],
352
- "section": "Handling recursion in grammars",
353
- "sec_num": "2.4"
354
- },
355
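The unrolling in step 2 can be made concrete for a directly recursive rule: give each level of the recursion a fresh non-terminal and let only the innermost copy stay recursive, exactly as in Figure 4(c). A sketch (the <S1>, <S2>, ... naming scheme is taken from the figure):

```python
def unroll(grammar, nt, k):
    """Unroll the recursive rule for nt k times; the last copy remains
    self-recursive so the language is unchanged."""
    new = dict(grammar)
    names = [nt] + [f"{nt[:-1]}{i}>" for i in range(1, k + 1)]
    for level in range(k + 1):
        target = names[min(level + 1, k)]   # innermost copy refers to itself
        new[names[level]] = [
            [target if s == nt else s for s in alt]
            for alt in grammar[nt]
        ]
    return new

g = {"<S>": [["a", "<S>", "b"], ["a", "b"]]}
print(unroll(g, "<S>", 3))
# {'<S>':  [['a', '<S1>', 'b'], ['a', 'b']],
#  '<S1>': [['a', '<S2>', 'b'], ['a', 'b']],
#  '<S2>': [['a', '<S3>', 'b'], ['a', 'b']],
#  '<S3>': [['a', '<S3>', 'b'], ['a', 'b']]}
```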
- {
356
- "text": "We illustrate the above algorithm with an example. Figure 4(a) shows a context free grammar which accepts all strings of the form a^n b^n, for any n greater than 0. Note that this is not a regular language. Suppose we wish to eliminate the counter-example aaabbb from the initial grammar. The parse tree p for the counter-example aaabbb is shown in Figure 4(b) . The grammar in 4(a) can be rewritten as the equivalent grammar 4(c), where the recursion of (S->aSb) is unrolled three times. The parse tree p' for the counter-example aaabbb with respect to grammar in 4(c) is shown in Figure 4(d) . Note that p' does not contain any recursion, though the rewritten grammar does. We revised the FIGURE 4",
357
- "cite_spans": [],
358
- "ref_spans": [
359
- {
360
- "start": 51,
361
- "end": 62,
362
- "text": "Figure 4(a)",
363
- "ref_id": null
364
- },
365
- {
366
- "start": 344,
367
- "end": 355,
368
- "text": "Figure 4(b)",
369
- "ref_id": null
370
- },
371
- {
372
- "start": 577,
373
- "end": 588,
374
- "text": "Figure 4(d)",
375
- "ref_id": null
376
- }
377
- ],
378
- "eq_spans": [],
379
- "section": "Handling recursion in grammars",
380
- "sec_num": "2.4"
381
- },
382
- {
383
- "text": "(a) ORIGINAL GRAMMAR G <S> ::= \"a\" <S> \"b\" | \"a\" \"b\" .",
384
- "cite_spans": [],
385
- "ref_spans": [],
386
- "eq_spans": [],
387
- "section": "Handling recursion in grammars",
388
- "sec_num": "2.4"
389
- },
390
- {
391
- "text": "(b) PARSE TREE p <S> ::= \"a\" <S> \"b\" .",
392
- "cite_spans": [],
393
- "ref_spans": [],
394
- "eq_spans": [],
395
- "section": "Handling recursion in grammars",
396
- "sec_num": "2.4"
397
- },
398
- {
399
- "text": "<S> ::= \"a\" <S> \"b\" . <S> ::= \"a\" \"b\" .",
400
- "cite_spans": [],
401
- "ref_spans": [],
402
- "eq_spans": [],
403
- "section": "Handling recursion in grammars",
404
- "sec_num": "2.4"
405
- },
406
- {
407
- "text": "(c) REWRITTEN GRAMMAR G' <S> ::= \"a\" <S1> \"b\" | \"a\" \"b\" . <S1> ::= \"a\" <S2> \"b\" | \"a\" \"b\" . <S2> ::= \"a\" <S3>",
408
- "cite_spans": [],
409
- "ref_spans": [],
410
- "eq_spans": [],
411
- "section": "Handling recursion in grammars",
412
- "sec_num": "2.4"
413
- },
414
- {
415
- "text": "\"b\" | \"a\" \"b\" . <S3> ::= \"a\" <S3> \"b\" | \"a\" \"b\" .",
416
- "cite_spans": [],
417
- "ref_spans": [],
418
- "eq_spans": [],
419
- "section": "Handling recursion in grammars",
420
- "sec_num": "2.4"
421
- },
422
- {
423
- "text": "<S> ::= \"a\" <S1> \"b\" . <S1> ::= \"a\" <S2> \"b\" . <S2> ::= \"a\" \"b\" .",
424
- "cite_spans": [],
425
- "ref_spans": [],
426
- "eq_spans": [],
427
- "section": "(d) PARSE TREE p'",
428
- "sec_num": null
429
- },
430
- {
431
- "text": "<S> ::= \"a\" <S1> \"b\" | \"a\" \"b\" .",
432
- "cite_spans": [],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "(e) REVISED GRAMMAR Gr",
436
- "sec_num": null
437
- },
438
- {
439
- "text": "::= \"a\" <S2> \"b\" | \"a\" \"b\" . <S2> ::= \"a\" <S3> \"b\" . <S3> ::= \"a\" <S3> \"b\" | \"a\" \"b\" .",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "<S1>",
444
- "sec_num": null
445
- },
446
- {
447
- "text": "grammar in 4(c) to eliminate the counterexample aaabbb using the parse tree in Figure 4 (d). The revised grammar is shown in Figure 4 (e). Note that here we are assuming that a mechanism exists for rewriting the rules of a grammar with recursion to unroll the recursion (if it exists) a finite number of times. Such an unrolling is readily accomplished by introducing a set of new non-terminals, one for each iteration of unrolling as shown in Figure 4 (c).",
448
- "cite_spans": [],
449
- "ref_spans": [
450
- {
451
- "start": 79,
452
- "end": 89,
453
- "text": "Figure 4",
454
- "ref_id": null
455
- },
456
- {
457
- "start": 127,
458
- "end": 136,
459
- "text": "Figure 4",
460
- "ref_id": null
461
- },
462
- {
463
- "start": 447,
464
- "end": 455,
465
- "text": "Figure 4",
466
- "ref_id": null
467
- }
468
- ],
469
- "eq_spans": [],
470
- "section": "<S1>",
471
- "sec_num": null
472
- },
473
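A quick brute-force check confirms the revision: enumerating derivations of the Figure 4(e) grammar up to a depth bound shows that aaabbb is excluded while the rest of a^n b^n survives (illustrative code, not from the paper):

```python
def generate(grammar, symbol, depth):
    """All terminal strings derivable from symbol within a depth bound."""
    if symbol not in grammar:                 # terminal symbol
        return {symbol}
    if depth == 0:
        return set()
    out = set()
    for alt in grammar[symbol]:
        strings = {""}
        for s in alt:
            strings = {p + q for p in strings
                       for q in generate(grammar, s, depth - 1)}
        out |= strings
    return out

revised = {
    "<S>":  [["a", "<S1>", "b"], ["a", "b"]],
    "<S1>": [["a", "<S2>", "b"], ["a", "b"]],
    "<S2>": [["a", "<S3>", "b"]],             # the "a b" alternative removed
    "<S3>": [["a", "<S3>", "b"], ["a", "b"]],
}
lang = generate(revised, "<S>", 8)
assert "aaabbb" not in lang
assert {"ab", "aabb", "aaaabbbb"} <= lang
```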
- {
474
- "text": "In this section, we delineate an approach for automatically modifying attribute value grammars using counter-examples. We first convert an attribute value grammar into an equivalent non-attributed grammar by creating new non-terminals and encoding the attributes in the names of the new non-terminals (see Manaster Ramer and Zadrozny (1990) and Pollard and Sag (1994) ).",
475
- "cite_spans": [
476
- {
477
- "start": 325,
478
- "end": 340,
479
- "text": "Zadrozny (1990)",
480
- "ref_id": null
481
- },
482
- {
483
- "start": 345,
484
- "end": 367,
485
- "text": "Pollard and Sag (1994)",
486
- "ref_id": "BIBREF5"
487
- }
488
- ],
489
- "ref_spans": [],
490
- "eq_spans": [],
491
- "section": "Automated revision of attribute-value grammars",
492
- "sec_num": "3"
493
- },
494
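The encoding and decoding of attribute values in non-terminal names can be sketched in a few lines. The naming scheme (value appended as a suffix) follows the 'account' example below; both helper functions are invented for illustration:

```python
def encode(nt, value):
    """'<N_account>' with value 'SAVINGS' -> '<N_account_SAVINGS>'."""
    return nt[:-1] + "_" + value + ">"

def decode(nt):
    """Recover the value from a suffix-encoded non-terminal name."""
    base, _, value = nt[1:-1].rpartition("_")
    return "<" + base + ">", value

print(encode("<N_account>", "SAVINGS"))   # <N_account_SAVINGS>
print(decode("<N_account_SAVINGS>"))      # ('<N_account>', 'SAVINGS')
```

The key design constraint, as the text notes, is that the coding must be systematically invertible so the attributes can be recovered after revision.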
- {
495
- "text": "For example, suppose the grammar in Figure 2 (a) is an attribute value grammar with an",
496
- "cite_spans": [],
497
- "ref_spans": [
498
- {
499
- "start": 36,
500
- "end": 45,
501
- "text": "Figure 2",
502
- "ref_id": "FIGREF1"
503
- }
504
- ],
505
- "eq_spans": [],
506
- "section": "Automated revision of attribute-value grammars",
507
- "sec_num": "3"
508
- },
509
- {
510
- "text": "We have presented a set of algorithms and an interactive tool for automatically revising grammars of NLP systems to disallow identified counter-examples (sentences or sets of sentences accepted by the current grammar but deemed to be irrelevant for a given application). We have successfully used the tool to constrain overgeneralizing grammars of speech understanding systems and obtained 20-30% higher recognition accuracy. However, we believe the primary benefit of using our tool is the tremendously reduced effort for the grammar developer. Our technique relieves the grammar developer from the burden of going through the tedious and time consuming task of revising grammars by manually modifying production rules one at a time. Instead, the grammar developer simply identifies counter-examples to an interactive tool that revises the grammar to invalidate the identified sentences.",
511
- "cite_spans": [],
512
- "ref_spans": [],
513
- "eq_spans": [],
514
- "section": "Automated revision of attribute-value grammars",
515
- "sec_num": "3"
516
- },
517
- {
518
- "text": "We also discussed an MDL based algorithm for grammar compaction to reduce the size of the revised grammar. Thus, using a combination of the algorithms presented in this paper, one can obtain a compact grammar that is guaranteed to disallow the counter-examples.",
519
- "cite_spans": [],
520
- "ref_spans": [],
521
- "eq_spans": [],
522
- "section": "Automated revision of attribute-value grammars",
523
- "sec_num": "3"
524
- },
525
- {
526
- "text": "Although our discussion here was focussed on speech understanding applications, the algorithms and the tool described here are applicable for any domain where grammars are used. We are currently implementing an extension of the grammar modifier to handle attribute-value grammars. We outlined an approach for automated modification of attribute-value grammars in Section 3.",
527
- "cite_spans": [],
528
- "ref_spans": [],
529
- "eq_spans": [],
530
- "section": "Automated revision of attribute-value grammars",
531
- "sec_num": "3"
532
- },
533
- {
534
- "text": "We conclude that algorithms for automatically constraining grammars based on counterexamples can be highly effective in reducing the burden on grammar developers to develop constrained, domain specific grammars. Moreover, these algorithms can be used in any applications, which deal with grammars.",
535
- "cite_spans": [],
536
- "ref_spans": [],
537
- "eq_spans": [],
538
- "section": "Automated revision of attribute-value grammars",
539
- "sec_num": "3"
540
- },
541
- {
542
- "text": "We measured the number of times the user's transactional intent (e.g. checking balance, last five transactions etc.) was recognized and acted upon correctly by the system in the first try, even when the actual utterance may not have been recognized correctly word for word.",
543
- "cite_spans": [],
544
- "ref_spans": [],
545
- "eq_spans": [],
546
- "section": "",
547
- "sec_num": null
548
- },
549
- {
550
- "text": "Conclusions",
551
- "cite_spans": [],
552
- "ref_spans": [],
553
- "eq_spans": [],
554
- "section": "",
555
- "sec_num": null
556
- }
557
- ],
558
- "back_matter": [
559
- {
560
- "text": "We thank all of our colleagues in the conversation machines group at IBM T.J. Watson Research Center for several helpful comments and suggestions through the course of this work.",
561
- "cite_spans": [],
562
- "ref_spans": [],
563
- "eq_spans": [],
564
- "section": "Acknowledgements",
565
- "sec_num": null
566
- },
567
- {
568
- "text": "<N_account_savings> ::= \"savings\". <N_account_unspecified> ::= \"money\" | \"operator\" . attribute 'account', which encodes information about the type of account specified, e.g. 'account' might have the values, SAVINGS, CHECKING and UNSPECIFIED. Figure 5 shows an equivalent non-attributed grammar, where the value of the attribute 'account' has been encoded in the names of the non-terminals. Note that such an encoding can potentially create a very large number of non-terminals. Also, the specific coding used needs to be such that the attributes can be easily recovered from the non-terminal names later on. We can now use our modification algorithms (Section 2.1 and 2.2) to eliminate counterexamples from the non-attributed grammar. For instance, suppose we wish to eliminate 'move to operator' from the attributed grammar based on Figure 2 (a), as discussed above. We apply our algorithm (Section 2.1) to the grammar in Figure 5 and obtain the grammar shown in Figure 6 . Note that we name any new non-terminals created during the grammar modification in such a way as to leave the encoding of the attribute values in the non-terminal names intact. After applying the grammar revision algorithm, we can extract the attribute values from the encoding in the non-terminal names. For instance, in the example outlined above, we might systematically check for suffixes of a certain type and recover the attributes and their values. Also, as described earlier, we can use the algorithm described in section 2.2 to make the resulting grammar compact again by using MDL based grammar induction algorithms.",
569
- "cite_spans": [],
570
- "ref_spans": [
571
- {
572
- "start": 243,
573
- "end": 251,
574
- "text": "Figure 5",
575
- "ref_id": null
576
- },
577
- {
578
- "start": 834,
579
- "end": 842,
580
- "text": "Figure 2",
581
- "ref_id": null
582
- },
583
- {
584
- "start": 923,
585
- "end": 932,
586
- "text": "Figure 5",
587
- "ref_id": null
588
- },
589
- {
590
- "start": 965,
591
- "end": 973,
592
- "text": "Figure 6",
593
- "ref_id": null
594
- }
595
- ],
596
- "eq_spans": [],
597
- "section": "annex",
598
- "sec_num": null
599
- }
600
- ],
601
- "bib_entries": {
602
- "BIBREF0": {
603
- "ref_id": "b0",
604
- "title": "Conversation machines for transaction processing",
605
- "authors": [
606
- {
607
- "first": "W",
608
- "middle": [],
609
- "last": "Zadrozny",
610
- "suffix": ""
611
- },
612
- {
613
- "first": "C",
614
- "middle": [],
615
- "last": "Wolf",
616
- "suffix": ""
617
- },
618
- {
619
- "first": "N",
620
- "middle": [],
621
- "last": "Kambhatla",
622
- "suffix": ""
623
- },
624
- {
625
- "first": "Ye",
626
- "middle": [
627
- "Y"
628
- ],
629
- "last": "",
630
- "suffix": ""
631
- }
632
- ],
633
- "year": 1998,
634
- "venue": "proceedings of AAAI'98/IAAI'98",
635
- "volume": "",
636
- "issue": "",
637
- "pages": "1160--1166",
638
- "other_ids": {},
639
- "num": null,
640
- "urls": [],
641
- "raw_text": "Zadrozny W., Wolf C., Kambhatla N., and Ye Y. (1998). Conversation machines for transaction processing. In proceedings of AAAI'98/IAAI'98, AAAI Press/MIT Press, pp 1160-1166.",
642
- "links": null
643
- },
644
- "BIBREF1": {
645
- "ref_id": "b1",
646
- "title": "Natural Language Understanding. The Benjamin/Cummings Publishing Company",
647
- "authors": [
648
- {
649
- "first": "J",
650
- "middle": [],
651
- "last": "Allen",
652
- "suffix": ""
653
- }
654
- ],
655
- "year": 1995,
656
- "venue": "",
657
- "volume": "",
658
- "issue": "",
659
- "pages": "",
660
- "other_ids": {},
661
- "num": null,
662
- "urls": [],
663
- "raw_text": "Allen J. (1995). Natural Language Understanding. The Benjamin/Cummings Publishing Company, Redwood City, CA 94065.",
664
- "links": null
665
- },
666
- "BIBREF2": {
667
- "ref_id": "b2",
668
- "title": "A minimum description length approach to grammar inference",
669
- "authors": [
670
- {
671
- "first": "P",
672
- "middle": [],
673
- "last": "Grunwald",
674
- "suffix": ""
675
- }
676
- ],
677
- "year": 1996,
678
- "venue": "Symbolic, Connectionist and Statistical Approach to Learning for Natural Language Processing",
679
- "volume": "",
680
- "issue": "",
681
- "pages": "203--216",
682
- "other_ids": {},
683
- "num": null,
684
- "urls": [],
685
- "raw_text": "Grunwald P. (1996). A minimum description length approach to grammar inference. In S. Wermter et al., editors, Symbolic, Connectionist and Statistical Approaches to Learning for Natural Language Processing, Springer, Berlin, p 203-216.",
686
- "links": null
687
- },
688
- "BIBREF4": {
689
- "ref_id": "b4",
690
- "title": "Expressive Power of Grammatical Formalisms",
691
- "authors": [],
692
- "year": null,
693
- "venue": "Proceedings of Coling-90. Universitas Helsingiensis. Helsinki, Finland",
694
- "volume": "",
695
- "issue": "",
696
- "pages": "195--200",
697
- "other_ids": {},
698
- "num": null,
699
- "urls": [],
700
- "raw_text": "Expressive Power of Grammatical Formalisms, Proceedings of Coling-90. Universitas Helsingiensis. Helsinki, Finland\", pp. 195-200.",
701
- "links": null
702
- },
703
- "BIBREF5": {
704
- "ref_id": "b5",
705
- "title": "Head-Driven Phrase Structure Grammar",
706
- "authors": [
707
- {
708
- "first": "C",
709
- "middle": [],
710
- "last": "Pollard",
711
- "suffix": ""
712
- },
713
- {
714
- "first": "I",
715
- "middle": [
716
- "A"
717
- ],
718
- "last": "Sag",
719
- "suffix": ""
720
- }
721
- ],
722
- "year": 1994,
723
- "venue": "",
724
- "volume": "",
725
- "issue": "",
726
- "pages": "",
727
- "other_ids": {},
728
- "num": null,
729
- "urls": [],
730
- "raw_text": "Pollard, C. and Sag, I. A. (1994). Head-Driven Phrase Structure Grammar. The U. of Chicago Press.",
731
- "links": null
732
- },
733
- "BIBREF6": {
734
- "ref_id": "b6",
735
- "title": "A universal prior for integers and estimation by minimum description length",
736
- "authors": [
737
- {
738
- "first": "J",
739
- "middle": [],
740
- "last": "Rissanen",
741
- "suffix": ""
742
- }
743
- ],
744
- "year": 1982,
745
- "venue": "Annals of Statistics",
746
- "volume": "11",
747
- "issue": "",
748
- "pages": "416--431",
749
- "other_ids": {},
750
- "num": null,
751
- "urls": [],
752
- "raw_text": "Rissanen J. (1982). A universal prior for integers and estimation by minimum description length. Annals of Statistics, 11:416-431.",
753
- "links": null
754
- },
755
- "BIBREF7": {
756
- "ref_id": "b7",
757
- "title": "TINA: A natural language system for spoken language applications, Computational Linguistics",
758
- "authors": [
759
- {
760
- "first": "S",
761
- "middle": [],
762
- "last": "Seneff",
763
- "suffix": ""
764
- }
765
- ],
766
- "year": 1992,
767
- "venue": "",
768
- "volume": "18",
769
- "issue": "",
770
- "pages": "61--86",
771
- "other_ids": {},
772
- "num": null,
773
- "urls": [],
774
- "raw_text": "Seneff S. (1992). TINA: A natural language system for spoken language applications, Computational Linguistics, 18:p61-86.",
775
- "links": null
776
- },
777
- "BIBREF8": {
778
- "ref_id": "b8",
779
- "title": "Minimum description length and compositionality",
780
- "authors": [
781
- {
782
- "first": "W",
783
- "middle": [],
784
- "last": "Zadrozny",
785
- "suffix": ""
786
- }
787
- ],
788
- "year": 1997,
789
- "venue": "Proceedings of Second International Workshop for Computational Semantics",
790
- "volume": "",
791
- "issue": "",
792
- "pages": "",
793
- "other_ids": {},
794
- "num": null,
795
- "urls": [],
796
- "raw_text": "Zadrozny W. (1997). Minimum description length and compositionality. Proceedings of Second International Workshop for Computational Semantics, Tilburg. Recently re-published as a book chapter in: H.Bunt and R.Muskens (eds.) Computing Meaning. Kluwer Academic Publishers, Dordrecht/Boston, 1999.",
797
- "links": null
798
- }
799
- },
800
- "ref_entries": {
801
- "FIGREF1": {
802
- "type_str": "figure",
803
- "num": null,
804
- "text": "Figure 2",
805
- "uris": null
806
- },
807
- "FIGREF2": {
808
- "type_str": "figure",
809
- "num": null,
810
- "text": "<N'> ::= \"move\"",
811
- "uris": null
812
- },
813
- "FIGREF3": {
814
- "type_str": "figure",
815
- "num": null,
816
- "text": "Figure 3",
817
- "uris": null
818
- }
819
- }
820
- }
821
- }
Full_text_JSON/prefixA/json/A00/A00-1030.json DELETED
@@ -1,663 +0,0 @@
1
- {
2
- "paper_id": "A00-1030",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:33.823046Z"
6
- },
7
- "title": "Aggressive Morphology for Robust Lexical Coverage",
8
- "authors": [
9
- {
10
- "first": "William",
11
- "middle": [
12
- "A"
13
- ],
14
- "last": "Woods",
15
- "suffix": "",
16
- "affiliation": {},
17
- "email": "[email protected]"
18
- }
19
- ],
20
- "year": "",
21
- "venue": null,
22
- "identifiers": {},
23
- "abstract": "This paper describes an approach to providing lexical information for natural language processing in unrestricted domains. A system of approximately 1200 morphological rules is used to extend a core lexicon of 39,000 words to provide lexical coverage that exceeds that of a lexicon of 80,000 words or 150,000 word forms. The morphological system is described, and lexical coverage is evaluated for random words chosen from a previously unanalyzed corpus.",
24
- "pdf_parse": {
25
- "paper_id": "A00-1030",
26
- "_pdf_hash": "",
27
- "abstract": [
28
- {
29
- "text": "This paper describes an approach to providing lexical information for natural language processing in unrestricted domains. A system of approximately 1200 morphological rules is used to extend a core lexicon of 39,000 words to provide lexical coverage that exceeds that of a lexicon of 80,000 words or 150,000 word forms. The morphological system is described, and lexical coverage is evaluated for random words chosen from a previously unanalyzed corpus.",
30
- "cite_spans": [],
31
- "ref_spans": [],
32
- "eq_spans": [],
33
- "section": "Abstract",
34
- "sec_num": null
35
- }
36
- ],
37
- "body_text": [
38
- {
39
- "text": "Many applications of natural language processing have a need for a large vocabulary lexicon. However, no matter how large a lexicon one starts with, most applications will encounter terms that are not covered. This paper describes an approach to the lexicon problem that emphasizes recognition of morphological structure in unknown words in order to extend a relatively small core lexicon to allow robust natural language processing in unrestricted domains. This technique, which extends functionality originally developed for the Lunar system (Woods et al., 1972) , has been most recently applied in a conceptual indexing and retrieval system (Woods, 1997; Ambroziak and Woods, 1998; Woods et al., 2000) .",
40
- "cite_spans": [
41
- {
42
- "start": 544,
43
- "end": 564,
44
- "text": "(Woods et al., 1972)",
45
- "ref_id": "BIBREF4"
46
- },
47
- {
48
- "start": 644,
49
- "end": 657,
50
- "text": "(Woods, 1997;",
51
- "ref_id": "BIBREF6"
52
- },
53
- {
54
- "start": 658,
55
- "end": 684,
56
- "text": "Ambroziak and Woods, 1998;",
57
- "ref_id": "BIBREF0"
58
- },
59
- {
60
- "start": 685,
61
- "end": 704,
62
- "text": "Woods et al., 2000)",
63
- "ref_id": null
64
- }
65
- ],
66
- "ref_spans": [],
67
- "eq_spans": [],
68
- "section": "Motivation",
69
- "sec_num": "1"
70
- },
71
- {
72
- "text": "The system described here uses a collection of approximately 1200 knowledge-based morphological rules to extend a core lexicon of approximately 39,000 words to give coverage that exceeds that of an English lexicon of more than 80,000 base forms (or 150,000 base plus inflected forms). To illustrate the need for a robust extensible lexicon, a random sample of 100 words from the vocabulary of the million-word Brown corpus (Kucera and Francis, 1967) contained 24 words that were not included in a 300,000-word list of English word forms. This suggests that approximately 25% of the words in the Brown corpus would not be covered by an independent lexicon of even 300,000 words.",
73
- "cite_spans": [
74
- {
75
- "start": 422,
76
- "end": 448,
77
- "text": "(Kucera and Francis, 1967)",
78
- "ref_id": "BIBREF2"
79
- }
80
- ],
81
- "ref_spans": [],
82
- "eq_spans": [],
83
- "section": "Motivation",
84
- "sec_num": "1"
85
- },
86
- {
87
- "text": "In a recent experiment, 54% of approximately 34,000 word types (numbers and hyphenated words excluded) from a 3.1-million-word corpus of technical literature would not be covered by our hypothetical 300,000-word lexicon. Many of these are special forms (e.g., Nb203 and Ti/tin), and some are apparent misspellings (e.g., auniprocessor and sychronized), but the following are a sampling of fairly normal words that were not in the 300,000-word list: busmaster copyline hereabove preprocessing uniprocessors unreacted\n\n2 Integrated, Preferential, Heuristic Morphology\n\nThere are a number of systems that have been used to describe natural language morphology for computational use. The most popular of these is perhaps the finite-state Kimmo system (Koskenniemi, 1983 ). Other approaches are described in (Sproat, 1992) . The system described here differs from other systems in a number of dimensions. First, it is integrated with an extensive lexicon, a semantic ontology, and a syntactic analysis system, which it both consults and augments. For example, subsumption relationships in the semantic ontology enable the system to determine whether a proposed root is a container or a mental attitude, so that cupful is interpreted as a unit of measure (a kind of noun), while hopeful is interpreted as an adjective.",
88
- "cite_spans": [
89
- {
90
- "start": 746,
91
- "end": 764,
92
- "text": "(Koskenniemi, 1983",
93
- "ref_id": "BIBREF1"
94
- },
95
- {
96
- "start": 802,
97
- "end": 816,
98
- "text": "(Sproat, 1992)",
99
- "ref_id": "BIBREF3"
100
- }
101
- ],
102
- "ref_spans": [],
103
- "eq_spans": [],
104
- "section": "Motivation",
105
- "sec_num": "1"
106
- },
107
- {
108
- "text": "Second, it uses ordered preferential rules that attempt to choose a small number of correct analyses of a word (usually 1-3) from the many potential analyses that might be found. Finally, it uses rules that are heuristic in that they are not guaranteed to give correct analyses, but rather are designed to deal with various states of lack of knowledge and to make plausible inferences in the face of uncertainty. The focus is to use what it knows (or can infer) to determine a usable set of part-of-speech classifications for the word and to determine any root-plus-affix or internal compound structure that is apparent. If possible, it also assigns a semantic categorization to the word. It deals with unknown as well as known roots, and it indicates relative confidences in its classifications when its rules indicate uncertainty in the result.",
109
- "cite_spans": [],
110
- "ref_spans": [],
111
- "eq_spans": [],
112
- "section": "Motivation",
113
- "sec_num": "1"
114
- },
115
- {
116
- "text": "The role of the morphological analysis component in this system is to construct lexical entries for words that do not already have entries, so that subsequent encounters with the same word will find an already existing lexical entry. Thus, morphological analysis happens only once for each encountered word type that is not already in the core lexicon. The resulting lexical entries can be saved in a supplementary lexicon that is constructed as a side-effect of analyzing text. The rules of the morphological analysis system can ask syntactic and semantic questions about potential base forms. The system handles prefixes, suffixes, and lexical compounds (e.g., bitmap and replybuffer). It also handles multiword lexical items and many special forms, including Roman numerals, dates, and apparent phone numbers.",
117
- "cite_spans": [],
118
- "ref_spans": [],
119
- "eq_spans": [],
120
- "section": "Motivation",
121
- "sec_num": "1"
122
- },
123
- {
124
- "text": "The morphological analysis system makes use of a number of different kinds of morphological rules, applied in the following preferential order to words that are not already in the lexicon:",
125
- "cite_spans": [],
126
- "ref_spans": [],
127
- "eq_spans": [],
128
- "section": "Morphological rules and the lexicon",
129
- "sec_num": "2.1"
130
- },
131
- {
132
- "text": "1. Morph-precheck for special forms 2. Phase one pass with suffix rules (allow only \"known\" roots in phase one) 3. Prefix rules 4. Lexical compound rules 5. Checks against name lists 6. Phase two pass with suffix rules (unknown roots allowed) Generally, the rules are ordered in decreasing order of specificity, confidence and likelihood. Very specific tests are applied in Step 1 to identify and deal with \"words\" that are not ordinary sequences of alphabetic characters. These include numbers, alphanumeric sequences, and expressions involving special characters. Failing this, an ordered sequence of suffix rules is applied in Step 2 in a first pass that will allow a match only if the proposed root word is \"known.\" The same list of rules will be applied later in a second pass without this known-root condition if an earlier analysis does not succeed. This issue of \"known\" roots is a subtle one that can involve consulting external lists of known words as well as words already in the lexicon, and can also consider certain derived forms of known roots to be \"known,\" even when they have not been previously encountered.",
133
- "cite_spans": [],
134
- "ref_spans": [],
135
- "eq_spans": [],
136
- "section": "Morphological rules and the lexicon",
137
- "sec_num": "2.1"
138
- },
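The six-step ordering reads naturally as a first-success pipeline. Below is a minimal runnable caricature in Python in which every stage is a toy stand-in for the real rule sets described in the surrounding text (none of these toy rules come from the paper):

```python
NAMES = {"tonio"}   # stand-in for the external name lists

def analyze(word, lexicon):
    """First-success pipeline over the six steps; each stage returns a
    lexical entry dict or None. All stages are invented toy rules."""
    stages = [
        lambda w: {"cat": "number"} if w.isdigit() else None,          # 1
        lambda w: ({"cat": "v", "root": w[:-3]}                        # 2
                   if w.endswith("ing") and w[:-3] in lexicon else None),
        lambda w: ({"cat": "v", "prefix": "re", "root": w[2:]}         # 3
                   if w.startswith("re") and w[2:] in lexicon else None),
        lambda w: next(({"cat": "n", "parts": [w[:i], w[i:]]}          # 4
                        for i in range(len(w) - 3, 2, -1)
                        if w[:i] in lexicon and w[i:] in lexicon), None),
        lambda w: {"cat": "npr"} if w in NAMES else None,              # 5
        lambda w: ({"cat": "v", "root": w[:-3], "conf": "low"}         # 6
                   if w.endswith("ing") else None),
    ]
    if word in lexicon:
        return lexicon[word]
    for stage in stages:
        entry = stage(word)
        if entry is not None:
            lexicon[word] = entry      # analysis happens once per word type
            return entry
    return {"cat": "n", "conf": "guess"}

lex = {"fish": {"cat": "n"}, "bit": {"cat": "n"}, "map": {"cat": "n"}}
print(analyze("fishing", lex))   # step 2: suffix rule over a known root
print(analyze("bitmap", lex))    # step 4: lexical compound bit+map
```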
139
- {
140
- "text": "For example, if fish is a known word, then fishing is as good as known, so it is considered a \"known\" root for this purpose. In general, suffix rules applied to \"known\" roots are more reliable than applications of rules to unknown roots or to words with no identifiable root.",
141
- "cite_spans": [],
142
- "ref_spans": [],
143
- "eq_spans": [],
144
- "section": "Morphological rules and the lexicon",
145
- "sec_num": "2.1"
146
- },
147
- {
148
- "text": "If no phase-one suffix rules apply, prefix rules are tried in Step 3 to see if an interpretation of this word as a prefix combined with some other \"known\" word is possible. Failing this, a set of lexical compound rules is tried, in Step 4, to see if the word is interpretable as a compound of two or more words, and failing that, lists of first and last names of people and names of cities are checked in Step 5. All of steps 3-5 are considered more reliable if they succeed than the phase-two pass of the suffix rules that comes in Step 6. This ordering allows prefixes and compounding to be tried before less confident suffix analyses are attempted, and avoids applying weak suffix analyses to known names. Various other ways to order these rules have been tried, but this order has been found to be the most effective.",
149
- "cite_spans": [],
150
- "ref_spans": [],
151
- "eq_spans": [],
152
- "section": "Morphological rules and the lexicon",
153
- "sec_num": "2.1"
154
- },
155
- {
156
- "text": "Before trying pattern-based rules for suffixes, prefixes, and lexical compounds, the morphological analyzer makes a number of tests for special forms that require idiosyncratic treatment. These tests include the following:",
157
- "cite_spans": [],
158
- "ref_spans": [],
159
- "eq_spans": [],
160
- "section": "Special form tests",
161
- "sec_num": "2.2"
162
- },
163
- {
164
- "text": "\u2022 number (including integer, floating, and exponential notations, including numbers too large to be represented internally as numbers in the machine),",
165
- "cite_spans": [],
166
- "ref_spans": [],
167
- "eq_spans": [],
168
- "section": "Special form tests",
169
- "sec_num": "2.2"
170
- },
171
- {
172
- "text": "\u2022 Roman numeral (vii, mcm),",
173
- "cite_spans": [],
174
- "ref_spans": [],
175
- "eq_spans": [],
176
- "section": "Special form tests",
177
- "sec_num": "2.2"
178
- },
179
- {
180
- "text": "\u2022 ordinal (1st, 2nd, twenty-third), \u2022 alphanum (Al203, 79D), \u2022 letter (b, x), \u2022 initial (B.),",
181
- "cite_spans": [],
182
- "ref_spans": [],
183
- "eq_spans": [],
184
- "section": "Special form tests",
185
- "sec_num": "2.2"
186
- },
187
- {
188
- "text": "\u2022 phone number (123-4567),",
189
- "cite_spans": [],
190
- "ref_spans": [],
191
- "eq_spans": [],
192
- "section": "Special form tests",
193
- "sec_num": "2.2"
194
- },
195
- {
196
- "text": "\u2022 hyphenated adjective (all-volunteer),",
197
- "cite_spans": [],
198
- "ref_spans": [],
199
- "eq_spans": [],
200
- "section": "Special form tests",
201
- "sec_num": "2.2"
202
- },
203
- {
204
- "text": "\u2022 ratio (s/S, V/R),",
205
- "cite_spans": [],
206
- "ref_spans": [],
207
- "eq_spans": [],
208
- "section": "Special form tests",
209
- "sec_num": "2.2"
210
- },
211
- {
212
- "text": "\u2022 multiword lexical item (snake_in_the_grass),",
213
- "cite_spans": [],
214
- "ref_spans": [],
215
- "eq_spans": [],
216
- "section": "Special form tests",
217
- "sec_num": "2.2"
218
- },
219
- {
220
- "text": "\u2022 special proper nouns ([email protected], /usr/bin, http://www.sun.com, C+ + )",
221
- "cite_spans": [],
222
- "ref_spans": [],
223
- "eq_spans": [],
224
- "section": "Special form tests",
225
- "sec_num": "2.2"
226
- },
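Several of the prechecks above are naturally expressed as regular expressions. The patterns below are illustrative guesses at a few of the categories, not the system's actual definitions; order matters, since for example numbers must be tried before the alphanum catch-all:

```python
import re

PRECHECKS = [
    ("number",        re.compile(r"[0-9]+(\.[0-9]+)?([eE][-+]?[0-9]+)?")),
    ("roman_numeral", re.compile(r"[ivxlcdm]+")),
    ("ordinal",       re.compile(r"[0-9]+(st|nd|rd|th)")),
    ("alphanum",      re.compile(r"(?=.*[0-9])[A-Za-z0-9/]+")),
    ("initial",       re.compile(r"[A-Za-z]\.")),
    ("phone_number",  re.compile(r"[0-9]{3}-[0-9]{4}")),
]

def precheck(word):
    """Return the label of the first special form that matches, or None."""
    for label, pattern in PRECHECKS:
        if pattern.fullmatch(word):
            return label
    return None

print(precheck("mcm"), precheck("123-4567"), precheck("79D"))
# roman_numeral phone_number alphanum
```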
227
- {
228
- "text": "2.3 Pattern-action rules\n\nSuffix rules in this system are pattern-action rules that specify:",
229
- "cite_spans": [],
230
- "ref_spans": [],
231
- "eq_spans": [],
232
- "section": "Special form tests",
233
- "sec_num": "2.2"
234
- },
235
- {
236
- "text": "1. a pattern of characters to match at the end of the word to be analyzed, 2. possibly a number of characters to remove and/or a sequence of characters to add to form a root (or base form),",
237
- "cite_spans": [],
238
- "ref_spans": [],
239
- "eq_spans": [],
240
- "section": "Special form tests",
241
- "sec_num": "2.2"
242
- },
243
- {
244
- "text": "3. a sequence of tests and action clauses indicating possible interpretations of a word matching this pattern.",
245
- "cite_spans": [],
246
- "ref_spans": [],
247
- "eq_spans": [],
248
- "section": "Special form tests",
249
- "sec_num": "2.2"
250
- },
251
- {
252
- "text": "These rules are organized into blocks that are typically indexed by a shared final letter, and are applied in order within a block until a rule is encountered that generates one or more interpretations. At that point, no further rules are tried, and the interpretations generated by that rule are used to construct a lexical entry for the analyzed word.",
253
- "cite_spans": [],
254
- "ref_spans": [],
255
- "eq_spans": [],
256
- "section": "Special form tests",
257
- "sec_num": "2.2"
258
- },
259
- {
260
- "text": "The following is an example of a fairly specific, but productive, knowledge-rich morphological suffix rule:",
261
- "cite_spans": [],
262
- "ref_spans": [],
263
- "eq_spans": [],
264
- "section": "Special form tests",
265
- "sec_num": "2.2"
266
- },
267
- {
268
- "text": "((f i s h) (kill 4) (test (plausible-root root)) (cat nmsp (is-root-of-cat root '(adj n)) eval (progn (mark-dict lex 'false-root root t t) (mark-dict lex 'kindof 'fish t t) (mark-dict lex 'has-prefix root t t) (mark-dict lex 'root 'fish t t) '-es)))",
269
- "cite_spans": [],
270
- "ref_spans": [],
271
- "eq_spans": [],
272
- "section": "Special form tests",
273
- "sec_num": "2.2"
274
- },
275
- {
276
- "text": "This rule matches a word that ends in fish and removes four letters from the end (the fish part) to produce a root word which it then tests to see if it is a plausible root (e.g., does it at least have a vowel in it?). If it gets this far, the rule will construct a category nmsp interpretation (a kind of noun), if the condition (is-root-of-cat root '(adj n)) is true (i.e., if the root is a known adjective or noun).",
277
- "cite_spans": [],
278
- "ref_spans": [],
279
- "eq_spans": [],
280
- "section": "Special form tests",
281
- "sec_num": "2.2"
282
- },
283
- {
284
- "text": "This rule deals with words like hagfish and goatfish and comes before the rules that handle words with ish as a suffix, like doltish and oafish. Incidentally, this rule doesn't apply to oafish because the hypothesized root oa, which would result from removing four letters, is not known to be an adjective or noun. When this rule succeeds, it specifies that the word will be assigned the category nmsp, a category indicating a word that has a mass sense, a singular count sense, and can also be used as a plural (e.g., Goatfish are funny-looking.). (The category nmsp comes from a collection of 91 syntactic categories, organized in a hierarchy based on generality, so that, for example, nm subsumes nmsp.) The action part of this rule specifies that (contrary to the usual case) the \"root\" obtained by removing characters from the end of the word (e.g., goat) is in this case a false root. The real root is fish, and the false root (goat) is actually a prefix. The rule also specifies that the word refers to a kind of fish and that the inflectional paradigm for this word is -es (thus allowing goatfishes as an alternative plural).",
285
- "cite_spans": [],
286
- "ref_spans": [],
287
- "eq_spans": [],
288
- "section": "Special form tests",
289
- "sec_num": "2.2"
290
- },
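The pattern/kill/test/action anatomy of a suffix rule, and the first-success discipline within a block, can be sketched directly. The class and field names below are invented, and the toy "plausible root" test stands in for the real condition; the goatfish rule above is used as the example:

```python
class SuffixRule:
    """One pattern-action rule: match a word ending, strip `kill`
    characters to hypothesize a root, test it, build interpretations."""
    def __init__(self, pattern, kill, test, action):
        self.pattern, self.kill = pattern, kill
        self.test, self.action = test, action

    def apply(self, word):
        if not word.endswith(self.pattern):
            return []
        root = word[:-self.kill] if self.kill else word
        if not self.test(root):
            return []
        return self.action(word, root)

def apply_block(rules, word):
    """Rules in a block are tried in order; the first rule producing
    any interpretations ends the search."""
    for rule in rules:
        interpretations = rule.apply(word)
        if interpretations:
            return interpretations
    return []

plausible_root = lambda r: any(c in "aeiouy" for c in r)  # toy test
fish_rule = SuffixRule("fish", 4, plausible_root,
                       lambda w, r: [{"cat": "nmsp", "root": "fish",
                                      "prefix": r, "kindof": "fish",
                                      "paradigm": "-es"}])
print(apply_block([fish_rule], "goatfish"))
```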
291
- {
292
- "text": "The rules within a block are ordered in decreasing order of confidence and specificity. Thus, rules with conditions that check explicit inflectional paradigms of known roots are ordered before rules that guess the inflectional paradigm from the spelling of the root, and rules with more specific conditions are ordered before rules with less specific conditions so that the latter can assume that the former will already have been tested and rejected. The rules within a block of suffix rules will typically try for interpretations in roughly the following order: The last rule in this sequence is a default guessing rule that depends on a flag that tells it whether it is running with a core lexicon that is believed to contain most nonobvious verbs. If so, then only the noun part-of-speech is assigned, but with a smaller core lexicon, the guessing rules would also assign a less likely interpretation as a verb, in order to provide a way for unknown verbs to be parsed correctly in sentences.",
293
- "cite_spans": [],
294
- "ref_spans": [],
295
- "eq_spans": [],
296
- "section": "Special form tests",
297
- "sec_num": "2.2"
298
- },
299
- {
300
- "text": "Prefix rules are similar in structure to suffix rules, except that the pattern is matched at the beginning of the word, and the rule blocks are indexed by the initial letter of the word. Lexical compound rules have a slightly different format and are called by a specialized interpreter that looks for places to divide a word into two pieces of sufficient size. The points of potential decomposition are searched from right to left, and the first such point that has an interpretation is taken, with the following exception: The morph compound analyzer checks for special cases where, for example, the first word is plural and ends in an s, but there is an alternative segmentation in which the singular of the first word is followed by a word starting with the s. In such cases, the decomposition using the singular first word is preferred over the one using the plural. For example, the word minesweeper will be analyzed as mine+sweeper rather than mines+weeper. This preference heuristic is specific to English and might be different for other languages.",
301
- "cite_spans": [],
302
- "ref_spans": [],
303
- "eq_spans": [],
304
- "section": "Special form tests",
305
- "sec_num": "2.2"
306
- },
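The right-to-left split search with the plural-preference heuristic might look like this (a sketch under the assumption that the lexicon is a simple set of known words):

```python
def split_compound(word, lexicon, min_part=3):
    """Search split points right to left; prefer a singular first word
    over a plural when the 's' can start the second word."""
    for i in range(len(word) - min_part, min_part - 1, -1):
        left, right = word[:i], word[i:]
        if left in lexicon and right in lexicon:
            if (left.endswith("s") and left[:-1] in lexicon
                    and "s" + right in lexicon):
                return left[:-1], "s" + right   # mine+sweeper, not mines+weeper
            return left, right
    return None

lex = {"mine", "mines", "sweeper", "weeper"}
print(split_compound("minesweeper", lex))       # ('mine', 'sweeper')
```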
307
- {
308
- "text": "When attempting to apply a rule to a word, the morphological analyzer can be applied recursively to analyze the hypothesized root. A simple caching technique is used to control the potential for combinatoric explosion and to block looping. This is sufficiently effective that the time required for morphological analysis is a negligible part of the time required for processing large amounts of natural language text. Protection against looping is especially important for a kind of morphological rule that derives one word from another without either of them being a root of the other in the usual sense (e.g., deriving communist from communism or external from internal). Operating in a loop-safe environment allows rules like these to identify the relationship between a new word and a known word in either direction, whichever of the two forms is encountered first.",
309
- "cite_spans": [],
310
- "ref_spans": [],
311
- "eq_spans": [],
312
- "section": "Recursive application of rules",
313
- "sec_num": "2.4"
314
- },
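The loop-safe caching can be sketched with an "in progress" marker that cuts cycles, which is what lets a derivational rule relate communist and communism in either direction without recursing forever (toy code with invented names):

```python
IN_PROGRESS = object()   # sentinel: this word is currently being analyzed
CACHE = {}

def analyze_cached(word, rules):
    hit = CACHE.get(word)
    if hit is IN_PROGRESS:
        return None              # already inside this word: cut the loop
    if word in CACHE:
        return hit
    CACHE[word] = IN_PROGRESS
    result = rules(word)
    CACHE[word] = result         # analysis is done once per word type
    return result

def toy_rules(word):
    # a derivational rule working in both directions, like the
    # communist/communism pair mentioned above
    if word.endswith("ist"):
        return {"cat": "n", "related": analyze_cached(word[:-3] + "ism", toy_rules)}
    if word.endswith("ism"):
        return {"cat": "n", "related": analyze_cached(word[:-3] + "ist", toy_rules)}
    return None

print(analyze_cached("communist", toy_rules))
# {'cat': 'n', 'related': {'cat': 'n', 'related': None}}
```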
315
- {
316
- "text": "Since analyzing a word is done once per unknown word type and consumes a negligible fraction of the overall text-processing time, speed of operation is not considered a factor for evaluation. The interesting dimension of evaluation deals with the coverage of the rules and the kinds of errors that are made. This was tested by applying the system to two word lists randomly selected from the Brown corpus and provided to me by Philip Resnik, using some sampling tools that he developed. The first of these (the token sample) consists of 100 word tokens selected randomly, without eliminating duplicates, and the second (the type sample) consists of 100 distinct word types selected randomly from the vocabulary of the Brown corpus. Prior to a single test run on each of these samples, neither the lexicon nor the morphological rule system had any exposure to the Brown corpus, nor had either of these word lists been looked at by the experimenter. Consequently, the results are a fair evaluation of the expected performance of this system on an unknown domain.",
317
- "cite_spans": [],
318
- "ref_spans": [],
319
- "eq_spans": [],
320
- "section": "Evaluation",
321
- "sec_num": "3"
322
- },
323
- {
324
- "text": "Since different syntactic category errors have different consequences for parsing text, it is useful to grade the syntactic category assignments of the analyzer on an A-B-C-D-F scale according to the severity of any mistakes. Grades are assigned to a lexical entry as follows:",
325
- "cite_spans": [],
326
- "ref_spans": [],
327
- "eq_spans": [],
328
- "section": "Grading rule performance",
329
- "sec_num": "3.1"
330
- },
331
- {
332
- "text": "A if all appropriate syntactic categories are assigned and no incorrect categories are assigned\nB if all categories are correct, allowing for categorizing an adjective or a name as a noun or a noun as a name\nC if an entry has at least one correct category and is correct except for missing a noun category or having a single extra category\nD if there is more than one extra category or if there is a missing category other than one of the above cases, provided that there is at least one correct category\nF if there are no correct categories\n\nBoth A and B grades are considered acceptable assignments for the sake of evaluation, since category B errors would allow a reasonable parse to be found. This is because the grammar used for parsing sentences and phrases allows a noun to be used as an adjective modifier and a proper noun to be used in place of a noun. One parser/grammar that uses this lexicon also allows any other category to be used as a noun, at the expense of a penalty, so that a C grade will still enable a parse, although with a penalty and a substantial likelihood that other false parses might score better. Similarly, a D grade increases the likelihood that a false parse might score better.",
333
- "cite_spans": [],
334
- "ref_spans": [],
335
- "eq_spans": [],
336
- "section": "Grading rule performance",
337
- "sec_num": "3.1"
338
- },
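The grading scheme is concrete enough to execute. The sketch below simplifies the grade-B leniency by folding adjective and proper-name categories into noun symmetrically, which is slightly looser than the asymmetric rule stated above:

```python
def normalize(cats):
    # fold adjective/name into noun (approximation of the B leniency)
    return {"n" if c in ("adj", "npr") else c for c in cats}

def grade(assigned, gold):
    a, g = set(assigned), set(gold)
    if a == g:
        return "A"
    if normalize(a) == normalize(g):
        return "B"
    if not a & g:
        return "F"                  # no correct categories at all
    extra, missing = a - g, g - a
    if len(extra) <= 1 and missing <= {"n"} and not (extra and missing):
        return "C"                  # missing noun, or one extra category
    return "D"

print(grade({"n"}, {"npr"}))     # B: a name categorized as a noun
print(grade({"v"}, {"v", "n"}))  # C: missing a noun category
print(grade({"v"}, {"n"}))       # F: no correct category
```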
339
- {
340
- "text": "Separately, we measure whether count/mass distinctions are made correctly (for nouns only), and whether roots of derived and inflected forms are identified correctly.",
341
- "cite_spans": [],
342
- "ref_spans": [],
343
- "eq_spans": [],
344
- "section": "Grading rule performance",
345
- "sec_num": "3.1"
346
- },
347
- {
348
- "text": "We are interested in the count/mass distinction because, like the common/proper noun distinction, it affects the grammaticality and likelihood of a noun phrase interpretation for a singular noun in absence of an explicit determiner.",
349
- "cite_spans": [],
350
- "ref_spans": [],
351
- "eq_spans": [],
352
- "section": "Grading rule performance",
353
- "sec_num": "3.1"
354
- },
355
- {
356
- "text": "The morphological analyzer has been applied to the words from the two sample word lists that were not already in its core lexicon. There were 17 such words from the token sample and 72 such words from the type sample. Of the 17 unknown token-sample words, 100% were graded B or better (88% A and 12% B); 85% of the roots were identified correctly (all but one); 85% of the count noun senses were found (all but one); and 100% of the mass noun senses were found. Token-sample performance is not a very challenging test for a morphological analyzer because it is biased toward a relatively small number of frequently occurring word types. Token-sample performance is used to assess the per-token error rate that one would expect in analyzing large amounts of running text. In contrast, type-sample performance gives a measure of the expected performance on new words the analyzer is likely to encounter. For the 72 words in the type sample that are not covered by the lexicon, Tables 1-3 show the syntactic category performance of the analyzer and its abilities to make count/mass distinctions and identify roots.",
357
- "cite_spans": [],
358
- "ref_spans": [],
359
- "eq_spans": [],
360
- "section": "Sampling rule performance",
361
- "sec_num": "3.2"
362
- },
363
- {
364
- "text": "Notes on incorrect or debatable analyses:",
365
- "cite_spans": [],
366
- "ref_spans": [],
367
- "eq_spans": [],
368
- "section": "Sampling rule performance",
369
- "sec_num": "3.2"
370
- },
371
- {
372
- "text": "1. One N (noun) for a probable name (Tonio), counted as B. 2. Two NPR (proper name) for abbreviations;",
373
- "cite_spans": [],
374
- "ref_spans": [],
375
- "eq_spans": [],
376
- "section": "Sampling rule performance",
377
- "sec_num": "3.2"
378
- },
379
- {
380
- "text": "(A. V. may be ADJ, W.B. is correct), counted as one B and one A.",
381
- "cite_spans": [],
382
- "ref_spans": [],
383
- "eq_spans": [],
384
- "section": "Sampling rule performance",
385
- "sec_num": "3.2"
386
- },
387
- {
388
- "text": "3. One wrong root when suffix ism was identified as root of hooliganism in a hypothesized compound hooligan+ism (arguably justifiable as a kind of ism, which is known in the lexicon, but counted as an error anyway). Reanalyzing this word after hooligan is a known word gets the correct interpretation. 4. One debatable root in the hyphenated phrase reference-points whose root was listed as points rather than reference-point. This is due to a bug that caused the hyphenated word rules to incorrectly identify this as a verb, rather than a noun (counted as F for syntax). 5. One extra root for embouchure from embouche (but a correct form of the French root?). 6. One missing category N for bobbles, which was given category V but not N because the core lexicon incorrectly listed bobble only as a verb (counted as C for syntax). This is corrected by adding the missing category to the lexical entry for bobble.",
389
- "cite_spans": [],
390
- "ref_spans": [],
391
- "eq_spans": [],
392
- "section": "Sampling rule performance",
393
- "sec_num": "3.2"
394
- },
395
- {
396
- "text": "We have described an approach to robust lexical coverage for unrestricted text applications that makes use of an aggressive set of morphological rules to supplement a core lexicon of approximately 39,000 words to give lexical coverage that exceeds that of a much larger lexicon. This morphological analyzer is integrated with an extensive lexicon, an ontology, and a syntactic analysis system, which it both consults and augments. It uses ordered preferential rules that attempt to choose a small number of correct analyses of a word and are designed to deal with various states of lack of knowledge. When applied to 72 unknown words from a random sample of 100 distinct word types from the Brown corpus, its syntactic category assignments received a grade of B or better (using a grading system explained herein) for 97% of the words, and it correctly identified 95% of the root words. This performance demonstrates that one can obtain robust lexical coverage for natural language processing applications in unrestricted domains, using a relatively small core lexicon and an aggressive collection of morphological rules.",
397
- "cite_spans": [],
398
- "ref_spans": [],
399
- "eq_spans": [],
400
- "section": "Conclusions",
401
- "sec_num": "4"
402
- }
403
- ],
404
- "back_matter": [],
405
- "bib_entries": {
406
- "BIBREF0": {
407
- "ref_id": "b0",
408
- "title": "Natural language technology in precision content retrieval",
409
- "authors": [
410
- {
411
- "first": "Jacek",
412
- "middle": [],
413
- "last": "Ambroziak",
414
- "suffix": ""
415
- },
416
- {
417
- "first": "A",
418
- "middle": [],
419
- "last": "William",
420
- "suffix": ""
421
- },
422
- {
423
- "first": "",
424
- "middle": [],
425
- "last": "Woods",
426
- "suffix": ""
427
- }
428
- ],
429
- "year": 1998,
430
- "venue": "International Conference on Natural Language Processing and Industrial Applications",
431
- "volume": "",
432
- "issue": "",
433
- "pages": "",
434
- "other_ids": {},
435
- "num": null,
436
- "urls": [],
437
- "raw_text": "Jacek Ambroziak and William A. Woods. 1998. Natural language technology in precision content retrieval. In International Conference on Natural Language Processing and Industrial Applications, Moncton, New Brunswick, Canada, August. www.sun.com/research/techrep/1998/abstract-69.html.",
438
- "links": null
439
- },
440
- "BIBREF1": {
441
- "ref_id": "b1",
442
- "title": "Two-level model for morphological analysis",
443
- "authors": [
444
- {
445
- "first": "Kimmo",
446
- "middle": [],
447
- "last": "Koskenniemi",
448
- "suffix": ""
449
- }
450
- ],
451
- "year": 1983,
452
- "venue": "Proceedings of the International Joint Conference on Artificial Intelligence",
453
- "volume": "",
454
- "issue": "",
455
- "pages": "683--685",
456
- "other_ids": {},
457
- "num": null,
458
- "urls": [],
459
- "raw_text": "Kimmo Koskenniemi. 1983. Two-level model for morphological analysis. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 683-685, Los Angeles, CA. Morgan Kaufmann.",
460
- "links": null
461
- },
462
- "BIBREF2": {
463
- "ref_id": "b2",
464
- "title": "Computational Analysis of Present-Day American English",
465
- "authors": [
466
- {
467
- "first": "H",
468
- "middle": [],
469
- "last": "Kucera",
470
- "suffix": ""
471
- },
472
- {
473
- "first": "W",
474
- "middle": [],
475
- "last": "Francis",
476
- "suffix": ""
477
- }
478
- ],
479
- "year": 1967,
480
- "venue": "",
481
- "volume": "",
482
- "issue": "",
483
- "pages": "",
484
- "other_ids": {},
485
- "num": null,
486
- "urls": [],
487
- "raw_text": "H. Kucera and W. Francis. 1967. Computational Analysis of Present-Day American English. Brown University Press.",
488
- "links": null
489
- },
490
- "BIBREF3": {
491
- "ref_id": "b3",
492
- "title": "Morphology and Computation",
493
- "authors": [
494
- {
495
- "first": "Richard",
496
- "middle": [],
497
- "last": "Sproat",
498
- "suffix": ""
499
- }
500
- ],
501
- "year": 1992,
502
- "venue": "",
503
- "volume": "",
504
- "issue": "",
505
- "pages": "",
506
- "other_ids": {},
507
- "num": null,
508
- "urls": [],
509
- "raw_text": "Richard Sproat. 1992. Morphology and Computa- tion. MIT Press, Cambridge, MA.",
510
- "links": null
511
- },
512
- "BIBREF4": {
513
- "ref_id": "b4",
514
- "title": "The lunar sciences natural language information system: Final report",
515
- "authors": [
516
- {
517
- "first": "William",
518
- "middle": [
519
- "A"
520
- ],
521
- "last": "Woods",
522
- "suffix": ""
523
- },
524
- {
525
- "first": "Ronald",
526
- "middle": [
527
- "M"
528
- ],
529
- "last": "Kaplan",
530
- "suffix": ""
531
- },
532
- {
533
- "first": "Bonnie",
534
- "middle": [
535
- "L"
536
- ],
537
- "last": "Nash-Webber",
538
- "suffix": ""
539
- }
540
- ],
541
- "year": 1972,
542
- "venue": "",
543
- "volume": "",
544
- "issue": "",
545
- "pages": "",
546
- "other_ids": {},
547
- "num": null,
548
- "urls": [],
549
- "raw_text": "William A. Woods, Ronald M. Kaplan, and Bon- nie L. Nash-Webber. 1972. The lunar sciences natural language information system: Final re- port. Technical Report BBN Report No. 2378, Bolt Beranek and Newman Inc, Cambridge, MA, June. (available from NTIS as N72-28984).",
550
- "links": null
551
- },
552
- "BIBREF5": {
553
- "ref_id": "b5",
554
- "title": "Linguistic knowledge can improve information retrieval",
555
- "authors": [
556
- {
557
- "first": "William",
558
- "middle": [
559
- "A"
560
- ],
561
- "last": "Woods",
562
- "suffix": ""
563
- },
564
- {
565
- "first": "Lawrence",
566
- "middle": [
567
- "A"
568
- ],
569
- "last": "Bookman",
570
- "suffix": ""
571
- },
572
- {
573
- "first": "Ann",
574
- "middle": [
575
- "C"
576
- ],
577
- "last": "Houston",
578
- "suffix": ""
579
- },
580
- {
581
- "first": "Robert",
582
- "middle": [
583
- "J"
584
- ],
585
- "last": "Kuhns",
586
- "suffix": ""
587
- },
588
- {
589
- "first": "Paul",
590
- "middle": [
591
- "A"
592
- ],
593
- "last": "Martin",
594
- "suffix": ""
595
- },
596
- {
597
- "first": "Stephen",
598
- "middle": [],
599
- "last": "Green",
600
- "suffix": ""
601
- }
602
- ],
603
- "year": 2000,
604
- "venue": "",
605
- "volume": "",
606
- "issue": "",
607
- "pages": "",
608
- "other_ids": {},
609
- "num": null,
610
- "urls": [],
611
- "raw_text": "William A. Woods, Lawrence A. Bookman, Ann C. Houston, Robert J. Kuhns, Paul A. Martin, and Stephen Green. 2000. Linguistic knowledge can improve information retrieval. In (these proceed- ings).",
612
- "links": null
613
- },
614
- "BIBREF6": {
615
- "ref_id": "b6",
616
- "title": "Conceptual indexing: A better way to organize knowledge",
617
- "authors": [
618
- {
619
- "first": "William",
620
- "middle": [
621
- "A"
622
- ],
623
- "last": "Woods",
624
- "suffix": ""
625
- }
626
- ],
627
- "year": 1997,
628
- "venue": "",
629
- "volume": "",
630
- "issue": "",
631
- "pages": "",
632
- "other_ids": {},
633
- "num": null,
634
- "urls": [],
635
- "raw_text": "William A. Woods. 1997. Conceptual indexing: A better way to organize knowledge. Technical Report SMLI TR-97-61, Sun Microsystems Laboratories, Mountain View, CA, April. www.sun.com/research/techrep/1997/abstract- 61.html.",
636
- "links": null
637
- }
638
- },
639
- "ref_entries": {
640
- "TABREF2": {
641
- "html": null,
642
- "text": "Syntactic cate ;ory performance of the analyzer.",
643
- "num": null,
644
- "type_str": "table",
645
- "content": "<table><tr><td>Category Grade</td><td>A</td><td>B</td><td>C</td><td>D</td><td colspan=\"2\">F B or better</td></tr><tr><td>Number</td><td>62</td><td>8</td><td>1</td><td>0</td><td>1</td><td>70</td></tr><tr><td>Percent</td><td colspan=\"5\">86% 11% 1.5% 0% 1.5%</td><td>97%</td></tr></table>"
646
- },
647
- "TABREF3": {
648
- "html": null,
649
- "text": "Count/mass distinction performance of the analyzer. Count/mass Good count Extra count Good mass Missing mass",
650
- "num": null,
651
- "type_str": "table",
652
- "content": "<table><tr><td>Number</td><td>39</td><td>1</td><td>14</td><td>1</td></tr><tr><td>Percent</td><td>100%</td><td>2.6%</td><td>93%</td><td>6.7%</td></tr></table>"
653
- },
654
- "TABREF4": {
655
- "html": null,
656
- "text": "Root identification performance of the analyzer.",
657
- "num": null,
658
- "type_str": "table",
659
- "content": "<table><tr><td colspan=\"6\">Detect root Good Wrong Debatable Missing Extra</td></tr><tr><td>Number</td><td>57</td><td>1</td><td>1</td><td>0</td><td>1</td></tr><tr><td>Percent</td><td>95%</td><td>1.7%</td><td>1.7%</td><td>0</td><td>1.7%</td></tr></table>"
660
- }
661
- }
662
- }
663
- }
Full_text_JSON/prefixA/json/A00/A00-1031.json DELETED
@@ -1,1184 +0,0 @@
1
- {
2
- "paper_id": "A00-1031",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:11.304458Z"
6
- },
7
- "title": "TnT --A Statistical Part-of-Speech Tagger",
8
- "authors": [
9
- {
10
- "first": "Thorsten",
11
- "middle": [],
12
- "last": "Brants",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Saarland University Computational Linguistics",
17
- "location": {
18
- "postCode": "D-66041",
19
- "settlement": "Saarbriicken",
20
- "country": "Germany"
21
- }
22
- },
23
- "email": "thorsten@coli@de"
24
- }
25
- ],
26
- "year": "",
27
- "venue": null,
28
- "identifiers": {},
29
- "abstract": "Trigrams'n'Tags (TnT) is an efficient statistical part-of-speech tagger. Contrary to claims found elsewhere in the literature, we argue that a tagger based on Markov models performs at least as well as other current approaches, including the Maximum Entropy framework. A recent comparison has even shown that TnT performs significantly better for the tested corpora. We describe the basic model of TnT, the techniques used for smoothing and for handling unknown words. Furthermore, we present evaluations on two corpora.",
30
- "pdf_parse": {
31
- "paper_id": "A00-1031",
32
- "_pdf_hash": "",
33
- "abstract": [
34
- {
35
- "text": "Trigrams'n'Tags (TnT) is an efficient statistical part-of-speech tagger. Contrary to claims found elsewhere in the literature, we argue that a tagger based on Markov models performs at least as well as other current approaches, including the Maximum Entropy framework. A recent comparison has even shown that TnT performs significantly better for the tested corpora. We describe the basic model of TnT, the techniques used for smoothing and for handling unknown words. Furthermore, we present evaluations on two corpora.",
36
- "cite_spans": [],
37
- "ref_spans": [],
38
- "eq_spans": [],
39
- "section": "Abstract",
40
- "sec_num": null
41
- }
42
- ],
43
- "body_text": [
44
- {
45
- "text": "A large number of current language processing systems use a part-of-speech tagger for pre-processing. The tagger assigns a (unique or ambiguous) part-ofspeech tag to each token in the input and passes its output to the next processing level, usually a parser. Furthermore, there is a large interest in part-ofspeech tagging for corpus annotation projects, who create valuable linguistic resources by a combination of automatic processing and human correction.",
46
- "cite_spans": [],
47
- "ref_spans": [],
48
- "eq_spans": [],
49
- "section": "Introduction",
50
- "sec_num": "1"
51
- },
52
- {
53
- "text": "For both applications, a tagger with the highest possible accuracy is required. The debate about which paradigm solves the part-of-speech tagging problem best is not finished. Recent comparisons of approaches that can be trained on corpora (van Halteren et al., 1998; Volk and Schneider, 1998) have shown that in most cases statistical aproaches (Cutting et al., 1992; Schmid, 1995; Ratnaparkhi, 1996) yield better results than finite-state, rule-based, or memory-based taggers (Brill, 1993; Daelemans et al., 1996) . They are only surpassed by combinations of different systems, forming a \"voting tagger\".",
54
- "cite_spans": [
55
- {
56
- "start": 240,
57
- "end": 267,
58
- "text": "(van Halteren et al., 1998;",
59
- "ref_id": "BIBREF11"
60
- },
61
- {
62
- "start": 268,
63
- "end": 293,
64
- "text": "Volk and Schneider, 1998)",
65
- "ref_id": "BIBREF12"
66
- },
67
- {
68
- "start": 346,
69
- "end": 368,
70
- "text": "(Cutting et al., 1992;",
71
- "ref_id": "BIBREF3"
72
- },
73
- {
74
- "start": 369,
75
- "end": 382,
76
- "text": "Schmid, 1995;",
77
- "ref_id": "BIBREF9"
78
- },
79
- {
80
- "start": 383,
81
- "end": 401,
82
- "text": "Ratnaparkhi, 1996)",
83
- "ref_id": "BIBREF7"
84
- },
85
- {
86
- "start": 478,
87
- "end": 491,
88
- "text": "(Brill, 1993;",
89
- "ref_id": "BIBREF1"
90
- },
91
- {
92
- "start": 492,
93
- "end": 515,
94
- "text": "Daelemans et al., 1996)",
95
- "ref_id": "BIBREF4"
96
- }
97
- ],
98
- "ref_spans": [],
99
- "eq_spans": [],
100
- "section": "Introduction",
101
- "sec_num": "1"
102
- },
103
- {
104
- "text": "Among the statistical approaches, the Maximum Entropy framework has a very strong position. Nevertheless, a recent independent comparison of 7 taggets (Zavrel and Daelemans, 1999) has shown that another approach even works better: Markov models combined with a good smoothing technique and with handling of unknown words. This tagger, TnT, not only yielded the highest accuracy, it also was the fastest both in training and tagging.",
105
- "cite_spans": [
106
- {
107
- "start": 151,
108
- "end": 179,
109
- "text": "(Zavrel and Daelemans, 1999)",
110
- "ref_id": "BIBREF13"
111
- }
112
- ],
113
- "ref_spans": [],
114
- "eq_spans": [],
115
- "section": "Introduction",
116
- "sec_num": "1"
117
- },
118
- {
119
- "text": "The tagger comparison was organized as a \"blackbox test\": set the same task to every tagger and compare the outcomes. This paper describes the models and techniques used by TnT together with the implementation.",
120
- "cite_spans": [],
121
- "ref_spans": [],
122
- "eq_spans": [],
123
- "section": "Introduction",
124
- "sec_num": "1"
125
- },
126
- {
127
- "text": "The reader will be surprised how simple the underlying model is. The result of the tagger comparison seems to support the maxime \"the simplest is the best\". However, in this paper we clarify a number of details that are omitted in major previous publications concerning tagging with Markov models. As two examples, (Rabiner, 1989) and (Charniak et al., 1993) give good overviews of the techniques and equations used for Markov models and part-ofspeech tagging, but they are not very explicit in the details that are needed for their application. We argue that it is not only the choice of the general model that determines the result of the tagger but also the various \"small\" decisions on alternatives.",
128
- "cite_spans": [
129
- {
130
- "start": 315,
131
- "end": 330,
132
- "text": "(Rabiner, 1989)",
133
- "ref_id": "BIBREF6"
134
- },
135
- {
136
- "start": 335,
137
- "end": 358,
138
- "text": "(Charniak et al., 1993)",
139
- "ref_id": "BIBREF2"
140
- }
141
- ],
142
- "ref_spans": [],
143
- "eq_spans": [],
144
- "section": "Introduction",
145
- "sec_num": "1"
146
- },
147
- {
148
- "text": "The aim of this paper is to give a detailed account of the techniques used in TnT. Additionally, we present results of the tagger on the NEGRA corpus (Brants et al., 1999) and the Penn Treebank (Marcus et al., 1993) . The Penn Treebank results reported here for the Markov model approach are at least equivalent to those reported for the Maximum Entropy approach in (Ratnaparkhi, 1996) . For a comparison to other taggers, the reader is referred to (Zavrel and Daelemans, 1999) .",
149
- "cite_spans": [
150
- {
151
- "start": 150,
152
- "end": 171,
153
- "text": "(Brants et al., 1999)",
154
- "ref_id": "BIBREF0"
155
- },
156
- {
157
- "start": 194,
158
- "end": 215,
159
- "text": "(Marcus et al., 1993)",
160
- "ref_id": "BIBREF5"
161
- },
162
- {
163
- "start": 366,
164
- "end": 385,
165
- "text": "(Ratnaparkhi, 1996)",
166
- "ref_id": "BIBREF7"
167
- },
168
- {
169
- "start": 449,
170
- "end": 477,
171
- "text": "(Zavrel and Daelemans, 1999)",
172
- "ref_id": "BIBREF13"
173
- }
174
- ],
175
- "ref_spans": [],
176
- "eq_spans": [],
177
- "section": "Introduction",
178
- "sec_num": "1"
179
- },
180
- {
181
- "text": "TnT uses second order Markov models for part-ofspeech tagging. The states of the model represent tags, outputs represent the words. Transition probabilities depend on the states, thus pairs of tags. Output probabilities only depend on the most recent category. To be explicit, we calculate argmax P(tilti-1, ti-2)P (wilti P(tr+l ItT) ti...iT (i) for a given sequence of words w I ... W T of length T. tl... tT are elements of the tagset, the additional tags t-l, to, and tT+l are beginning-of-sequence and end-of-sequence markers. Using these additional tags, even if they stem from rudimentary processing of punctuation marks, slightly improves tagging results. This is different from formulas presented in other publications, which just stop with a \"loose end\" at the last word. If sentence boundaries are not marked in the input, TnT adds these tags if it encounters one of [.!?;] as a token.",
182
- "cite_spans": [
183
- {
184
- "start": 315,
185
- "end": 333,
186
- "text": "(wilti P(tr+l ItT)",
187
- "ref_id": null
188
- },
189
- {
190
- "start": 342,
191
- "end": 345,
192
- "text": "(i)",
193
- "ref_id": null
194
- }
195
- ],
196
- "ref_spans": [],
197
- "eq_spans": [],
198
- "section": "Architecture 2.1 The Underlying Model",
199
- "sec_num": "2"
200
- },
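As an illustration of equation (1), the following minimal sketch (our own, not code from TnT) scores one candidate tag sequence, including the beginning-of-sequence and end-of-sequence markers discussed above; `p_trans`, `p_emit`, and the boundary-tag names are assumptions, standing in for the smoothed distributions described below:

```python
import math

BOS, EOS = "<s>", "</s>"  # illustrative boundary-tag names

def sequence_log_prob(words, tags, p_trans, p_emit):
    """Log of prod_i P(t_i | t_{i-1}, t_{i-2}) * P(w_i | t_i) * P(t_{T+1} | t_T).

    p_trans(t3, t1, t2) -> P(t3 | t1, t2); p_emit(w, t) -> P(w | t).
    Assumes nonzero (smoothed) probabilities throughout.
    """
    t1, t2 = BOS, BOS  # t_{-1} = t_0 = beginning-of-sequence marker
    logp = 0.0
    for w, t in zip(words, tags):
        logp += math.log(p_trans(t, t1, t2)) + math.log(p_emit(w, t))
        t1, t2 = t2, t
    # end-of-sequence marker instead of a "loose end" at the last word
    logp += math.log(p_trans(EOS, t1, t2))
    return logp
```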
201
- {
202
- "text": "Transition and output probabilities are estimated from a tagged corpus. As a first step, we use the maximum likelihood probabilities /5 which are derived from the relative frequencies:",
203
- "cite_spans": [],
204
- "ref_spans": [],
205
- "eq_spans": [],
206
- "section": "Architecture 2.1 The Underlying Model",
207
- "sec_num": "2"
208
- },
209
- {
210
- "text": "Unigrams: /5(t3) = f(t3) (2) N f(t2, t3) (3) Bigrams: P(t31t~)= f(t2) f(ta, t2, t3) Trigrams: /5(t3ltx,t~) -f(tl,t2) (4) Lexical: /5(w3 It3) -/(w3, t3) (5) f(t3)",
211
- "cite_spans": [],
212
- "ref_spans": [],
213
- "eq_spans": [],
214
- "section": "Architecture 2.1 The Underlying Model",
215
- "sec_num": "2"
216
- },
217
- {
218
- "text": "for all tl, t2, t3 in the tagset and w3 in the lexicon. N is the total number of tokens in the training corpus. We define a maximum likelihood probability to be zero if the corresponding nominators and denominators are zero. As a second step, contextual frequencies are smoothed and lexical frequences are completed by handling words that are not in the lexicon (see below).",
219
- "cite_spans": [],
220
- "ref_spans": [],
221
- "eq_spans": [],
222
- "section": "Architecture 2.1 The Underlying Model",
223
- "sec_num": "2"
224
- },
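A minimal sketch of these relative-frequency estimates, assuming the training corpus is given as a flat list of (word, tag) pairs; all names here are illustrative, not from the paper:

```python
from collections import Counter

def collect_counts(tagged_corpus):
    """Uni-, bi-, trigram and lexical counts from (word, tag) pairs."""
    uni, bi, tri, lex = Counter(), Counter(), Counter(), Counter()
    tags = [t for _, t in tagged_corpus]
    for i, (w, t) in enumerate(tagged_corpus):
        uni[t] += 1
        lex[(w, t)] += 1
        if i >= 1:
            bi[(tags[i - 1], t)] += 1
        if i >= 2:
            tri[(tags[i - 2], tags[i - 1], t)] += 1
    return uni, bi, tri, lex, len(tagged_corpus)

def ml_probs(uni, bi, tri, lex, n):
    """Relative-frequency estimates (2)-(5); zero denominators give 0."""
    def p_uni(t3):
        return uni[t3] / n
    def p_bi(t2, t3):
        return bi[(t2, t3)] / uni[t2] if uni[t2] else 0.0
    def p_tri(t1, t2, t3):
        return tri[(t1, t2, t3)] / bi[(t1, t2)] if bi[(t1, t2)] else 0.0
    def p_lex(w, t):
        return lex[(w, t)] / uni[t] if uni[t] else 0.0
    return p_uni, p_bi, p_tri, p_lex
```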
225
- {
226
- "text": "Trigram probabilities generated from a corpus usually cannot directly be used because of the sparsedata problem. This means that there are not enough instances for each trigram to reliably estimate the probability. Furthermore, setting a probability to zero because the corresponding trigram never occured in the corpus has an undesired effect. It causes the probability of a complete sequence to be set to zero if its use is necessary for a new text sequence, thus makes it impossible to rank different sequences containing a zero probability.",
227
- "cite_spans": [],
228
- "ref_spans": [],
229
- "eq_spans": [],
230
- "section": "Smoothing",
231
- "sec_num": "2.2"
232
- },
233
- {
234
- "text": "The smoothing paradigm that delivers the best results in TnT is linear interpolation of unigrams, bigrams, and trigrams. Therefore, we estimate a trigram probability as follows:",
235
- "cite_spans": [],
236
- "ref_spans": [],
237
- "eq_spans": [],
238
- "section": "Smoothing",
239
- "sec_num": "2.2"
240
- },
241
- {
242
- "text": "P(t3ltl, t2) = AlP(t3) + Ag_/5(t31t2) + A3/5(t3[t1, t2) (6)",
243
- "cite_spans": [],
244
- "ref_spans": [],
245
- "eq_spans": [],
246
- "section": "Smoothing",
247
- "sec_num": "2.2"
248
- },
249
- {
250
- "text": "/5 are maximum likelihood estimates of the probabilities, and A1 + A2 + A3 = 1, so P again represent probability distributions.",
251
- "cite_spans": [],
252
- "ref_spans": [],
253
- "eq_spans": [],
254
- "section": "Smoothing",
255
- "sec_num": "2.2"
256
- },
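A sketch of equation (6) under these conventions; the λs are shared by all trigrams (context-independent) and must sum to one:

```python
def interpolated_trigram_prob(p_uni, p_bi, p_tri, l1, l2, l3):
    """Equation (6): lambda-weighted mix of the ML estimates."""
    assert abs(l1 + l2 + l3 - 1.0) < 1e-9  # lambdas sum to one
    def p(t1, t2, t3):
        return l1 * p_uni(t3) + l2 * p_bi(t2, t3) + l3 * p_tri(t1, t2, t3)
    return p
```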
257
- {
258
- "text": "We use the context-independent variant of linear interpolation, i.e., the values of the As do not depend on the particular trigram. Contrary to intuition, this yields better results than the context-dependent variant. Due to sparse-data problems, one cannot estimate a different set of As for each trigram. Therefore, it is common practice to group trigrams by frequency and estimate tied sets of As. However, we are not aware of any publication that has investigated frequency groupings for linear interpolation in part-of-speech tagging. All groupings that we have tested yielded at most equivalent results to contextindependent linear interpolation. Some groupings even yielded worse results. The tested groupings included a) one set of As for each frequency value and b) two classes (low and high frequency) on the two ends of the scale, as well as several groupings in between and several settings for partitioning the classes.",
259
- "cite_spans": [],
260
- "ref_spans": [],
261
- "eq_spans": [],
262
- "section": "Smoothing",
263
- "sec_num": "2.2"
264
- },
265
- {
266
- "text": "The values of Ax, A2, and A3 are estimated by deleted interpolation. This technique successively removes each trigram from the training corpus and estimates best values for the As from all other ngrams in the corpus. Given the frequency counts for uni-, bi-, and trigrams, the weights can be very efficiently determined with a processing time linear in the number of different trigrams. The algorithm is given in figure 1. Note that subtracting 1 means taking unseen data into account. Without this subtraction the model would overfit the training data and would generally yield worse results.",
267
- "cite_spans": [],
268
- "ref_spans": [],
269
- "eq_spans": [],
270
- "section": "Smoothing",
271
- "sec_num": "2.2"
272
- },
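A sketch of the deleted-interpolation algorithm of figure 1, using the counters from the estimation sketch above; the tie-breaking order among equal ratios is our own choice, since the figure leaves it open:

```python
def deleted_interpolation(uni, bi, tri, n):
    """Estimate lambda_1, lambda_2, lambda_3 from n-gram counts (figure 1)."""
    l1 = l2 = l3 = 0.0
    for (t1, t2, t3), f in tri.items():
        # subtracting 1 removes the current trigram, taking unseen data
        # into account; a zero denominator makes the expression 0
        r3 = (f - 1) / (bi[(t1, t2)] - 1) if bi[(t1, t2)] > 1 else 0.0
        r2 = (bi[(t2, t3)] - 1) / (uni[t2] - 1) if uni[t2] > 1 else 0.0
        r1 = (uni[t3] - 1) / (n - 1) if n > 1 else 0.0
        m = max(r1, r2, r3)
        if m == r3:
            l3 += f
        elif m == r2:
            l2 += f
        else:
            l1 += f
    total = l1 + l2 + l3  # normalize
    return l1 / total, l2 / total, l3 / total
```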
273
- {
274
- "text": "Currently, the method of handling unknown words that seems to work best for inflected languages is a suffix analysis as proposed in (Samuelsson, 1993) . Tag probabilities are set according to the word's ending. The suffix is a strong predictor for word classes, e.g., words in the Wall Street Journal part of the Penn Treebank ending in able are adjectives (JJ) in 98% of the cases (e.g. fashionable, variable), the rest of 2% are nouns (e.g. cable, variable).",
275
- "cite_spans": [
276
- {
277
- "start": 132,
278
- "end": 150,
279
- "text": "(Samuelsson, 1993)",
280
- "ref_id": "BIBREF8"
281
- }
282
- ],
283
- "ref_spans": [],
284
- "eq_spans": [],
285
- "section": "Handling of Unknown Words",
286
- "sec_num": "2.3"
287
- },
288
- {
289
- "text": "The probability distribution for a particular suffix is generated from all words in the training set that share the same suffix of some predefined maximum length. The term suffix as used here means \"final sequence of characters of a word\" which is not necessarily a linguistically meaningful suffix.",
290
- "cite_spans": [],
291
- "ref_spans": [],
292
- "eq_spans": [],
293
- "section": "Handling of Unknown Words",
294
- "sec_num": "2.3"
295
- },
296
- {
297
- "text": "Probabilities are smoothed by successive abstraction. This calculates the probability of a tag t given the last m letters li of an n letter word: P (tlln-r,+l,... ",
298
- "cite_spans": [
299
- {
300
- "start": 148,
301
- "end": 162,
302
- "text": "(tlln-r,+l,...",
303
- "ref_id": null
304
- }
305
- ],
306
- "ref_spans": [],
307
- "eq_spans": [],
308
- "section": "Handling of Unknown Words",
309
- "sec_num": "2.3"
310
- },
311
- {
312
- "text": "The sequence of increasingly more general contexts omits more and more characters of the suffix, such that P(tlln_m+2,...,ln), P(t[ln-m+3,...,l~), ..., P(t) are used for smoothing. The recursiou formula is P(tll,_i+l, . . . ln)",
313
- "cite_spans": [
314
- {
315
- "start": 127,
316
- "end": 156,
317
- "text": "P(t[ln-m+3,...,l~), ..., P(t)",
318
- "ref_id": null
319
- }
320
- ],
321
- "ref_spans": [],
322
- "eq_spans": [],
323
- "section": "ln).",
324
- "sec_num": null
325
- },
326
- {
327
- "text": "= P(tlln-i+l,... In) + OiP(tll~-,..., ln) (7) 1 +0~ set )%1 ----)%2 = )%3 = foreach trigram tl,t2,t3 with f(tl,t2,t3) > 0",
328
- "cite_spans": [],
329
- "ref_spans": [],
330
- "eq_spans": [],
331
- "section": "ln).",
332
- "sec_num": null
333
- },
334
- {
335
- "text": "depending on the maximum of the following three values: ",
336
- "cite_spans": [],
337
- "ref_spans": [],
338
- "eq_spans": [],
339
- "section": "ln).",
340
- "sec_num": null
341
- },
342
- {
343
- "text": "f(tl ,t2,ts)--1 case f(h,t2)-I \" increment )%3 by f(tl,t2,t3) f(t2,t3)-I case f(t2)-I \" increment )%2 by f(tl,t2,ts) f(t3)--i case N-1 \" increment )%1 by f(tl,t2,t3) end end normalize )%1, )%2, )%3",
344
- "cite_spans": [],
345
- "ref_spans": [],
346
- "eq_spans": [],
347
- "section": "ln).",
348
- "sec_num": null
349
- },
350
- {
351
- "text": "EQUATION",
352
- "cite_spans": [],
353
- "ref_spans": [],
354
- "eq_spans": [
355
- {
356
- "start": 0,
357
- "end": 8,
358
- "text": "EQUATION",
359
- "ref_id": "EQREF",
360
- "raw_str": "P(t) =/5(t).",
361
- "eq_num": "(8)"
362
- }
363
- ],
364
- "section": "ln).",
365
- "sec_num": null
366
- },
367
- {
368
- "text": "The maximum likelihood estimate for a suffix of length i is derived from corpus frequencies by",
369
- "cite_spans": [],
370
- "ref_spans": [],
371
- "eq_spans": [],
372
- "section": "ln).",
373
- "sec_num": null
374
- },
375
- {
376
- "text": "EQUATION",
377
- "cite_spans": [],
378
- "ref_spans": [],
379
- "eq_spans": [
380
- {
381
- "start": 0,
382
- "end": 8,
383
- "text": "EQUATION",
384
- "ref_id": "EQREF",
385
- "raw_str": "P(ti/~-i+l, .. l~) = f(t, 1~-~+1,... l~) \u2022",
386
- "eq_num": "(9)"
387
- }
388
- ],
389
- "section": "ln).",
390
- "sec_num": null
391
- },
392
- {
393
- "text": "For the Markov model, we need the inverse conditional probabilities P(/,-i+l,... lnlt) which are obtained by Bayesian inversion. A theoretical motivated argumentation uses the standard deviation of the maximum likelihood probabilities for the weights 0i (Samuelsson, 1993) \u2022 This leaves room for interpretation. 1) One has to identify a good value for m, the longest suffix used\u2022 The approach taken for TnT is the following: m depends on the word in question. We use the longest suffix that we can find in the training set (i.e., for which the frequency is greater than or equal to 1), but at most 10 characters. This is an empirically determined choice\u2022 2) We use a context-independent approach for 0i, as we did for the contextual weights )%i. It turned out to be a good choice to set all 0i to the standard deviation of the unconditioned maximum likelihood probabilities of the tags in the training corpus, i.e., we set 3) We use different estimates for uppercase and lowercase words, i.e., we maintain two different suffix tries depending on the capitalization of the word. This information improves the tagging results\u2022 4) Another freedom concerns the choice of the words in the lexicon that should be used for suffix handling. Should we use all words, or are some of them better suited than others? Accepting that unknown words are most probably infrequent, one can argue that using suffixes of infrequent words in the lexicon is a better approximation for unknown words than using suffixes of frequent words. Therefore, we restrict the procedure of suffix handling to words with a frequency smaller than or equal to some threshold value. Empirically, 10 turned out to be a good choice for this threshold.",
394
- "cite_spans": [
395
- {
396
- "start": 254,
397
- "end": 272,
398
- "text": "(Samuelsson, 1993)",
399
- "ref_id": "BIBREF8"
400
- }
401
- ],
402
- "ref_spans": [],
403
- "eq_spans": [],
404
- "section": "ln).",
405
- "sec_num": null
406
- },
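A sketch of this suffix handling, combining equations (7)-(9) with design decisions 1) and 2); the count tables and the shared weight theta are assumptions, standing in for structures built from the low-frequency lexicon words described in point 4):

```python
def suffix_tag_prob(word, tag, suf_tag_counts, suf_totals,
                    tag_counts, total, theta, max_len=10):
    """P(t | suffix) by successive abstraction, from P(t) up to the
    longest suffix of `word` seen in training (at most max_len chars)."""
    p = tag_counts[tag] / total  # initialization P(t) = P-hat(t), eq. (8)
    for i in range(1, min(max_len, len(word)) + 1):
        suffix = word[-i:]
        if suf_totals.get(suffix, 0) == 0:
            break  # stop at the longest suffix found in the training set
        p_hat = suf_tag_counts.get((suffix, tag), 0) / suf_totals[suffix]  # eq. (9)
        p = (p_hat + theta * p) / (1 + theta)  # recursion, eq. (7)
    return p
    # For the Markov model, invert with Bayes' rule:
    # P(suffix | t) = P(t | suffix) * P(suffix) / P(t)
```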
407
- {
408
- "text": "Additional information that turned out to be useful for the disambiguation process for several corpora and tagsets is capitalization information. Tags are usually not informative about capitalization, but probability distributions of tags around capitalized words are different from those not capitalized. The effect is larger for English, which only capitalizes proper names, and smaller for German, which capitalizes all nouns.",
409
- "cite_spans": [],
410
- "ref_spans": [],
411
- "eq_spans": [],
412
- "section": "Capitalization",
413
- "sec_num": "2.4"
414
- },
415
- {
416
- "text": "We use flags ci that are true if wi is a capitalized word and false otherwise\u2022 These flags are added to the contextual probability distributions. Instead of",
417
- "cite_spans": [],
418
- "ref_spans": [],
419
- "eq_spans": [],
420
- "section": "Capitalization",
421
- "sec_num": "2.4"
422
- },
423
- {
424
- "text": "we use P(t3, c3 [tl , cl, t2, c2) ",
425
- "cite_spans": [
426
- {
427
- "start": 16,
428
- "end": 33,
429
- "text": "[tl , cl, t2, c2)",
430
- "ref_id": null
431
- }
432
- ],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "P(tsItl,t2)",
436
- "sec_num": null
437
- },
438
- {
439
- "text": "and equations (3) to (5) are updated accordingly. This is equivalent to doubling the size of the tagset and using different tags depending on capitalization.",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "P(tsItl,t2)",
444
- "sec_num": null
445
- },
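A sketch of this doubling of the tagset: rewriting each training tag as a (tag, capitalization flag) pair before the counts of section 2.1 are collected; names are illustrative:

```python
def add_cap_flags(tagged_corpus):
    """Turn each tag t_i into the pair (t_i, c_i); c_i is true iff w_i
    starts with an uppercase letter."""
    return [(w, (t, w[:1].isupper())) for w, t in tagged_corpus]

add_cap_flags([("The", "DT"), ("tagger", "NN")])
# -> [('The', ('DT', True)), ('tagger', ('NN', False))]
```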
446
- {
447
- "text": "The processing time of the Viterbi algorithm (Rabiner, 1989) can be reduced by introducing a beam search. Each state that receives a 5 value smaller than the largest 5 divided by some threshold value is excluded from further processing. While the Viterbi algorithm is guaranteed to find the sequence of states with the highest probability, this is no longer true when beam search is added. Nevertheless, for practical purposes and the right choice of 0, there is virtually no difference between the algorithm with and without a beam. Empirically, a value of 0 = 1000 turned out to approximately double the speed of the tagger without affecting the accuracy.",
448
- "cite_spans": [],
449
- "ref_spans": [],
450
- "eq_spans": [],
451
- "section": "Beam Search",
452
- "sec_num": "2.5"
453
- },
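A minimal sketch of Viterbi with this beam pruning over tag-pair states; raw probabilities instead of logs and the end-of-sequence transition are simplifications of ours, so this illustrates the pruning idea rather than TnT's implementation:

```python
def viterbi_beam(words, tagset, p_trans, p_emit, theta=1000.0):
    """Best tag sequence; states are (t_{i-1}, t_i) pairs, and states with
    delta < max(delta) / theta are pruned after each word."""
    BOS = "<s>"
    delta = {(BOS, BOS): (1.0, [])}
    for w in words:
        new = {}
        for (t1, t2), (d, path) in delta.items():
            for t3 in tagset:
                p = d * p_trans(t3, t1, t2) * p_emit(w, t3)
                if p > 0.0 and p > new.get((t2, t3), (0.0, None))[0]:
                    new[(t2, t3)] = (p, path + [t3])
        if not new:  # cannot happen with smoothed, nonzero estimates
            raise ValueError("all paths pruned")
        best = max(d for d, _ in new.values())
        delta = {s: v for s, v in new.items() if v[0] >= best / theta}  # beam
    return max(delta.values(), key=lambda v: v[0])[1]
```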
454
- {
455
- "text": "The tagger currently tags between 30~000 and 60,000 tokens per second (including file I/O) on a Pentium 500 running Linux. The speed mainly depends on the percentage of unknown words and on the average ambiguity rate.",
456
- "cite_spans": [],
457
- "ref_spans": [],
458
- "eq_spans": [],
459
- "section": "Beam Search",
460
- "sec_num": "2.5"
461
- },
462
- {
463
- "text": "We evaluate the tagger's performance under several aspects. First of all, we determine the tagging accuracy averaged over ten iterations. The overall accuracy, as well as separate accuracies for known and unknown words are measured. Second, learning curves are presented, that indicate the performance when using training corpora of different sizes, starting with as few as 1,000 tokens and ranging to the size of the entire corpus (minus the test set).",
464
- "cite_spans": [],
465
- "ref_spans": [],
466
- "eq_spans": [],
467
- "section": "Evaluation",
468
- "sec_num": "3"
469
- },
470
- {
471
- "text": "An important characteristic of statistical taggers is that they not only assign tags to words but also probabilities in order to rank different assignments. We distinguish reliable from unreliable assignments by the quotient of the best and second best assignments 1. All assignments for which this quotient is larger than some threshold are regarded as reliable, the others as unreliable. As we will see below, accuracies for reliable assignments are much higher.",
472
- "cite_spans": [],
473
- "ref_spans": [],
474
- "eq_spans": [],
475
- "section": "Evaluation",
476
- "sec_num": "3"
477
- },
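A sketch of this reliability criterion for a single token, given the probabilities the tagger assigns to its candidate tags; by the footnote's convention, a token with only one possible tag is always reliable:

```python
def is_reliable(tag_probs, threshold):
    """True iff P(best) / P(second best) exceeds the threshold."""
    ranked = sorted(tag_probs.values(), reverse=True)
    if len(ranked) < 2 or ranked[1] == 0.0:
        return True  # quotient is infinite for a single possible tag
    return ranked[0] / ranked[1] > threshold

is_reliable({"NN": 0.8, "VB": 0.1, "JJ": 0.1}, threshold=2.0)  # True: 0.8/0.1 = 8
```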
478
- {
479
- "text": "The tests are performed on partitions of the corpora that use 90% as training set and 10% as test set, so that the test data is guaranteed to be unseen during training. Each result is obtained by repeating the experiment 10 times with different partitions and averaging the single outcomes.",
480
- "cite_spans": [],
481
- "ref_spans": [],
482
- "eq_spans": [],
483
- "section": "Evaluation",
484
- "sec_num": "3"
485
- },
486
- {
487
- "text": "In all experiments, contiguous test sets are used. The alternative is a round-robin procedure that puts every 10th sentence into the test set. We argue that contiguous test sets yield more realistic results because completely unseen articles are tagged. Using the round-robin procedure, parts of an article are already seen, which significantly reduces the percentage of unknown words. Therefore, we expect even higher results when testing on every 10th sentence instead of a contiguous set of 10%.",
488
- "cite_spans": [],
489
- "ref_spans": [],
490
- "eq_spans": [],
491
- "section": "Evaluation",
492
- "sec_num": "3"
493
- },
494
- {
495
- "text": "In the following, accuracy denotes the number of correctly assigned tags divided by the number of tokens in the corpus processed. The tagger is allowed to assign exactly one tag to each token.",
496
- "cite_spans": [],
497
- "ref_spans": [],
498
- "eq_spans": [],
499
- "section": "Evaluation",
500
- "sec_num": "3"
501
- },
502
- {
503
- "text": "We distinguish the overall accuracy, taking into account all tokens in the test corpus, and separate accuracies for known and unknown tokens. The latter are interesting, since usually unknown tokens are much more difficult to process than known tokens, for which a list of valid tags can be found in the lexicon.",
504
- "cite_spans": [],
505
- "ref_spans": [],
506
- "eq_spans": [],
507
- "section": "Evaluation",
508
- "sec_num": "3"
509
- },
510
- {
511
- "text": "The German NEGRA corpus consists of 20,000 sentences (355,000 tokens) of newspaper texts (Frankfurter Rundschau) that are annotated with parts-ofspeech and predicate-argument structures (Skut et al., 1997) . It was developed at the Saarland University in Saarbrficken 2. Part of it was tagged at the IMS Stuttgart. This evaluation only uses the partof-speech annotation and ignores structural annotations.",
512
- "cite_spans": [
513
- {
514
- "start": 186,
515
- "end": 205,
516
- "text": "(Skut et al., 1997)",
517
- "ref_id": "BIBREF10"
518
- }
519
- ],
520
- "ref_spans": [],
521
- "eq_spans": [],
522
- "section": "Tagging the NEGRA corpus",
523
- "sec_num": "3.1"
524
- },
525
- {
526
- "text": "Tagging accuracies for the NEGRA corpus are shown in table 2. Figure 3 shows the learning curve of the tagger, i.e., the accuracy depending on the amount of training data. Training length is the nmnber of tokens used for training. Each training length was tested ten times, training and test sets were randomly chosen and disjoint, results were averaged. The training length is given on a logarithmic scale.",
527
- "cite_spans": [],
528
- "ref_spans": [
529
- {
530
- "start": 62,
531
- "end": 70,
532
- "text": "Figure 3",
533
- "ref_id": "FIGREF2"
534
- }
535
- ],
536
- "eq_spans": [],
537
- "section": "Tagging the NEGRA corpus",
538
- "sec_num": "3.1"
539
- },
540
- {
541
- "text": "It is remarkable that tagging accuracy for known words is very high even for very small training cotpora. This means that we have a good chance of getting the right tag if a word is seen at least once during training. Average percentages of unknown tokens are shown in the bottom line of each diagram.",
542
- "cite_spans": [],
543
- "ref_spans": [],
544
- "eq_spans": [],
545
- "section": "Tagging the NEGRA corpus",
546
- "sec_num": "3.1"
547
- },
548
- {
549
- "text": "We exploit the fact that the tagger not only determines tags, but also assigns probabilities. If there is an alternative that has a probability \"close to\" that of the best assignment, this alternative can be viewed as almost equally well suited. The notion of \"close to\" is expressed by the distance of probabilities, and this in turn is expressed by the quotient of probabilities. So, the distance of the probabilities of a best tag tbest and an alternative tag tart is expressed by P(tbest)/p(tau), which is some value greater or equal to 1 since the best tag assignment has the highest probability. Figure 4 shows the accuracy when separating assignments with quotients larger and smaller than the threshold (hence reliable and unreliable assignments). As expected, we find that accuracies for 2For availability, please check h~tp ://w~. col i. uni-sb, de/s fb378/negra-corpus reliable assignments are much higher than for unreliable assignments. This distinction is, e.g., useful for annotation projects during the cleaning process, or during pre-processing, so the tagger can emit multiple tags if the best tag is classified as unreliable.",
550
- "cite_spans": [],
551
- "ref_spans": [
552
- {
553
- "start": 602,
554
- "end": 610,
555
- "text": "Figure 4",
556
- "ref_id": "FIGREF3"
557
- }
558
- ],
559
- "eq_spans": [],
560
- "section": "Tagging the NEGRA corpus",
561
- "sec_num": "3.1"
562
- },
563
- {
564
- "text": "We use the Wall Street Journal as contained in the Penn Treebank for our experiments. The annotation consists of four parts: 1) a context-free structure augmented with traces to mark movement and discontinuous constituents, 2) phrasal categories that are annotated as node labels, 3) a small set of grammatical functions that are annotated as extensions to the node labels, and 4) part-of-speech tags (Marcus et al., 1993) . This evaluation only uses the part-ofspeech annotation. The Wall Street Journal part of the Penn Treebank consists of approx. 50,000 sentences (1.2 million tokens).",
565
- "cite_spans": [
566
- {
567
- "start": 401,
568
- "end": 422,
569
- "text": "(Marcus et al., 1993)",
570
- "ref_id": "BIBREF5"
571
- }
572
- ],
573
- "ref_spans": [],
574
- "eq_spans": [],
575
- "section": "Tagging the Penn Treebank",
576
- "sec_num": "3.2"
577
- },
578
- {
579
- "text": "Tagging accuracies for the Penn Treebank are shown in table 5. Figure 6 shows the learning curve of the tagger, i.e., the accuracy depending on the amount of training data. Training length is the number of tokens used for training. Each training length was tested ten times. Training and test sets were disjoint, results are averaged. The training length is given on a logarithmic scale. As for the NEGRA corpus, tagging accuracy is very high for known tokens even with small amounts of training data.",
580
- "cite_spans": [],
581
- "ref_spans": [
582
- {
583
- "start": 63,
584
- "end": 71,
585
- "text": "Figure 6",
586
- "ref_id": null
587
- }
588
- ],
589
- "eq_spans": [],
590
- "section": "Tagging the Penn Treebank",
591
- "sec_num": "3.2"
592
- },
593
- {
594
- "text": "We exploit the fact that the tagger not only determines tags, but also assigns probabilities. Figure 7 shows the accuracy when separating assignments with quotients larger and smaller than the threshold (hence reliable and unreliable assignments). Again, we find that accuracies for reliable assignments are much higher than for unreliable assignments.",
595
- "cite_spans": [],
596
- "ref_spans": [
597
- {
598
- "start": 94,
599
- "end": 103,
600
- "text": "Figure 7",
601
- "ref_id": "FIGREF4"
602
- }
603
- ],
604
- "eq_spans": [],
605
- "section": "Tagging the Penn Treebank",
606
- "sec_num": "3.2"
607
- },
608
- {
609
- "text": "Average part-of-speech tagging accuracy is between 96% and 97%, depending on language and tagset, which is at least on a par with state-of-the-art results found in the literature, possibly better. For the Penn Treebank, (Ratnaparkhi, 1996) reports an accuracy of 96.6% using the Maximum Entropy approach, our much simpler and therefore faster HMM approach delivers 96.7%. This comparison needs to be re-examined, since we use a ten-fold crossvalidation and averaging of results while Ratnaparkhi only makes one test run. The accuracy for known tokens is significantly higher than for unknown tokens. For the German newspaper data, results are 8.7% better when the word was seen before and therefore is in the lexicon, than when it was not seen before (97.7% vs. 89.0%). Accuracy for known tokens is high even with very small amounts of training data. As few as 1000 tokens are sufficient to achieve 95%-96% accuracy for them. It is important for the tagger to have seen a word at least once during training.",
610
- "cite_spans": [
611
- {
612
- "start": 220,
613
- "end": 239,
614
- "text": "(Ratnaparkhi, 1996)",
615
- "ref_id": "BIBREF7"
616
- }
617
- ],
618
- "ref_spans": [],
619
- "eq_spans": [],
620
- "section": "Summary of Part-of-Speech Tagging Results",
621
- "sec_num": "3.3"
622
- },
623
- {
624
- "text": "Stochastic taggers assign probabilities to tags. We exploit the probabilities to determine reliability of assignments. For a subset that is determined during processing by the tagger we achieve accuracy rates of over 99%. The accuracy of the complement set is much lower. This information can, e.g., be exploited in an annotation project to give an additional treatment to the unreliable assignments, or to pass selected ambiguities to a subsequent processing step.",
625
- "cite_spans": [],
626
- "ref_spans": [],
627
- "eq_spans": [],
628
- "section": "Summary of Part-of-Speech Tagging Results",
629
- "sec_num": "3.3"
630
- },
631
- {
632
- "text": "We have shown that a tagger based on Markov models yields state-of-the-art results, despite contrary claims found in the literature. For example, the Markov model tagger used in the comparison of (van Halteren et al., 1998) yielded worse results than all other taggers. In our opinion, a reason for the wrong claim is that the basic algorithms leave several decisions to the implementor. The rather large amount of freedom was not handled in detail in previous publications: handling of start-and end-of-sequence, the exact smoothing technique, how to determine the weights for context probabilities, details on handling unknown words, and how to determine the weights for unknown words. Note that the decisions we made yield good results for both the German and the English Corpus. They do so for several other corpora as well. The architecture remains applicable to a large variety of languages. According to current tagger comparisons (van Halteren et al., 1998; Zavrel and Daelemans, 1999) , and according to a comparsion of the results presented here with those in (Ratnaparkhi, 1996) , the Maximum Entropy framework seems to be the only other approach yielding comparable results to the one presented here. It is a very interesting future research topic to determine the advantages of either of these approaches, to find the reason for their high accuracies, and to find a good combination of both.",
633
- "cite_spans": [
634
- {
635
- "start": 196,
636
- "end": 223,
637
- "text": "(van Halteren et al., 1998)",
638
- "ref_id": "BIBREF11"
639
- },
640
- {
641
- "start": 938,
642
- "end": 965,
643
- "text": "(van Halteren et al., 1998;",
644
- "ref_id": "BIBREF11"
645
- },
646
- {
647
- "start": 966,
648
- "end": 993,
649
- "text": "Zavrel and Daelemans, 1999)",
650
- "ref_id": "BIBREF13"
651
- },
652
- {
653
- "start": 1070,
654
- "end": 1089,
655
- "text": "(Ratnaparkhi, 1996)",
656
- "ref_id": "BIBREF7"
657
- }
658
- ],
659
- "ref_spans": [],
660
- "eq_spans": [],
661
- "section": "Conclusion",
662
- "sec_num": "4"
663
- },
664
- {
665
- "text": "TnT is freely available to universities and related organizations for research purposes (see http ://www. coli. uni-sb, de/-thorsten/tnt).",
666
- "cite_spans": [],
667
- "ref_spans": [],
668
- "eq_spans": [],
669
- "section": "Conclusion",
670
- "sec_num": "4"
671
- },
672
- {
673
- "text": "like to thank all the people who took the effort to annotate the Penn Treebank, the Susanne Corpus, the Stuttgarter Referenzkorpus, the NEGRA Corpus, the Verbmobil Corpora, and several others. And, last but not least, I would like to thank the users of TnT who provided me with bug reports and valuable suggestions for improvements.",
674
- "cite_spans": [],
675
- "ref_spans": [],
676
- "eq_spans": [],
677
- "section": "Conclusion",
678
- "sec_num": "4"
679
- },
680
- {
681
- "text": "By definition, this quotient is co if there is only one possible tag for a given word.",
682
- "cite_spans": [],
683
- "ref_spans": [],
684
- "eq_spans": [],
685
- "section": "",
686
- "sec_num": null
687
- }
688
- ],
689
- "back_matter": [
690
- {
691
- "text": "Many thanks go to Hans Uszkoreit for his support during the development of TnT. Most of the work on TnT was carried out while the author received a grant of the Deutsche Forschungsgemeinschaft in the Graduiertenkolleg Kognitionswissenschaft Saarbriicken. Large annotated corpora are the pre-requisite for developing and testing part-ofspeech taggers, and they enable the generation of high-quality language models. Therefore, I would",
692
- "cite_spans": [],
693
- "ref_spans": [],
694
- "eq_spans": [],
695
- "section": "Acknowledgements",
696
- "sec_num": null
697
- }
698
- ],
699
- "bib_entries": {
700
- "BIBREF0": {
701
- "ref_id": "b0",
702
- "title": "Syntactic annotation of a German newspaper corpus",
703
- "authors": [
704
- {
705
- "first": "Thorsten",
706
- "middle": [],
707
- "last": "Brants",
708
- "suffix": ""
709
- },
710
- {
711
- "first": "Wojciech",
712
- "middle": [],
713
- "last": "Skut",
714
- "suffix": ""
715
- },
716
- {
717
- "first": "Hans",
718
- "middle": [],
719
- "last": "Uszkoreit",
720
- "suffix": ""
721
- }
722
- ],
723
- "year": 1999,
724
- "venue": "Proceedings of the ATALA Treebank Workshop",
725
- "volume": "",
726
- "issue": "",
727
- "pages": "69--76",
728
- "other_ids": {},
729
- "num": null,
730
- "urls": [],
731
- "raw_text": "Thorsten Brants, Wojciech Skut, and Hans Uszko- reit. 1999. Syntactic annotation of a German newspaper corpus. In Proceedings of the ATALA Treebank Workshop, pages 69-76, Paris, France.",
732
- "links": null
733
- },
734
- "BIBREF1": {
735
- "ref_id": "b1",
736
- "title": "A Corpus-Based Approach to Language Learning",
737
- "authors": [
738
- {
739
- "first": "Eric",
740
- "middle": [],
741
- "last": "Brill",
742
- "suffix": ""
743
- }
744
- ],
745
- "year": 1993,
746
- "venue": "Ph.D. Dissertation",
747
- "volume": "",
748
- "issue": "",
749
- "pages": "",
750
- "other_ids": {},
751
- "num": null,
752
- "urls": [],
753
- "raw_text": "Eric Brill. 1993. A Corpus-Based Approach to Lan- guage Learning. Ph.D. Dissertation, Department of Computer and Information Science, University of Pennsylvania.",
754
- "links": null
755
- },
756
- "BIBREF2": {
757
- "ref_id": "b2",
758
- "title": "Equations for part-of-speech tagging",
759
- "authors": [
760
- {
761
- "first": "Eugene",
762
- "middle": [],
763
- "last": "Charniak",
764
- "suffix": ""
765
- },
766
- {
767
- "first": "Curtis",
768
- "middle": [],
769
- "last": "Hendrickson",
770
- "suffix": ""
771
- },
772
- {
773
- "first": "Neil",
774
- "middle": [],
775
- "last": "Jacobson",
776
- "suffix": ""
777
- },
778
- {
779
- "first": "Mike",
780
- "middle": [],
781
- "last": "Perkowitz",
782
- "suffix": ""
783
- }
784
- ],
785
- "year": 1993,
786
- "venue": "Proceedings of the Eleventh National Con[erence on Artificial Intelligence",
787
- "volume": "",
788
- "issue": "",
789
- "pages": "784--789",
790
- "other_ids": {},
791
- "num": null,
792
- "urls": [],
793
- "raw_text": "Eugene Charniak, Curtis Hendrickson, Neil Ja- cobson, and Mike Perkowitz. 1993. Equations for part-of-speech tagging. In Proceedings of the Eleventh National Con[erence on Artificial In- telligence, pages 784-789, Menlo Park: AAAI Press/MIT Press.",
794
- "links": null
795
- },
796
- "BIBREF3": {
797
- "ref_id": "b3",
798
- "title": "A practical part-of-speech tagger",
799
- "authors": [
800
- {
801
- "first": "Doug",
802
- "middle": [],
803
- "last": "Cutting",
804
- "suffix": ""
805
- },
806
- {
807
- "first": "Julian",
808
- "middle": [],
809
- "last": "Kupiec",
810
- "suffix": ""
811
- },
812
- {
813
- "first": "Jan",
814
- "middle": [],
815
- "last": "Pedersen",
816
- "suffix": ""
817
- },
818
- {
819
- "first": "Penelope",
820
- "middle": [],
821
- "last": "Sibun",
822
- "suffix": ""
823
- }
824
- ],
825
- "year": 1992,
826
- "venue": "Proceedings of the 3rd Conference on Applied Natural Language Processing (ACL)",
827
- "volume": "",
828
- "issue": "",
829
- "pages": "133--140",
830
- "other_ids": {},
831
- "num": null,
832
- "urls": [],
833
- "raw_text": "Doug Cutting, Julian Kupiec, Jan Pedersen, and Penelope Sibun. 1992. A practical part-of-speech tagger. In Proceedings of the 3rd Conference on Applied Natural Language Processing (ACL), pages 133-140.",
834
- "links": null
835
- },
836
- "BIBREF4": {
837
- "ref_id": "b4",
838
- "title": "Mbt: A memory-based part of speech tagger-generator",
839
- "authors": [
840
- {
841
- "first": "Walter",
842
- "middle": [],
843
- "last": "Daelemans",
844
- "suffix": ""
845
- },
846
- {
847
- "first": "Jakub",
848
- "middle": [],
849
- "last": "Zavrel",
850
- "suffix": ""
851
- },
852
- {
853
- "first": "Peter",
854
- "middle": [],
855
- "last": "Berck",
856
- "suffix": ""
857
- },
858
- {
859
- "first": "Steven",
860
- "middle": [],
861
- "last": "Gillis",
862
- "suffix": ""
863
- }
864
- ],
865
- "year": 1996,
866
- "venue": "Proceedings of the Workshop on Very Large Corpora",
867
- "volume": "",
868
- "issue": "",
869
- "pages": "",
870
- "other_ids": {},
871
- "num": null,
872
- "urls": [],
873
- "raw_text": "Walter Daelemans, Jakub Zavrel, Peter Berck, and Steven Gillis. 1996. Mbt: A memory-based part of speech tagger-generator. In Proceedings of the Workshop on Very Large Corpora, Copenhagen, Denmark.",
874
- "links": null
875
- },
876
- "BIBREF5": {
877
- "ref_id": "b5",
878
- "title": "Building a large annotated corpus of English: The Penn Treebank",
879
- "authors": [
880
- {
881
- "first": "Mitchell",
882
- "middle": [],
883
- "last": "Marcus",
884
- "suffix": ""
885
- },
886
- {
887
- "first": "Beatrice",
888
- "middle": [],
889
- "last": "Santorini",
890
- "suffix": ""
891
- },
892
- {
893
- "first": "Mary",
894
- "middle": [
895
- "Ann"
896
- ],
897
- "last": "Marcinkiewicz",
898
- "suffix": ""
899
- }
900
- ],
901
- "year": 1993,
902
- "venue": "Computational Linguistics",
903
- "volume": "19",
904
- "issue": "2",
905
- "pages": "313--330",
906
- "other_ids": {},
907
- "num": null,
908
- "urls": [],
909
- "raw_text": "Mitchell Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Compu- tational Linguistics, 19(2):313-330.",
910
- "links": null
911
- },
912
- "BIBREF6": {
913
- "ref_id": "b6",
914
- "title": "A tutorial on Hidden Markov Models and selected applications in speech recognition",
915
- "authors": [
916
- {
- "first": "Lawrence",
- "middle": [
- "R"
- ],
- "last": "Rabiner",
- "suffix": ""
- }
928
- ],
929
- "year": 1989,
930
- "venue": "Proceedings o] the IEEE",
931
- "volume": "77",
932
- "issue": "",
933
- "pages": "257--285",
934
- "other_ids": {},
935
- "num": null,
936
- "urls": [],
937
- "raw_text": "Lawrence R. Rabiner. 1989. A tutorial on Hid- den Markov Models and selected applications in speech recognition. In Proceedings o] the IEEE, volume 77(2), pages 257-285.",
938
- "links": null
939
- },
940
- "BIBREF7": {
941
- "ref_id": "b7",
942
- "title": "A maximum entropy model for part-of-speech tagging",
943
- "authors": [
944
- {
945
- "first": "Adwait",
946
- "middle": [],
947
- "last": "Ratnaparkhi",
948
- "suffix": ""
949
- }
950
- ],
951
- "year": 1996,
952
- "venue": "Proceedings o] the Conference on Empirical Methods in Natural Language Processing EMNLP-96, Philadelphia",
953
- "volume": "",
954
- "issue": "",
955
- "pages": "",
956
- "other_ids": {},
957
- "num": null,
958
- "urls": [],
959
- "raw_text": "Adwait Ratnaparkhi. 1996. A maximum entropy model for part-of-speech tagging. In Proceedings o] the Conference on Empirical Methods in Nat- ural Language Processing EMNLP-96, Philadel- phia, PA.",
960
- "links": null
961
- },
962
- "BIBREF8": {
963
- "ref_id": "b8",
964
- "title": "Morphological tagging based entirely on Bayesian inference",
965
- "authors": [
966
- {
967
- "first": "Christer",
968
- "middle": [],
969
- "last": "Samuelsson",
970
- "suffix": ""
971
- }
972
- ],
973
- "year": 1993,
974
- "venue": "9th Nordic Conference on Computational Linguistics NODALIDA-93",
975
- "volume": "",
976
- "issue": "",
977
- "pages": "",
978
- "other_ids": {},
979
- "num": null,
980
- "urls": [],
981
- "raw_text": "Christer Samuelsson. 1993. Morphological tag- ging based entirely on Bayesian inference. In 9th Nordic Conference on Computational Lin- guistics NODALIDA-93, Stockholm University, Stockholm, Sweden.",
982
- "links": null
983
- },
984
- "BIBREF9": {
985
- "ref_id": "b9",
986
- "title": "Improvements in part-ofspeech tagging with an application to German",
987
- "authors": [
988
- {
989
- "first": "Helmut",
990
- "middle": [],
991
- "last": "Schmid",
992
- "suffix": ""
993
- }
994
- ],
995
- "year": 1995,
996
- "venue": "Lexikon und Text. Niemeyer, Tfibingen",
997
- "volume": "",
998
- "issue": "",
999
- "pages": "",
1000
- "other_ids": {},
1001
- "num": null,
1002
- "urls": [],
1003
- "raw_text": "Helmut Schmid. 1995. Improvements in part-of- speech tagging with an application to German. In Helmut Feldweg and Erhard Hinrichts, editors, Lexikon und Text. Niemeyer, Tfibingen.",
1004
- "links": null
1005
- },
1006
- "BIBREF10": {
1007
- "ref_id": "b10",
1008
- "title": "An annotation scheme for free word order languages",
1009
- "authors": [
1010
- {
1011
- "first": "Wojciech",
1012
- "middle": [],
1013
- "last": "Skut",
1014
- "suffix": ""
1015
- },
1016
- {
1017
- "first": "Brigitte",
1018
- "middle": [],
1019
- "last": "Krenn",
1020
- "suffix": ""
1021
- },
1022
- {
1023
- "first": "Thorsten",
1024
- "middle": [],
1025
- "last": "Brants",
1026
- "suffix": ""
1027
- },
1028
- {
1029
- "first": "Hans",
1030
- "middle": [],
1031
- "last": "Uszkoreit",
1032
- "suffix": ""
1033
- }
1034
- ],
1035
- "year": 1997,
1036
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing ANLP-97",
1037
- "volume": "",
1038
- "issue": "",
1039
- "pages": "",
1040
- "other_ids": {},
1041
- "num": null,
1042
- "urls": [],
1043
- "raw_text": "Wojciech Skut, Brigitte Krenn, Thorsten Brants, and Hans Uszkoreit. 1997. An annotation scheme for free word order languages. In Proceedings of the Fifth Conference on Applied Natural Language Processing ANLP-97, Washington, DC.",
1044
- "links": null
1045
- },
1046
- "BIBREF11": {
1047
- "ref_id": "b11",
1048
- "title": "Improving data driven wordclass tagging by system combination",
1049
- "authors": [
1050
- {
1051
- "first": "Jakub",
1052
- "middle": [],
1053
- "last": "Hans Van Halteren",
1054
- "suffix": ""
1055
- },
1056
- {
1057
- "first": "Walter",
1058
- "middle": [],
1059
- "last": "Zavrel",
1060
- "suffix": ""
1061
- },
1062
- {
1063
- "first": "",
1064
- "middle": [],
1065
- "last": "Daelemans",
1066
- "suffix": ""
1067
- }
1068
- ],
1069
- "year": 1998,
1070
- "venue": "Proceedings of the International Conference on Computational Linguistics COLING-98",
1071
- "volume": "",
1072
- "issue": "",
1073
- "pages": "491--497",
1074
- "other_ids": {},
1075
- "num": null,
1076
- "urls": [],
1077
- "raw_text": "Hans van Halteren, Jakub Zavrel, and Walter Daele- mans. 1998. Improving data driven wordclass tag- ging by system combination. In Proceedings of the International Conference on Computational Lin- guistics COLING-98, pages 491-497, Montreal, Canada.",
1078
- "links": null
1079
- },
1080
- "BIBREF12": {
1081
- "ref_id": "b12",
1082
- "title": "Comparing a statistical and a rule-based tagger for german",
1083
- "authors": [
1084
- {
1085
- "first": "Martin",
1086
- "middle": [],
1087
- "last": "Volk",
1088
- "suffix": ""
1089
- },
1090
- {
1091
- "first": "Gerold",
1092
- "middle": [],
1093
- "last": "Schneider",
1094
- "suffix": ""
1095
- }
1096
- ],
1097
- "year": 1998,
1098
- "venue": "Proceedings of KONVENS-98",
1099
- "volume": "",
1100
- "issue": "",
1101
- "pages": "125--137",
1102
- "other_ids": {},
1103
- "num": null,
1104
- "urls": [],
1105
- "raw_text": "Martin Volk and Gerold Schneider. 1998. Compar- ing a statistical and a rule-based tagger for ger- man. In Proceedings of KONVENS-98, pages 125- 137, Bonn.",
1106
- "links": null
1107
- },
1108
- "BIBREF13": {
1109
- "ref_id": "b13",
1110
- "title": "Evaluatie van part-of-speech taggers voor bet corpus gesproken nederlands",
1111
- "authors": [
1112
- {
1113
- "first": "Jakub",
1114
- "middle": [],
1115
- "last": "Zavrel",
1116
- "suffix": ""
1117
- },
1118
- {
1119
- "first": "Walter",
1120
- "middle": [],
1121
- "last": "Daelemans",
1122
- "suffix": ""
1123
- }
1124
- ],
1125
- "year": 1999,
1126
- "venue": "CGN technical report",
1127
- "volume": "",
1128
- "issue": "",
1129
- "pages": "",
1130
- "other_ids": {},
1131
- "num": null,
1132
- "urls": [],
1133
- "raw_text": "Jakub Zavrel and Walter Daelemans. 1999. Eval- uatie van part-of-speech taggers voor bet cor- pus gesproken nederlands. CGN technical report, Katholieke Universiteit Brabant, Tilburg.",
1134
- "links": null
1135
- }
1136
- },
1137
- "ref_entries": {
1138
- "FIGREF0": {
1139
- "num": null,
1140
- "uris": null,
1141
- "type_str": "figure",
1142
- "text": "Algorithm for calculting the weights for context-independent linear interpolation )%1, )%2, )%3 when the n-gram frequencies are known. N is the size of the corpus\u2022 If the denominator in one of the expressions is 0, we define the result of that expression to be 0. for i = m... 0, using the maximum likelihood estimates/5 from frequencies in the lexicon, weights Oi and the initialization"
1143
- },
1144
- "FIGREF1": {
1145
- "num": null,
1146
- "uris": null,
1147
- "type_str": "figure",
1148
- "text": "for all i = 0... m -1, using a tagset of s tags and the average $ /5 = 1 ~/5(tj)(11)8 j----IThis usually yields values in the range 0.03 ... 0.10."
1149
- },
1150
- "FIGREF2": {
1151
- "num": null,
1152
- "uris": null,
1153
- "type_str": "figure",
1154
- "text": "Learning curve for tagging the NEGRA corpus. The training sets of variable sizes as well as test sets of 30,000 tokens were randomly chosen. Training and test sets were disjoint, the procedure was repeated 10 times and results were averaged. Percentages of unknowns for 500k and 1000k training are determined from an untagged extension.NEGRA Corpus: Accuracy of reliable assignments reliable -53.5 62.9 69.6 74.5 79.8 82.7 85.2 88.0 89.6 90.8 91.8 92.2 acc. of complement"
1155
- },
1156
- "FIGREF3": {
1157
- "num": null,
1158
- "uris": null,
1159
- "type_str": "figure",
1160
- "text": "Tagging accuracy for the NEGRA corpus when separating reliable and unreliable assignments. The curve shows accuracies for reliable assignments. The numbers at the bottom line indicate the percentage of reliable assignments and the accuracy of the complement set (i.e., unreliable assignments)."
1161
- },
1162
- "FIGREF4": {
1163
- "num": null,
1164
- "uris": null,
1165
- "type_str": "figure",
1166
- "text": "Tagging accuracy for the Penn Treebank when separating reliable and unreliable assignments. The curve shows accuracies for reliable assignments. The numbers at the bottom line indicate the percentage of reliable assignments and the accuracy of the complement set."
1167
- },
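The two captions above split the tagger's output into reliable and unreliable assignments by a threshold. One plausible reading, assumed here rather than stated in the captions, is to compare the best tag's probability against the runner-up:

    def split_reliable(assignments, threshold):
        # assignments: list of (token, candidates), candidates being a
        # list of (tag, prob) pairs sorted by descending probability.
        reliable, unreliable = [], []
        for token, cands in assignments:
            tag, p1 = cands[0]
            p2 = cands[1][1] if len(cands) > 1 else 0.0
            ok = p2 == 0.0 or p1 / p2 >= threshold
            (reliable if ok else unreliable).append((token, tag))
        return reliable, unreliable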
1168
- "TABREF0": {
1169
- "text": "Part-of-speech tagging accuracy for the NEGRA corpus, averaged over 10 test runs, training and test set are disjoint. The table shows the percentage of unknown tokens, separate accuracies and standard deviations for known and unknown tokens, as well as the overall accuracy.",
1170
- "type_str": "table",
1171
- "html": null,
1172
- "content": "<table><tr><td/><td/><td/><td colspan=\"2\">percentage</td><td colspan=\"2\">known</td><td/><td/><td colspan=\"2\">unknown</td><td colspan=\"2\">overall</td></tr><tr><td/><td/><td/><td colspan=\"2\">unknowns</td><td>ace.</td><td/><td/><td/><td>acc.</td><td>(x</td><td>aCE.</td><td>o\"</td></tr><tr><td colspan=\"3\">NEGRA corpus</td><td/><td>11.9%</td><td colspan=\"2\">97.7% 0.23</td><td/><td colspan=\"3\">89.0% 0.72</td><td colspan=\"2\">96.7% 0.29</td></tr><tr><td/><td/><td colspan=\"9\">NEGRA Corpus: POS Learning Curve</td><td/></tr><tr><td>100</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Overall</td></tr><tr><td>9O</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>min =78.1%</td></tr><tr><td colspan=\"3\">80 70 /S</td><td/><td/><td/><td/><td/><td/><td/><td/><td>e</td><td>max=96.7% Known rain =95.7% max=97.7%</td></tr><tr><td>6O</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">---a---Unknown rain =61.2%</td></tr><tr><td>50</td><td>,</td><td>i</td><td>I</td><td>i</td><td>i</td><td>I</td><td>I</td><td>I</td><td>i</td><td>I</td><td/><td>max=89.0%</td></tr><tr><td>1</td><td>2</td><td>5</td><td>10</td><td>20</td><td>50</td><td colspan=\"4\">100 200 320 500</td><td colspan=\"3\">1000 x 1000 Training Length</td></tr><tr><td colspan=\"2\">50.8 46.4</td><td>41.4</td><td colspan=\"2\">36.0 30.7</td><td colspan=\"5\">23.0 18.3 14.3 11.9 11).3</td><td colspan=\"3\">St avg. percentage unknown</td></tr></table>",
1173
- "num": null
1174
- },
1175
- "TABREF1": {
1176
- "text": "Part-of-speech tagging accuracy for the Penn Treebank. The table shows the percentage of unknown tokens, separate accuracies and standard deviations for known and unknown tokens, as well as the overall accuracy. Learning curve for tagging the Penn Treebank. The training sets of variable sizes as well as test sets of 100,000 tokens were randomly chosen. Training and test sets were disjoint, the procedure was repeated 10 times and results were averaged.",
1177
- "type_str": "table",
1178
- "html": null,
1179
- "content": "<table><tr><td/><td/><td/><td/><td/><td colspan=\"2\">I percentage</td><td/><td colspan=\"2\">known</td><td/><td colspan=\"2\">unknown</td><td>overall</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">unknowns</td><td colspan=\"2\">acc.</td><td>a</td><td/><td>aCC.</td><td>O\"</td><td>aCE.</td><td>O\"</td></tr><tr><td/><td colspan=\"3\">Penn Treebank</td><td/><td/><td colspan=\"4\">2.9% 97.0% 0.15</td><td/><td colspan=\"2\">85.5% 0.69</td><td>96.7% 0.15</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"8\">Penn Treebank: POS Learning Curve</td></tr><tr><td/><td>100</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Overall</td></tr><tr><td/><td>9O</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>rain =78.6%</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>max=96.7%</td></tr><tr><td/><td>80</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Known</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>rain =95.2%</td></tr><tr><td>&lt;</td><td>70 60</td><td>/</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>max=97.0% Unknown min =62.2%</td></tr><tr><td/><td>50 I</td><td>I</td><td>~</td><td/><td>i</td><td>I</td><td>I</td><td>I</td><td/><td>i</td><td>I</td><td>I</td><td>max=85.5%</td></tr><tr><td/><td>1</td><td>2</td><td>5</td><td/><td>10</td><td>20</td><td>50</td><td colspan=\"3\">100 200</td><td>500</td><td colspan=\"2\">1000 \u00d7 1000 Training Length</td></tr><tr><td/><td colspan=\"2\">50.3 42.8</td><td colspan=\"4\">33.4 26.8 20.2</td><td colspan=\"2\">13.2 9.8</td><td colspan=\"2\">7.0</td><td>4.4</td><td colspan=\"2\">2.9 avg. percentage unknown</td></tr><tr><td colspan=\"14\">Figure 6: Penn Treebank: Accuracy of reliable assignments</td></tr><tr><td/><td>100</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Overall</td></tr><tr><td/><td>99</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>rain =96.6% max=99.4%</td></tr><tr><td/><td>98</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>97</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>96</td><td>i</td><td>i</td><td>i</td><td>i</td><td>t</td><td>i</td><td>i</td><td>i</td><td>i</td><td>i</td><td>i</td><td>i</td></tr><tr><td/><td/><td>2</td><td colspan=\"3\">5 10 20</td><td colspan=\"2\">50 100</td><td colspan=\"2\">500</td><td/><td>2000</td><td colspan=\"2\">10000 threshold 0</td></tr><tr><td/><td colspan=\"13\">100 97.7 94.6 92.2 89.8 86.3 83.5 80.4 76.6 73.8 71.0 67.2 64.5 % cases reliable</td></tr><tr><td/><td colspan=\"13\">-53.5 62.8 68.9 73.9 79.3 82.6 85.2 87.5 88.8 89.8 91.0 91.6 acc. of complement</td></tr></table>",
1180
- "num": null
1181
- }
1182
- }
1183
- }
1184
- }
Full_text_JSON/prefixA/json/A00/A00-1032.json DELETED
@@ -1,1209 +0,0 @@
1
- {
2
- "paper_id": "A00-1032",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:03.935537Z"
6
- },
7
- "title": "Language Independent Morphological Analysis",
8
- "authors": [
9
- {
10
- "first": "Tatsuo",
11
- "middle": [],
12
- "last": "Matsumoto",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Nara Institute of Science and Technology { tatuo-y",
17
- "location": {}
18
- },
19
- "email": "[email protected]@ac.jp"
20
- }
21
- ],
22
- "year": "",
23
- "venue": null,
24
- "identifiers": {},
25
- "abstract": "This paper proposes a framework of language independent morphological analysis and mainly concentrate on tokenization, the first process of morphological analysis. Although tokenization is usually not regarded as a difficult task in most segmented languages such as English, there are a number of problems in achieving precise treatment of lexical entries. We first introduce the concept of morpho-fragments, which are intermediate units between characters and lexical entries. We describe our approach to resolve problems arising in tokenization so as to attain a language independent morphological analyzer.",
26
- "pdf_parse": {
27
- "paper_id": "A00-1032",
28
- "_pdf_hash": "",
29
- "abstract": [
30
- {
31
- "text": "This paper proposes a framework of language independent morphological analysis and mainly concentrate on tokenization, the first process of morphological analysis. Although tokenization is usually not regarded as a difficult task in most segmented languages such as English, there are a number of problems in achieving precise treatment of lexical entries. We first introduce the concept of morpho-fragments, which are intermediate units between characters and lexical entries. We describe our approach to resolve problems arising in tokenization so as to attain a language independent morphological analyzer.",
32
- "cite_spans": [],
33
- "ref_spans": [],
34
- "eq_spans": [],
35
- "section": "Abstract",
36
- "sec_num": null
37
- }
38
- ],
39
- "body_text": [
40
- {
41
- "text": "The first step in natural language processing is to identify words in a sentence. We call this process a morphological analysis. Various languages exist in the world, and strategies for morphological analysis differ by types of language. Conventionally, morphological analyzers have been developed in one analyzer for each language approach. This is a language dependent approach. In contrast, We propose a framework of language independent morphological analysis system. We employ one analyzer for any language approach. This approach enables a rapid implementation of morphological analysis systems for new languages.",
42
- "cite_spans": [],
43
- "ref_spans": [],
44
- "eq_spans": [],
45
- "section": "Introduction",
46
- "sec_num": "1"
47
- },
48
- {
49
- "text": "We define two types of written languages: one is a segmented language, and the other is a nonsegmented language. In non-segmented languages such as Chinese and Japanese, since words are not separated by delimiters such as white spaces, tokenization is a important and difficult task. In segmented languages such as English, since words are seemingly separated by white spaces or punctuation marks, tokenization is regarded as a relatively easy task and little attention has been paid to. Therefore, each language dependent morphological analyzer has its own strategy for tokenization. We call a string defined in the dictionary lexeme. From an algorithmic point of view, tokenization is regarded as the process of converting an input stream of characters into a stream of lexemes.",
50
- "cite_spans": [],
51
- "ref_spans": [],
52
- "eq_spans": [],
53
- "section": "Introduction",
54
- "sec_num": "1"
55
- },
56
- {
57
- "text": "We assume that a morphological analysis consists of three processes: tokenization, dictionary lookup, and disambiguation. Dictionary look-up gets a string and returns a set of lexemes with part-ofspeech information. This implicitly contains lemmatization. Disambiguation selects the most plausible sequence of lexemes by a use of a rule-base model or a hidden Markov model (HMM) (Manning and Schiitze, 1999) . Disambiguation i s already language independent, since it does not process strings directly and therefore will not be taken up. On the other hand, tokenization and dictionary look-up are language dependent and shall be explained more in this paper.",
58
- "cite_spans": [
59
- {
60
- "start": 379,
61
- "end": 407,
62
- "text": "(Manning and Schiitze, 1999)",
63
- "ref_id": "BIBREF9"
64
- }
65
- ],
66
- "ref_spans": [],
67
- "eq_spans": [],
68
- "section": "Introduction",
69
- "sec_num": "1"
70
- },
71
- {
72
- "text": "We consider problems concerning tokenization of segmented languages in Section 2. To resolve these problem, we first apply the method of nonsegmented languages processing to segmented languages (Section 3). However, we do not obtain a satisfactory result. Then, we introduce the concept of morpho-fragments to generalize the method of non-segmented language processing (Section 4). The proposed framework resolves most problems in tokenization, and an efficient language independent part-of-speech tagging becomes possible.",
73
- "cite_spans": [],
74
- "ref_spans": [],
75
- "eq_spans": [],
76
- "section": "Introduction",
77
- "sec_num": "1"
78
- },
79
- {
80
- "text": "In segmented languages such as English, tokenization is regarded as a relatively easy task and little attention has been paid to. When a sentence has clear word boundaries, the analyzer just consults the dictionary look-up component whether strings between delimiters exist in the dictionary. If any string exists, the dictionary look-up component returns the set of possible parts-of-speech. This string is known as graphic word which is defined as \"a string of contiguous alphanumeric characters with space on either side; may include hyphens and apostrophes, but no other punctuation marks\" (Ku~era and Francis, 1967 Figure 1 ) and searches the dictionary for these graphic words. However, in practice, we want a sequence of lexemes (see the line labeled \"Lexemes\" in Figure 1 ). We list two major problems of tokenization in segmented languages below (examples in English). We use the term segment to refer to a string separated by white spaces.",
81
- "cite_spans": [
82
- {
83
- "start": 594,
84
- "end": 619,
85
- "text": "(Ku~era and Francis, 1967",
86
- "ref_id": "BIBREF8"
87
- }
88
- ],
89
- "ref_spans": [
90
- {
91
- "start": 620,
92
- "end": 628,
93
- "text": "Figure 1",
94
- "ref_id": "FIGREF0"
95
- },
96
- {
97
- "start": 771,
98
- "end": 779,
99
- "text": "Figure 1",
100
- "ref_id": "FIGREF0"
101
- }
102
- ],
103
- "eq_spans": [],
104
- "section": "Problems of Tokenization in Segmented Languages",
105
- "sec_num": "2"
106
- },
107
- {
108
- "text": "1. Segmentation(one segment into several lexemes): Segments with a period at the end (e.g, \"Calif.\" and \"etc.\") suffer from segmentation ambiguity. The period can denote an abbreviation, the end of a sentence, or both. The problem of sentence boundary ambiguity is not easy to solve (Palmer and Hearst, 1997) . A segment with an apostrophe also has segmentation ambiguity. For example, \"McDonald's\" is ambiguous since this string can be segmented into either \"Mc-Donald / Proper noun\" + \" 's / Possessive ending\" or \"McDonald's / Proper noun (company name)\". In addition, \"boys' \" in a sentence \"... the boys' toys ...\" is ambiguous. The string can be segmented into either \"boys' / Plural possessive\" or \"boys/Plural Noun\" \u00f7 \" ' / Punctuation (the end of a quotation)\" (Manning and Schiitze, 1999) . If a hyphenated segment such as \"data-base,\" \"F-16,\" or \"MS-DOS\" exists in the dictionary, it should be an independent lexeme. However, if a hyphenated segment such as \"55-years-old\" does not exist in the dictionary, hyphens should be treated as independent tokens (Fox, 1992) . Other punctuation marks such as \"/\" or \"_\" have the same problem in \"OS/2\" or \"max_size\" (in programming languages).",
109
- "cite_spans": [
110
- {
111
- "start": 283,
112
- "end": 308,
113
- "text": "(Palmer and Hearst, 1997)",
114
- "ref_id": "BIBREF17"
115
- },
116
- {
117
- "start": 770,
118
- "end": 798,
119
- "text": "(Manning and Schiitze, 1999)",
120
- "ref_id": "BIBREF9"
121
- },
122
- {
123
- "start": 1066,
124
- "end": 1077,
125
- "text": "(Fox, 1992)",
126
- "ref_id": "BIBREF2"
127
- }
128
- ],
129
- "ref_spans": [],
130
- "eq_spans": [],
131
- "section": "Problems of Tokenization in Segmented Languages",
132
- "sec_num": "2"
133
- },
134
- {
135
- "text": "If a lexeme consisting of a sequence of segments such as a proper noun (e.g., \"New York\") or a phrasal verb (e.g., \"look at\" and \"get up\") exists in the dictionary, it should be a lexeme.",
136
- "cite_spans": [],
137
- "ref_spans": [],
138
- "eq_spans": [],
139
- "section": "Round-up(several segments into one lexeme):",
140
- "sec_num": "2."
141
- },
142
- {
143
- "text": "To handle such lexemes, we need to store multisegment lexemes in the dictionary. Webster and Kit handle idioms and fixed expressions in this way (Webster and Kit, 1992) . In Penn Treebank (Santorini, 1990 ), a proper noun like \"New York\" is defined as two individual proper nouns \"New / NNP\" \u00f7 \"York / NNP,\" disregarding round-up of several:segments into a lexeme.",
144
- "cite_spans": [
145
- {
146
- "start": 145,
147
- "end": 168,
148
- "text": "(Webster and Kit, 1992)",
149
- "ref_id": "BIBREF19"
150
- },
151
- {
152
- "start": 188,
153
- "end": 204,
154
- "text": "(Santorini, 1990",
155
- "ref_id": "BIBREF18"
156
- }
157
- ],
158
- "ref_spans": [],
159
- "eq_spans": [],
160
- "section": "Round-up(several segments into one lexeme):",
161
- "sec_num": "2."
162
- },
163
- {
164
- "text": "The definition of lexemes in a dictionary depends on the requirement of application. Therefore, a simple pattern matcher is not enough to deal with language independent tokenization.",
165
- "cite_spans": [],
166
- "ref_spans": [],
167
- "eq_spans": [],
168
- "section": "Round-up(several segments into one lexeme):",
169
- "sec_num": "2."
170
- },
171
- {
172
- "text": "Non-segmented languages do not have a delimiter between lexemes ( Figure 2 ). Therefore, a treatment of further segmentation and rounding up has been well considered. In a non-segmented language, the analyzer considers all prefixes from each position in the sentence, checks whether each prefix matches the lexeme in the dictionary, stores these lexemes in a graph structure, and finds the most plausible sequence of lexemes in the graph structure. To find the sequence, Nagata proposed a probabilistic language model for non-segmented languages (Nagata, 1994) (Nagata, 1999) .",
173
- "cite_spans": [
174
- {
175
- "start": 546,
176
- "end": 560,
177
- "text": "(Nagata, 1994)",
178
- "ref_id": "BIBREF15"
179
- },
180
- {
181
- "start": 561,
182
- "end": 575,
183
- "text": "(Nagata, 1999)",
184
- "ref_id": "BIBREF16"
185
- }
186
- ],
187
- "ref_spans": [
188
- {
189
- "start": 66,
190
- "end": 74,
191
- "text": "Figure 2",
192
- "ref_id": "FIGREF1"
193
- }
194
- ],
195
- "eq_spans": [],
196
- "section": "Round-up(several segments into one lexeme):",
197
- "sec_num": "2."
198
- },
199
- {
200
- "text": "The crucial difference between segmented and non-segmented languages in the process of morphological analysis appears in the way of the dictionary look-up. The standard technique for looking up lexemes in Japanese dictionaries is to use a trie structure (Fredkin, 1960 )(Knuth, 1998 . A trie structured dictionary gives all possible lexemes that start at a given position in a sentence effectively (Morimoto and Aoe, 1993) . We call this method of word looking-up as \"common prefix search\" (hereafter CPS). Figure 3 shows a part of the trie for Japanese lexeme dictionary. The results of CPS for \"~j~ ~'7 ~ o \"(I go to Ebina.) are \"~j~\" and \"~.\" To get all possible lexemes in the sentence, the analyzer has to slide the start position for CPS to the right by character by character.",
201
- "cite_spans": [
202
- {
203
- "start": 254,
204
- "end": 268,
205
- "text": "(Fredkin, 1960",
206
- "ref_id": "BIBREF4"
207
- },
208
- {
209
- "start": 269,
210
- "end": 282,
211
- "text": ")(Knuth, 1998",
212
- "ref_id": "BIBREF7"
213
- },
214
- {
215
- "start": 398,
216
- "end": 422,
217
- "text": "(Morimoto and Aoe, 1993)",
218
- "ref_id": "BIBREF14"
219
- }
220
- ],
221
- "ref_spans": [
222
- {
223
- "start": 507,
224
- "end": 515,
225
- "text": "Figure 3",
226
- "ref_id": null
227
- }
228
- ],
229
- "eq_spans": [],
230
- "section": "Round-up(several segments into one lexeme):",
231
- "sec_num": "2."
232
- },
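A minimal trie with the common prefix search described in this paragraph; the dictionary entries and the example lookup are illustrative:

    class Trie:
        def __init__(self):
            self.children = {}      # edge label (character or MF) -> Trie
            self.is_lexeme = False

        def insert(self, units):
            node = self
            for u in units:
                node = node.children.setdefault(u, Trie())
            node.is_lexeme = True

        def common_prefix_search(self, units):
            # Return every dictionary lexeme that starts at position 0
            # of `units`: a single traversal, no backtracking.
            node, prefix, hits = self, [], []
            for u in units:
                node = node.children.get(u)
                if node is None:
                    break
                prefix.append(u)
                if node.is_lexeme:
                    hits.append("".join(prefix))
            return hits

    dic = Trie()
    for w in ["海老", "海老名", "行く"]:
        dic.insert(w)
    print(dic.common_prefix_search("海老名に行く。"))  # ['海老', '海老名']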
233
- {
234
- "text": "A simple method that directly applies the morphological analysis method for non-segmented languages can handle the problems of segmented languages.",
235
- "cite_spans": [],
236
- "ref_spans": [],
237
- "eq_spans": [],
238
- "section": "A Naive Approach",
239
- "sec_num": "3"
240
- },
241
- {
242
- "text": "For instance, to analyze the sentence, \"They've gone to school together,\" we first delete all white spaces in the sentence and get \"They'vegonetoschooltogether.\" Then we pass it to the analyzer for non-segmented languages. However, the analyzer may return the result as \"They / 've / gone / to / school / to / get / her / .\" inducing a spurious ambiguity. Mills applied this method and tokenized the medieval manuscript in Cornish (Mills, 1998) .",
243
- "cite_spans": [
244
- {
245
- "start": 431,
246
- "end": 444,
247
- "text": "(Mills, 1998)",
248
- "ref_id": "BIBREF12"
249
- }
250
- ],
251
- "ref_spans": [],
252
- "eq_spans": [],
253
- "section": "A Naive Approach",
254
- "sec_num": "3"
255
- },
256
- {
257
- "text": "We carried out experiments to examine the influence of delimiter deletion. We use Penn Treebank (Santorini, 1990 ) part-of-speech tagged corpus (1.3M lexemes) to train an HMM and analyze sentences by HMM-based morphological analyzer MOZ (Yamashita, 1999) (Ymashita et al., 1999) . We use a bigram model for training it from the corpus. Test data is the same as the training corpus. Table 1 shows accuracy of segmentation and part-ofspeech tagging. The accuracy is expressed in terms of recall and precision (Nagata, 1999) . Let the number of lexemes in the tagged corpus be Std, the number of lexemes in the output of the analyze be Sys, and the number of matched lexemes be M. Recall is defined as M/Std, precision is defined as M/Sys.",
258
- "cite_spans": [
259
- {
260
- "start": 96,
261
- "end": 112,
262
- "text": "(Santorini, 1990",
263
- "ref_id": "BIBREF18"
264
- },
265
- {
266
- "start": 237,
267
- "end": 254,
268
- "text": "(Yamashita, 1999)",
269
- "ref_id": "BIBREF20"
270
- },
271
- {
272
- "start": 255,
273
- "end": 278,
274
- "text": "(Ymashita et al., 1999)",
275
- "ref_id": "BIBREF21"
276
- },
277
- {
278
- "start": 507,
279
- "end": 521,
280
- "text": "(Nagata, 1999)",
281
- "ref_id": "BIBREF16"
282
- }
283
- ],
284
- "ref_spans": [
285
- {
286
- "start": 382,
287
- "end": 389,
288
- "text": "Table 1",
289
- "ref_id": null
290
- }
291
- ],
292
- "eq_spans": [],
293
- "section": "A Naive Approach",
294
- "sec_num": "3"
295
- },
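A worked instance of these definitions with made-up counts:

    Std, Sys, M = 100, 120, 90   # reference lexemes, system lexemes, matches
    recall = M / Std             # 0.90
    precision = M / Sys          # 0.75
    f1 = 2 * precision * recall / (precision + recall)   # about 0.82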
296
- {
297
- "text": "The following are the labels in Table 1 (sentence formats and methods we use):",
298
- "cite_spans": [],
299
- "ref_spans": [
300
- {
301
- "start": 32,
302
- "end": 39,
303
- "text": "Table 1",
304
- "ref_id": null
305
- }
306
- ],
307
- "eq_spans": [],
308
- "section": "A Naive Approach",
309
- "sec_num": "3"
310
- },
311
- {
312
- "text": "LXS We isolate all the lexemes in sentences and apply the method for segmented languages to the sentences. This situation is ideal, since the problems we discussed in Section 2 do not exist. In other words, all the sentences do not have segmentation ambiguity. We use the results as the baseline. Example sentence:",
313
- "cite_spans": [],
314
- "ref_spans": [],
315
- "eq_spans": [],
316
- "section": "A Naive Approach",
317
- "sec_num": "3"
318
- },
319
- {
320
- "text": "\"Itu ' suMr. uLeeu ' supenu \u2022\"",
321
- "cite_spans": [],
322
- "ref_spans": [],
323
- "eq_spans": [],
324
- "section": "A Naive Approach",
325
- "sec_num": "3"
326
- },
327
- {
328
- "text": "NSP We remove all the spaces in sentences and apply the method for non-segmented languages to the sentences. Example sentence:",
329
- "cite_spans": [],
330
- "ref_spans": [],
331
- "eq_spans": [],
332
- "section": "A Naive Approach",
333
- "sec_num": "3"
334
- },
335
- {
336
- "text": "\"It ' sMr. Lee ' spen.\"",
337
- "cite_spans": [],
338
- "ref_spans": [],
339
- "eq_spans": [],
340
- "section": "A Naive Approach",
341
- "sec_num": "3"
342
- },
343
- {
344
- "text": "NOR Sentences are in the original normal format. We apply the method for non-segmented languages to the sentences. Example sentence:",
345
- "cite_spans": [],
346
- "ref_spans": [],
347
- "eq_spans": [],
348
- "section": "A Naive Approach",
349
- "sec_num": "3"
350
- },
351
- {
352
- "text": "\"It ' SuMr. uLee ' supen.\"",
353
- "cite_spans": [],
354
- "ref_spans": [],
355
- "eq_spans": [],
356
- "section": "A Naive Approach",
357
- "sec_num": "3"
358
- },
359
- {
360
- "text": "Because of no segmentation ambiguity, \"LXS\" performs better than \"NSP\" and \"NOR.\" The following are typical example of segmentation errors. The errors originate from conjunctive ambiguity and disjunctive ambiguity (Guo, 1997) .",
361
- "cite_spans": [
362
- {
363
- "start": 214,
364
- "end": 225,
365
- "text": "(Guo, 1997)",
366
- "ref_id": "BIBREF5"
367
- }
368
- ],
369
- "ref_spans": [],
370
- "eq_spans": [],
371
- "section": "A Naive Approach",
372
- "sec_num": "3"
373
- },
374
- {
375
- "text": "conjunctive ambiguity The analyzer recognized \"away, .... ahead,\" \"anymore,\" and '~orkforce\" as \"a way,\" \"a head,\" \"any more,\" and '~ork force,\" respectively. In the results of \"NSP,\" the number of this type of error is 11,267.",
376
- "cite_spans": [],
377
- "ref_spans": [],
378
- "eq_spans": [],
379
- "section": "A Naive Approach",
380
- "sec_num": "3"
381
- },
382
- {
383
- "text": "disjunctive ambiguity The analyzer recognized \"a tour,\" \"a ton,\" and \"Alaskan or\" as \"at our,\" \"at on,\" and \"Alaska nor,\" respectively. In the results of \"NSP,\" the number of this type of error is 233.",
384
- "cite_spans": [],
385
- "ref_spans": [],
386
- "eq_spans": [],
387
- "section": "A Naive Approach",
388
- "sec_num": "3"
389
- },
390
- {
391
- "text": "Since only \"NSP\" has disjunctive ambiguity, \"NOR\" performs better than \"NSP.\" This shows that white spaces between segments help to decrease segmentation ambiguity. Though the proportional difference in accuracy looks slight between these models, there is a considerable influence in the analysis efficiency. In the cases of \"NSP\" and \"NOR,\" the analyzer may look up the dictionary from any position in a given sentence, therefore candidates for lexemes increase, and the analysis time also increase. The results of our experiments show that the run time of analyses of \"NSP\" or \"NOR\" takes about 4 times more than that of \"LXS.\"",
392
- "cite_spans": [],
393
- "ref_spans": [],
394
- "eq_spans": [],
395
- "section": "A Naive Approach",
396
- "sec_num": "3"
397
- },
398
- {
399
- "text": "Morpho-fragments: The Building Blocks Although segmented languages seemingly have clear word boundaries, there are problems of further segmentation and rounding up as introduced in Section 2. The naive approach in Section 3 does not work well. In this section, we propose an efficient and sophisticated method to solve the problems by introducing the concept of morpho-/ragments. We also show that a uniform treatment of segmented and non-segmented languages is possible without inducing the spurious ambiguity.",
400
- "cite_spans": [],
401
- "ref_spans": [],
402
- "eq_spans": [],
403
- "section": "4",
404
- "sec_num": null
405
- },
406
- {
407
- "text": "The morpho-fragments (MFs) of a language is defined as the smallest set of strings of the alphabet which can compose all lexemes in the dictionary. In other words, MFs are intermediate units between Figure 1 and Figure 2 ). MFs are well defined tokens which are specialized for language independent morphological analysis. For example, in English, all punctuation marks are MFs. Parts of a token separated by a punctuation mark such as \"He,\" \"s,\" and the punctuation mark itself, ..... in \"He's\" are MFs. The tokens in a compound lexeme such as \"New\" and \"York\" in \"New York\" are also MFs. In non-segmented languages such as Chinese and Japanese, every single character is a MF. Figure 4 shows decomposition of sentences into MFs (enclosed by \"[\" and \"]\") for several languages. Delimiters (denoted \"J') are treated as special MFs that cannot start nor end a lexeme.",
408
- "cite_spans": [],
409
- "ref_spans": [
410
- {
411
- "start": 199,
412
- "end": 207,
413
- "text": "Figure 1",
414
- "ref_id": "FIGREF0"
415
- },
416
- {
417
- "start": 212,
418
- "end": 220,
419
- "text": "Figure 2",
420
- "ref_id": "FIGREF1"
421
- },
422
- {
423
- "start": 679,
424
- "end": 687,
425
- "text": "Figure 4",
426
- "ref_id": null
427
- }
428
- ],
429
- "eq_spans": [],
430
- "section": "Definition",
431
- "sec_num": "4.1"
432
- },
433
- {
434
- "text": "Once the set of MFs is determined, the dictionary is compiled into a trie structure in which the edges are labeled by MFs, as shown in Figure 5 for English and in Figure 3 for Japanese. A trie structure ensures to return all and only possible lexemes starting at a particular position in a sentence by a one-time consultation to the dictionary, resulting in an efficient dictionary look-up with no spurious ambiguity.",
435
- "cite_spans": [],
436
- "ref_spans": [
437
- {
438
- "start": 135,
439
- "end": 143,
440
- "text": "Figure 5",
441
- "ref_id": "FIGREF3"
442
- },
443
- {
444
- "start": 163,
445
- "end": 171,
446
- "text": "Figure 3",
447
- "ref_id": null
448
- }
449
- ],
450
- "eq_spans": [],
451
- "section": "Definition",
452
- "sec_num": "4.1"
453
- },
454
- {
455
- "text": "When we analyze a sentence of a non-segmented language, to get all possible lexemes in the sentence, the analyzer slides the position one character by one character from the beginning to the end of the sentence and consults the trie structured dictionary (Section 2). Note that every character is a MF in non-segmented languages. In the same way, to analyze a sentence of a segmented language, the analyzer slides the position one MF by one MF and consults the trie structured dictionary, then, all possible lexemes are obtained. For example, in Figure 5 , the results of CPS for \"'m in ...\" are ..... and \"'m,\" and the results for \"New York is ...\" are \"New\" and \"New York.\" Therefore, a morphological analyzer with CPSbased dictionary look-up for non-segmented languages can be used for the analysis of segmented languages. In other words, MFs make possible language independent morphological analysis. We can also say MFs specify the positions to start as well as to end the dictionary look-up. ",
456
- "cite_spans": [],
457
- "ref_spans": [
458
- {
459
- "start": 546,
460
- "end": 554,
461
- "text": "Figure 5",
462
- "ref_id": "FIGREF3"
463
- }
464
- ],
465
- "eq_spans": [],
466
- "section": "Definition",
467
- "sec_num": "4.1"
468
- },
469
- {
470
- "text": "The problem is that it is not easy to identify the complete set of MFs for a segmented language. We do not make effort to find out the minimum and complete set of MFs. Instead, we decide to specify all the possible delimiters and punctuation marks appearing in the dictionary, these may separate MFs or become themselves as MFs. By specifying the following three kinds of information for the language under consideration, we attain a pseudo-complete MF definition. The following setting not only simplifies the identification of MFs but also achieves a uniform framework of language dependent morphological analysis system.",
471
- "cite_spans": [],
472
- "ref_spans": [],
473
- "eq_spans": [],
474
- "section": "How to Recognize Morpho-fragments",
475
- "sec_num": "4.2"
476
- },
477
- {
478
- "text": "The languages are classified into two groups: segmented and non-segmented languages. \"Language type\" decides if every character in the language can be an MF. In non-segmented language every character can be an MF. In segmented language, punctuation marks and sequences of characters except for delimiters can be an MF.",
479
- "cite_spans": [],
480
- "ref_spans": [],
481
- "eq_spans": [],
482
- "section": "The language type:",
483
- "sec_num": "1."
484
- },
485
- {
486
- "text": "2. The set of the delimiters acting as boundaries: These act as boundaries of MFs. However, these can not be independent MFs (can not start nor end a lexeme). For example, white spaces are delimiters in segmented languages.",
487
- "cite_spans": [],
488
- "ref_spans": [],
489
- "eq_spans": [],
490
- "section": "The language type:",
491
- "sec_num": "1."
492
- },
493
- {
494
- "text": "3. The set of the punctuation marks and other symbols: These act as a boundary of MFs as well as an MF. Examples are an apostrophe in \"It's,\" a period in \"Mr.,\" and a hyphen in \"F-16.\"",
495
- "cite_spans": [],
496
- "ref_spans": [],
497
- "eq_spans": [],
498
- "section": "The language type:",
499
- "sec_num": "1."
500
- },
501
- {
502
- "text": "\u2022 Using these information, the process of recognizing MFs becomes simple and easy. The process can be implemented by a finite state machine or a simple pattern matcher.",
503
- "cite_spans": [],
504
- "ref_spans": [],
505
- "eq_spans": [],
506
- "section": "The language type:",
507
- "sec_num": "1."
508
- },
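A sketch of such a pattern matcher driven by the three settings above; the delimiter and punctuation sets here are illustrative, not the paper's actual definition for English:

    import re

    def morpho_fragments(sentence, segmented=True,
                         delimiters=" \t", puncts=".,'-/_?!\"()"):
        # Delimiters only separate MFs; punctuation marks both separate
        # and survive as MFs of their own; in a non-segmented language
        # every character is an MF by itself.
        if not segmented:
            return [c for c in sentence if c not in delimiters]
        pattern = "[%s]|[^%s%s]+" % (re.escape(puncts),
                                     re.escape(delimiters), re.escape(puncts))
        return re.findall(pattern, sentence)

    print(morpho_fragments("Dr. Lee and John's son."))
    # ['Dr', '.', 'Lee', 'and', 'John', "'", 's', 'son', '.']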
509
- {
510
- "text": "The following is the example of the definition for English: In incompletely-segmented languages, such as Korean, we have to consider two types of connection of lexemes, one is \"over a delimiter\" and the other is \"inside a segment\" (Hirano and Matsumoto, 1996) . If we regard delimiters as lexemes, a trigram model can make it possible to treat both types. The definition gives possible starting positions of MFs in sentences of the language and the same morphological analysis system is usable for any language.",
511
- "cite_spans": [
512
- {
513
- "start": 231,
514
- "end": 259,
515
- "text": "(Hirano and Matsumoto, 1996)",
516
- "ref_id": "BIBREF6"
517
- }
518
- ],
519
- "ref_spans": [],
520
- "eq_spans": [],
521
- "section": "The language type:",
522
- "sec_num": "1."
523
- },
524
- {
525
- "text": "We examined an effect of applying the morphofragments to analysis. Conditions of the experiment are almost the same as \"NOR.\" The difference is that we use the morpho-fragments definition for English. The row labeled \"MF\" in Table 1 shows the results of the analysis. Using the morpho-fragments decreases the analysis time drastically. The accuracy is also better than those of the naive approaches.",
526
- "cite_spans": [],
527
- "ref_spans": [
528
- {
529
- "start": 225,
530
- "end": 232,
531
- "text": "Table 1",
532
- "ref_id": null
533
- }
534
- ],
535
- "eq_spans": [],
536
- "section": "The language type:",
537
- "sec_num": "1."
538
- },
539
- {
540
- "text": "Well studied language such as English may have a hand tuned tokenizer which is superior to ours. However, to tune a tokenizer by hand is not suitable to implement many minor languages.",
541
- "cite_spans": [],
542
- "ref_spans": [],
543
- "eq_spans": [],
544
- "section": "The language type:",
545
- "sec_num": "1."
546
- },
547
- {
548
- "text": "We implement a language independent morphological analysis system based on the concept of morphofragments (Yamashita, 1999) . With an existence of tagged corpus, it is straightforward to implement part-of-speech taggers. We have implemented several of such taggers. The system uses an HMM. This is trained by a part-of-speech tagged corpus. We overview the setting and performance of tagging for several languages.",
549
- "cite_spans": [
550
- {
551
- "start": 106,
552
- "end": 123,
553
- "text": "(Yamashita, 1999)",
554
- "ref_id": "BIBREF20"
555
- }
556
- ],
557
- "ref_spans": [],
558
- "eq_spans": [],
559
- "section": "Implementation",
560
- "sec_num": "4.3"
561
- },
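A compact sketch of the decoding step such a tagger needs (bigram Viterbi; smoothing is omitted and all names are illustrative; trans and emit are expected to be defaultdict(float) tables estimated from the tagged corpus):

    import math

    def viterbi(tokens, tags, trans, emit, start="<s>"):
        # trans[(prev_tag, tag)] and emit[(tag, word)] are probabilities;
        # unseen events get probability 0, i.e. a large negative log score.
        lp = lambda p: math.log(p) if p > 0 else -1e9
        best = {start: (0.0, [])}
        for w in tokens:
            nxt = {}
            for t in tags:
                score, seq = max(
                    ((s + lp(trans[(pt, t)]) + lp(emit[(t, w)]), seq)
                     for pt, (s, seq) in best.items()),
                    key=lambda x: x[0])
                nxt[t] = (score, seq + [t])
            best = nxt
        return max(best.values(), key=lambda x: x[0])[1]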
562
- {
563
- "text": "An HMM is trained by the part-of-speech tagged corpus part of Penn Treebank (Santorini, 1990) (1.3 million morphemes). We use a trigram model. The lexemes in the dictionary are taken from the corpus as well as from the entry words in Oxford Advanced Learner's Dictionary (Mitton, 1992) . The system achieves 97% precision and recall for training data, 95% precision and recall for test data.",
564
- "cite_spans": [
565
- {
566
- "start": 76,
567
- "end": 93,
568
- "text": "(Santorini, 1990)",
569
- "ref_id": "BIBREF18"
570
- },
571
- {
572
- "start": 271,
573
- "end": 285,
574
- "text": "(Mitton, 1992)",
575
- "ref_id": "BIBREF13"
576
- }
577
- ],
578
- "ref_spans": [],
579
- "eq_spans": [],
580
- "section": "English",
581
- "sec_num": null
582
- },
583
- {
584
- "text": "An HMM is trained by Japanese part-of-speech tagged corpus(Rea, 1998) (0.9 million morphemes).",
585
- "cite_spans": [],
586
- "ref_spans": [],
587
- "eq_spans": [],
588
- "section": "Japanese",
589
- "sec_num": null
590
- },
591
- {
592
- "text": "We use a trigram model. The lexemes in the dictionary are taken from the corpus as well as from the dictionary of ChaSen ), a freely available Japanese morphological analyzer. The system achieves 97% precision and recall for training and test data.",
593
- "cite_spans": [],
594
- "ref_spans": [],
595
- "eq_spans": [],
596
- "section": "Japanese",
597
- "sec_num": null
598
- },
599
- {
600
- "text": "An HMM is trained by the Chinese part-ofspeech tagged corpus released by CKIP(Chinese Knowledge Information Processing Group, 1995) (2.1 million morphemes). We use a bigram model. The lexemes in the dictionary are taken only from the corpus. The system achieves 95% precision and recall for training data, 91% precision and recall for test data.",
601
- "cite_spans": [],
602
- "ref_spans": [],
603
- "eq_spans": [],
604
- "section": "Chinese",
605
- "sec_num": null
606
- },
607
- {
608
- "text": "We address two problems of tokenization in segmented languages: further segmentation and roundup. These problems are discussed by several authors including Mills (Mills, 1998) Webster & Kit (Webster and Kit, 1992) . However, their proposed solutions are not language independent.",
609
- "cite_spans": [
610
- {
611
- "start": 162,
612
- "end": 175,
613
- "text": "(Mills, 1998)",
614
- "ref_id": "BIBREF12"
615
- },
616
- {
617
- "start": 190,
618
- "end": 213,
619
- "text": "(Webster and Kit, 1992)",
620
- "ref_id": "BIBREF19"
621
- }
622
- ],
623
- "ref_spans": [],
624
- "eq_spans": [],
625
- "section": "Related Work and Remarks",
626
- "sec_num": "5"
627
- },
628
- {
629
- "text": "To resolve the problems of tokenization, we first apply the method of non-segmented languages processing. However, this causes spurious segmentation ambiguity and a considerable influence in the analysis times. Therefore, we propose the concept of morpho-fragments that minimally comprises the lexemes in a language. Although the idea is quite simple, our approach avoids spurious ambiguity and attains an efficient look-up of a trie structured dictionary. In conclusion, the concept of morphofragments makes it easy to implemented language independent morphological analysis.",
630
- "cite_spans": [],
631
- "ref_spans": [],
632
- "eq_spans": [],
633
- "section": "Related Work and Remarks",
634
- "sec_num": "5"
635
- }
636
- ],
637
- "back_matter": [],
638
- "bib_entries": {
639
- "BIBREF0": {
640
- "ref_id": "b0",
641
- "title": "Chinese Knowledge Information Processing Group",
642
- "authors": [],
643
- "year": 1995,
644
- "venue": "",
645
- "volume": "",
646
- "issue": "",
647
- "pages": "",
648
- "other_ids": {},
649
- "num": null,
650
- "urls": [],
651
- "raw_text": "Chinese Knowledge Information Processing Group, 1995. ~ ~;",
652
- "links": null
653
- },
654
- "BIBREF2": {
655
- "ref_id": "b2",
656
- "title": "Lexical Analysis and Stoplists",
657
- "authors": [
658
- {
659
- "first": "Christopher",
660
- "middle": [],
661
- "last": "Fox",
662
- "suffix": ""
663
- }
664
- ],
665
- "year": 1992,
666
- "venue": "",
667
- "volume": "",
668
- "issue": "",
669
- "pages": "102--130",
670
- "other_ids": {},
671
- "num": null,
672
- "urls": [],
673
- "raw_text": "Christopher Fox, 1992. Lexical Analysis and Sto- plists, chapter 7, pages 102-130. In Frakes and Baeza-Yates (Frakes and Baeza-Yates, 1992).",
674
- "links": null
675
- },
676
- "BIBREF3": {
677
- "ref_id": "b3",
678
- "title": "Information Retrieval: Data Structures ~ Algorithms",
679
- "authors": [
680
- {
681
- "first": "B",
682
- "middle": [],
683
- "last": "William",
684
- "suffix": ""
685
- },
686
- {
687
- "first": "Ricardo",
688
- "middle": [
689
- "A"
690
- ],
691
- "last": "Frakes",
692
- "suffix": ""
693
- },
694
- {
695
- "first": "",
696
- "middle": [],
697
- "last": "Baeza-Yates",
698
- "suffix": ""
699
- }
700
- ],
701
- "year": 1992,
702
- "venue": "",
703
- "volume": "",
704
- "issue": "",
705
- "pages": "",
706
- "other_ids": {},
707
- "num": null,
708
- "urls": [],
709
- "raw_text": "William B. Frakes and Ricardo A. Baeza-Yates, ed- itors. 1992. Information Retrieval: Data Struc- tures ~ Algorithms. Prentice-Hall.",
710
- "links": null
711
- },
712
- "BIBREF4": {
713
- "ref_id": "b4",
714
- "title": "Trie memory",
715
- "authors": [
716
- {
717
- "first": "Edward",
718
- "middle": [],
719
- "last": "Fredkin",
720
- "suffix": ""
721
- }
722
- ],
723
- "year": 1960,
724
- "venue": "Communications of the ACM",
725
- "volume": "3",
726
- "issue": "9",
727
- "pages": "490--500",
728
- "other_ids": {},
729
- "num": null,
730
- "urls": [],
731
- "raw_text": "Edward Fredkin. 1960. Trie memory. Communica- tions of the ACM, 3(9):490-500, September.",
732
- "links": null
733
- },
734
- "BIBREF5": {
735
- "ref_id": "b5",
736
- "title": "Critical tokenization and its properties",
737
- "authors": [
738
- {
739
- "first": "Jin",
740
- "middle": [],
741
- "last": "Guo",
742
- "suffix": ""
743
- }
744
- ],
745
- "year": 1997,
746
- "venue": "Computational Linguistics",
747
- "volume": "23",
748
- "issue": "4",
749
- "pages": "569--596",
750
- "other_ids": {},
751
- "num": null,
752
- "urls": [],
753
- "raw_text": "Jin Guo. 1997. Critical tokenization and its prop- erties. Computational Linguistics, 23(4):569-596, December.",
754
- "links": null
755
- },
756
- "BIBREF6": {
757
- "ref_id": "b6",
758
- "title": "A proposal of korean conjugation system and its application to morphological analysis",
759
- "authors": [
760
- {
761
- "first": "Yoshitaka",
762
- "middle": [],
763
- "last": "Hirano",
764
- "suffix": ""
765
- },
766
- {
767
- "first": "Yuji",
768
- "middle": [],
769
- "last": "Matsumoto",
770
- "suffix": ""
771
- }
772
- ],
773
- "year": 1996,
774
- "venue": "Proceedings of the 11th Pacific Asia Conference on Language, Information and Computation (PACLIC 11)",
775
- "volume": "",
776
- "issue": "",
777
- "pages": "229--236",
778
- "other_ids": {},
779
- "num": null,
780
- "urls": [],
781
- "raw_text": "Yoshitaka Hirano and Yuji Matsumoto. 1996. A proposal of korean conjugation system and its ap- plication to morphological analysis. In Proceed- ings of the 11th Pacific Asia Conference on Lan- guage, Information and Computation (PACLIC 11), pages 229-236, December.",
782
- "links": null
783
- },
784
- "BIBREF7": {
785
- "ref_id": "b7",
786
- "title": "The Art of Computer Programming : Sorting and Searching",
787
- "authors": [
788
- {
789
- "first": "E",
790
- "middle": [],
791
- "last": "Donald",
792
- "suffix": ""
793
- },
794
- {
795
- "first": "",
796
- "middle": [],
797
- "last": "Knuth",
798
- "suffix": ""
799
- }
800
- ],
801
- "year": 1998,
802
- "venue": "",
803
- "volume": "3",
804
- "issue": "",
805
- "pages": "",
806
- "other_ids": {},
807
- "num": null,
808
- "urls": [],
809
- "raw_text": "Donald E. Knuth. 1998. The Art of Computer Pro- gramming : Sorting and Searching, volume 3. Addison-Wesley, second edition, May.",
810
- "links": null
811
- },
812
- "BIBREF8": {
813
- "ref_id": "b8",
814
- "title": "Computational Analysis of Present-Day American English",
815
- "authors": [
816
- {
817
- "first": "Henry",
818
- "middle": [],
819
- "last": "Ku~era",
820
- "suffix": ""
821
- },
822
- {
823
- "first": "W. Nelson",
824
- "middle": [],
825
- "last": "Francis",
826
- "suffix": ""
827
- }
828
- ],
829
- "year": 1967,
830
- "venue": "",
831
- "volume": "",
832
- "issue": "",
833
- "pages": "",
834
- "other_ids": {},
835
- "num": null,
836
- "urls": [],
837
- "raw_text": "Henry Ku~era and W. Nelson Francis. 1967. Com- putational Analysis of Present-Day American En- glish. Brown University Press.",
838
- "links": null
839
- },
840
- "BIBREF9": {
841
- "ref_id": "b9",
842
- "title": "Foundations of Statistical Natural Language Processing",
843
- "authors": [
844
- {
845
- "first": "D",
846
- "middle": [],
847
- "last": "Christopher",
848
- "suffix": ""
849
- },
850
- {
851
- "first": "Hinrich",
852
- "middle": [],
853
- "last": "Manning",
854
- "suffix": ""
855
- },
856
- {
857
- "first": "",
858
- "middle": [],
859
- "last": "Schiitze",
860
- "suffix": ""
861
- }
862
- ],
863
- "year": 1999,
864
- "venue": "",
865
- "volume": "",
866
- "issue": "",
867
- "pages": "",
868
- "other_ids": {},
869
- "num": null,
870
- "urls": [],
871
- "raw_text": "Christopher D. Manning and Hinrich Schiitze. 1999. Foundations of Statistical Natural Language Pro- cessing. The MIT Press.",
872
- "links": null
873
- },
874
- "BIBREF10": {
875
- "ref_id": "b10",
876
- "title": "Japanese Morphological Analysis System ChaSen version",
877
- "authors": [
878
- {
879
- "first": "Yuji",
880
- "middle": [],
881
- "last": "Matsumoto",
882
- "suffix": ""
883
- },
884
- {
885
- "first": "Akira",
886
- "middle": [],
887
- "last": "Kitauchi",
888
- "suffix": ""
889
- },
890
- {
891
- "first": "Tatsuo",
892
- "middle": [],
893
- "last": "Yamashita",
894
- "suffix": ""
895
- },
896
- {
897
- "first": "Yoshitaka",
898
- "middle": [],
899
- "last": "Hirano",
900
- "suffix": ""
901
- }
902
- ],
903
- "year": 1999,
904
- "venue": "",
905
- "volume": "",
906
- "issue": "",
907
- "pages": "",
908
- "other_ids": {},
909
- "num": null,
910
- "urls": [],
911
- "raw_text": "Yuji Matsumoto, Akira Kitauchi, Tatsuo Yamashita, and Yoshitaka Hirano, 1999. Japanese Mor- phological Analysis System ChaSen version 2.0",
912
- "links": null
913
- },
914
- "BIBREF12": {
915
- "ref_id": "b12",
916
- "title": "Lexicon based critical tokenization: An algorithm",
917
- "authors": [
918
- {
919
- "first": "Jon",
920
- "middle": [],
921
- "last": "Mills",
922
- "suffix": ""
923
- }
924
- ],
925
- "year": 1998,
926
- "venue": "Euralex'98",
927
- "volume": "",
928
- "issue": "",
929
- "pages": "213--220",
930
- "other_ids": {},
931
- "num": null,
932
- "urls": [],
933
- "raw_text": "Jon Mills. 1998. Lexicon based critical tokenization: An algorithm. In Euralex'98, pages 213-220, Au- gust.",
934
- "links": null
935
- },
936
- "BIBREF13": {
937
- "ref_id": "b13",
938
- "title": "A Description of A Computer-Usable Dictionary File Based on The Oxford Advanced Learner's Dictionary of Current English",
939
- "authors": [
940
- {
941
- "first": "Roger",
942
- "middle": [],
943
- "last": "Mitton",
944
- "suffix": ""
945
- }
946
- ],
947
- "year": 1992,
948
- "venue": "",
949
- "volume": "",
950
- "issue": "",
951
- "pages": "",
952
- "other_ids": {},
953
- "num": null,
954
- "urls": [],
955
- "raw_text": "Roger Mitton, 1992. A Description of A Computer- Usable Dictionary File Based on The Oxford Ad- vanced Learner's Dictionary of Current English.",
956
- "links": null
957
- },
958
- "BIBREF14": {
959
- "ref_id": "b14",
960
- "title": "Two trie structures for natural language dictionaries",
961
- "authors": [
962
- {
963
- "first": "K",
964
- "middle": [],
965
- "last": "Morimoto",
966
- "suffix": ""
967
- },
968
- {
969
- "first": "J",
970
- "middle": [],
971
- "last": "Aoe",
972
- "suffix": ""
973
- }
974
- ],
975
- "year": 1993,
976
- "venue": "Proceedings of Natural Language Processing Pacific Rim Symposium",
977
- "volume": "",
978
- "issue": "",
979
- "pages": "302--311",
980
- "other_ids": {},
981
- "num": null,
982
- "urls": [],
983
- "raw_text": "K. Morimoto and J. Aoe. 1993. Two trie structures for natural language dictionaries. In Proceedings of Natural Language Processing Pacific Rim Sym- posium, pages 302-311.",
984
- "links": null
985
- },
986
- "BIBREF15": {
987
- "ref_id": "b15",
988
- "title": "A stochastic japanese morphological analyzer using a forward-dp backwarda* n-best search algorithm",
989
- "authors": [
990
- {
991
- "first": "Masaaki",
992
- "middle": [],
993
- "last": "Nagata",
994
- "suffix": ""
995
- }
996
- ],
997
- "year": 1994,
998
- "venue": "COLING-9$",
999
- "volume": "1",
1000
- "issue": "",
1001
- "pages": "201--207",
1002
- "other_ids": {},
1003
- "num": null,
1004
- "urls": [],
1005
- "raw_text": "Masaaki Nagata. 1994. A stochastic japanese mor- phological analyzer using a forward-dp backward- a* n-best search algorithm. In COLING-9$, vol- ume 1, pages 201-207, August.",
1006
- "links": null
1007
- },
1008
- "BIBREF16": {
1009
- "ref_id": "b16",
1010
- "title": "A part of speech estimation method for japanese unknown words using a statistical model of morphology and context",
1011
- "authors": [
1012
- {
1013
- "first": "Masaaki",
1014
- "middle": [],
1015
- "last": "Nagata",
1016
- "suffix": ""
1017
- }
1018
- ],
1019
- "year": 1999,
1020
- "venue": "37th Annual Meeting of the Association for Computational Linguistics, Proceedings of the Conference",
1021
- "volume": "",
1022
- "issue": "",
1023
- "pages": "277--284",
1024
- "other_ids": {},
1025
- "num": null,
1026
- "urls": [],
1027
- "raw_text": "Masaaki Nagata. 1999. A part of speech estimation method for japanese unknown words using a sta- tistical model of morphology and context. In 37th Annual Meeting of the Association for Computa- tional Linguistics, Proceedings of the Conference, pages 277-284, June.",
1028
- "links": null
1029
- },
1030
- "BIBREF17": {
1031
- "ref_id": "b17",
1032
- "title": "Adaptive multilingual sentence boundary disambiguation",
1033
- "authors": [
1034
- {
1035
- "first": "D",
1036
- "middle": [],
1037
- "last": "David",
1038
- "suffix": ""
1039
- },
1040
- {
1041
- "first": "Marti",
1042
- "middle": [
1043
- "A"
1044
- ],
1045
- "last": "Palmer",
1046
- "suffix": ""
1047
- },
1048
- {
1049
- "first": "",
1050
- "middle": [],
1051
- "last": "Hearst",
1052
- "suffix": ""
1053
- }
1054
- ],
1055
- "year": 1997,
1056
- "venue": "June. Real World Computing Partnership",
1057
- "volume": "23",
1058
- "issue": "2",
1059
- "pages": "241--267",
1060
- "other_ids": {},
1061
- "num": null,
1062
- "urls": [],
1063
- "raw_text": "David D. Palmer and Marti A. Hearst. 1997. Adap- tive multilingual sentence boundary disambigua- tion. Computational Linguistics, 23(2):241-267, June. Real World Computing Partnership, 1998. RWC Text Database Report. in Japanese.",
1064
- "links": null
1065
- },
1066
- "BIBREF18": {
1067
- "ref_id": "b18",
1068
- "title": "Part-of-Speech Tagging Guidelines for the Penn Treebank Project",
1069
- "authors": [
1070
- {
1071
- "first": "Beatrice",
1072
- "middle": [],
1073
- "last": "Santorini",
1074
- "suffix": ""
1075
- }
1076
- ],
1077
- "year": 1990,
1078
- "venue": "",
1079
- "volume": "",
1080
- "issue": "",
1081
- "pages": "",
1082
- "other_ids": {},
1083
- "num": null,
1084
- "urls": [],
1085
- "raw_text": "Beatrice Santorini, 1990. Part-of-Speech Tagging Guidelines for the Penn Treebank Project (3rd Re- vision, 2nd printing), June.",
1086
- "links": null
1087
- },
1088
- "BIBREF19": {
1089
- "ref_id": "b19",
1090
- "title": "Tokenization as the initial phase in nip",
1091
- "authors": [
1092
- {
1093
- "first": "Jonathan",
1094
- "middle": [
1095
- "J"
1096
- ],
1097
- "last": "Webster",
1098
- "suffix": ""
1099
- },
1100
- {
1101
- "first": "Chunyu",
1102
- "middle": [],
1103
- "last": "Kit",
1104
- "suffix": ""
1105
- }
1106
- ],
1107
- "year": 1992,
1108
- "venue": "COLING-92",
1109
- "volume": "4",
1110
- "issue": "",
1111
- "pages": "1106--1110",
1112
- "other_ids": {},
1113
- "num": null,
1114
- "urls": [],
1115
- "raw_text": "Jonathan J. Webster and Chunyu Kit. 1992. Tok- enization as the initial phase in nip. In COLING- 92, volume 4, pages 1106-1110, August.",
1116
- "links": null
1117
- },
1118
- "BIBREF20": {
1119
- "ref_id": "b20",
1120
- "title": "MOZ and LimaTK Manual NAIST Computational Linguistics Laboratory",
1121
- "authors": [
1122
- {
1123
- "first": "Tatsuo",
1124
- "middle": [],
1125
- "last": "Yamashita",
1126
- "suffix": ""
1127
- }
1128
- ],
1129
- "year": 1999,
1130
- "venue": "",
1131
- "volume": "",
1132
- "issue": "",
1133
- "pages": "",
1134
- "other_ids": {},
1135
- "num": null,
1136
- "urls": [],
1137
- "raw_text": "Tatsuo Yamashita, 1999. MOZ and LimaTK Man- ual NAIST Computational Linguistics Labora- tory, <http://cl.aist-nara.ac.jp/-tatuo-y/ma/>, August. in Japanese.",
1138
- "links": null
1139
- },
1140
- "BIBREF21": {
1141
- "ref_id": "b21",
1142
- "title": "Language independent tools for natural language processing",
1143
- "authors": [
1144
- {
1145
- "first": "Tatsuo",
1146
- "middle": [],
1147
- "last": "Ymashita",
1148
- "suffix": ""
1149
- },
1150
- {
1151
- "first": "Msakazu",
1152
- "middle": [],
1153
- "last": "Fujio",
1154
- "suffix": ""
1155
- },
1156
- {
1157
- "first": "Yuji",
1158
- "middle": [],
1159
- "last": "Matsumoto",
1160
- "suffix": ""
1161
- }
1162
- ],
1163
- "year": 1999,
1164
- "venue": "Proceedings of the Eighteenth International Conference on Computer Processing",
1165
- "volume": "",
1166
- "issue": "",
1167
- "pages": "237--240",
1168
- "other_ids": {},
1169
- "num": null,
1170
- "urls": [],
1171
- "raw_text": "Tatsuo Ymashita, Msakazu Fujio, and Yuji Mat- sumoto. 1999. Language independent tools for natural language processing. In Proceedings of the Eighteenth International Conference on Computer Processing, pages 237-240, March.",
1172
- "links": null
1173
- }
1174
- },
1175
- "ref_entries": {
1176
- "FIGREF0": {
1177
- "uris": null,
1178
- "type_str": "figure",
1179
- "text": "Decomposition of Sentence in English Sentence (He is home for holiday.) Characters [~[ ~ [z~k[ ~t \"C\" I')~ I~\" I L t \"C [ ~ I ~ ] o Morphofragments V I'J I I L. I I I I o",
1180
- "num": null
1181
- },
1182
- "FIGREF1": {
1183
- "uris": null,
1184
- "type_str": "figure",
1185
- "text": "Decomposition of Sentence in Japanese \"Graphic Words\" in",
1186
- "num": null
1187
- },
1188
- "FIGREF2": {
1189
- "uris": null,
1190
- "type_str": "figure",
1191
- "text": "Figure 3: Japanese Trie Structured Dictionary",
1192
- "num": null
1193
- },
1194
- "FIGREF3": {
1195
- "uris": null,
1196
- "type_str": "figure",
1197
- "text": "my little brother.) L~--~ ~ot I ~c~. (I go to school.) -~g[~ ~ $ b .~ ~) o (Let's go to school.) English Trie Structured Dictionary",
1198
- "num": null
1199
- },
1200
- "TABREF0": {
1201
- "num": null,
1202
- "type_str": "table",
1203
- "html": null,
1204
- "content": "<table><tr><td colspan=\"2\">Sentence Dr. Characters</td></tr><tr><td>Graphic Words</td><td>[Drl I ]Lee I land I IJohn'sl Isonl ]go I Itol Ithel ]McDonald's[ linl ]Newl IYork].l</td></tr><tr><td>Lexemes</td><td>[Dr.[ ILee I landl ]John]'s[ Isonl Igo] Itol ]thel IScDonald'sl ]inl tNew York].l</td></tr><tr><td>Morpho-fragments</td><td>IDr['[ ILeel landl [Johnl'lsl Isonl Igol Itol Ithel ]McDonaldl'[s] linl [Newl IYork].l</td></tr><tr><td/><td>).</td></tr><tr><td/><td>Conventionally, in segmented languages, an ana-</td></tr><tr><td/><td>lyzer converts a stream of characters into graphic</td></tr><tr><td/><td>words (see the rows labeled \"Characters\" and</td></tr></table>",
1205
- "text": "Lee and John's son go to the McDonald's in New York."
1206
- }
1207
- }
1208
- }
1209
- }
Full_text_JSON/prefixA/json/A00/A00-1033.json DELETED
@@ -1,964 +0,0 @@
1
- {
2
- "paper_id": "A00-1033",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:15.460269Z"
6
- },
7
- "title": "A Divide-and-Conquer Strategy for Shallow Parsing of German Free Texts",
8
- "authors": [
9
- {
10
- "first": "Giinter",
11
- "middle": [],
12
- "last": "Neumann",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "neumann@dfki@de"
16
- },
17
- {
18
- "first": "Christian",
19
- "middle": [],
20
- "last": "Braun",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": ""
24
- },
25
- {
26
- "first": "Jakub",
27
- "middle": [],
28
- "last": "Piskorski",
29
- "suffix": "",
30
- "affiliation": {},
31
- "email": "piskorsk@dfki@de"
32
- }
33
- ],
34
- "year": "",
35
- "venue": null,
36
- "identifiers": {},
37
- "abstract": "We present a divide-and-conquer strategy based on finite state technology for shallow parsing of realworld German texts. In a first phase only the topological structure of a sentence (i.e., verb groups, subclauses) are determined. In a second phase the phrasal grammars are applied to the contents of the different fields of the main and sub-clauses. Shallow parsing is supported by suitably configured preprocessing, including: morphological and on-line compound analysis, efficient POS-filtering, and named entity recognition. The whole approach proved to be very useful for processing of free word order languages like German. Especially for the divide-andconquer parsing strategy we obtained an f-measure of 87.14% on unseen data.",
38
- "pdf_parse": {
39
- "paper_id": "A00-1033",
40
- "_pdf_hash": "",
41
- "abstract": [
42
- {
43
- "text": "We present a divide-and-conquer strategy based on finite state technology for shallow parsing of realworld German texts. In a first phase only the topological structure of a sentence (i.e., verb groups, subclauses) are determined. In a second phase the phrasal grammars are applied to the contents of the different fields of the main and sub-clauses. Shallow parsing is supported by suitably configured preprocessing, including: morphological and on-line compound analysis, efficient POS-filtering, and named entity recognition. The whole approach proved to be very useful for processing of free word order languages like German. Especially for the divide-andconquer parsing strategy we obtained an f-measure of 87.14% on unseen data.",
44
- "cite_spans": [],
45
- "ref_spans": [],
46
- "eq_spans": [],
47
- "section": "Abstract",
48
- "sec_num": null
49
- }
50
- ],
51
- "body_text": [
52
- {
53
- "text": "Current information extraction (IE) systems are quite successful in efficient processing of large free text collections due to the fact that they can provide a partial understanding of specific types of text with a certain degree of partial accuracy using fast and robust language processing strategies (basically finite state technology). They have been \"made sensitive\" to certain key pieces of information and thereby provide an easy means to skip text without deep analysis. The majority of existing IE systems are applied to English text, but there are now a number of systems which process other languages as well (e.g., German (Neumann et al., 1997) , Italian (Ciravegna et al., 1999) or Japanese (Sekine and Nobata, 1998) ). The majority of current systems perform a partial parsing approach using only very few general syntactic knowledge for the identification of nominal and prepositional phrases and verb groups. The combination of such units is then performed by means of domain-specific templates. Usually, these templates are triggered by domain-specific predicates attached only to a relevant subset of verbs which express domain-specific selectional restrictions for possible argument fillers.",
54
- "cite_spans": [
55
- {
56
- "start": 634,
57
- "end": 656,
58
- "text": "(Neumann et al., 1997)",
59
- "ref_id": "BIBREF5"
60
- },
61
- {
62
- "start": 667,
63
- "end": 691,
64
- "text": "(Ciravegna et al., 1999)",
65
- "ref_id": "BIBREF2"
66
- },
67
- {
68
- "start": 704,
69
- "end": 729,
70
- "text": "(Sekine and Nobata, 1998)",
71
- "ref_id": "BIBREF10"
72
- }
73
- ],
74
- "ref_spans": [],
75
- "eq_spans": [],
76
- "section": "Introduction",
77
- "sec_num": "1"
78
- },
79
- {
80
- "text": "In most of the well-known shallow text processing systems (cf. (Sundheim, 1995) and (SAIC, 1998) ) cascaded chunk parsers are used which perform clause recognition after fragment recognition following a bottom-up style as described in (Abne), 1996) . We have also developed a similar bottomup strategy for the processing of German texts, cf. (Neumann et al., 1997) . However, the main problem we experienced using the bottom-up strategy was insufficient robustness: because the parser depends on the lower phrasal recognizers, its performance is heavily influenced by their respective performance. As a consequence, the parser frequently wasn't able to process structurally simple sentences, because they contained, for example, highly complex nominal phrases, as in the following example: \"[Die vom Bundesgerichtshof und den Wettbewerbshfitern als Verstofi gegen das Kartellverbot gegeiflelte zentrale TV-Vermarktung] ist g~ngige Praxis.\"",
81
- "cite_spans": [
82
- {
83
- "start": 58,
84
- "end": 79,
85
- "text": "(cf. (Sundheim, 1995)",
86
- "ref_id": null
87
- },
88
- {
89
- "start": 84,
90
- "end": 96,
91
- "text": "(SAIC, 1998)",
92
- "ref_id": "BIBREF9"
93
- },
94
- {
95
- "start": 235,
96
- "end": 248,
97
- "text": "(Abne), 1996)",
98
- "ref_id": null
99
- },
100
- {
101
- "start": 342,
102
- "end": 364,
103
- "text": "(Neumann et al., 1997)",
104
- "ref_id": "BIBREF5"
105
- }
106
- ],
107
- "ref_spans": [],
108
- "eq_spans": [],
109
- "section": "Introduction",
110
- "sec_num": "1"
111
- },
112
- {
113
- "text": "During free text processing it might be not possible (or even desirable) to recognize such a phrase completely. However, if we assume that domain-specific templates are associated with certain verbs or verb groups which trigger template filling, then it will be very difficult to find the appropriate fillers without knowing the correct clause structure. Furthermore in a sole bottom-up approach some ambiguities -for example relative pronouns -can't be resolved without introducing much underspecification into the intermediate structures.",
114
- "cite_spans": [],
115
- "ref_spans": [],
116
- "eq_spans": [],
117
- "section": "Central television raarketing, censured by the German Federal High Court and the guards against unfair competition as an infringement of anti-cartel legislation, is common practice.",
118
- "sec_num": null
119
- },
120
- {
121
- "text": "Therefore we propose the following divide-andconquer parsing strategy: In a first phase only the verb groups and the topological structure of a sentence according to the linguistic field the- \"[CooraS [sse,,* ..",
122
- "cite_spans": [
123
- {
124
- "start": 192,
125
- "end": 208,
126
- "text": "\"[CooraS [sse,,*",
127
- "ref_id": null
128
- }
129
- ],
130
- "ref_spans": [],
131
- "eq_spans": [],
132
- "section": "Central television raarketing, censured by the German Federal High Court and the guards against unfair competition as an infringement of anti-cartel legislation, is common practice.",
133
- "sec_num": null
134
- },
135
- {
136
- "text": "Figure 2: Overview of the system's architecture. ory (cf. (Engel, 1988) ) are determined domainindependently. In a second phase, general (as well as domain-specific) phrasal grammars (nominal and prepositional phrases) are applied to the contents of the different fields of the main and sub-clauses (see",
137
- "cite_spans": [
138
- {
139
- "start": 58,
140
- "end": 71,
141
- "text": "(Engel, 1988)",
142
- "ref_id": "BIBREF3"
143
- }
144
- ],
145
- "ref_spans": [],
146
- "eq_spans": [],
147
- "section": "Underspeclfied dependency trees",
148
- "sec_num": null
149
- },
150
- {
151
- "text": "This approach offers several advantages:",
152
- "cite_spans": [],
153
- "ref_spans": [],
154
- "eq_spans": [],
155
- "section": "fig. 1)",
156
- "sec_num": null
157
- },
158
- {
159
- "text": "\u2022 improved robustness, because parsing of the sentence topology is based only on simple indicators like verbgroups and conjunctions and their interplay,",
160
- "cite_spans": [],
161
- "ref_spans": [],
162
- "eq_spans": [],
163
- "section": "fig. 1)",
164
- "sec_num": null
165
- },
166
- {
167
- "text": "\u2022 the resolution of some ambiguities, including relative pronouns vs. determiner, sub junction vs. preposition and sentence coordination vs. NP coordination, and",
168
- "cite_spans": [],
169
- "ref_spans": [],
170
- "eq_spans": [],
171
- "section": "fig. 1)",
172
- "sec_num": null
173
- },
174
- {
175
- "text": "\u2022 a high degree of modularity (easy integration of domain-dependent subcomponents).",
176
- "cite_spans": [],
177
- "ref_spans": [],
178
- "eq_spans": [],
179
- "section": "fig. 1)",
180
- "sec_num": null
181
- },
182
- {
183
- "text": "The shallow divide-and-conquer parser (DC-PARSER) is supported by means of powerful morphological processing (including on-line compound analysis), efficient POS-filtering and named entity recognition. Thus the architecture of the complete shallow text processing approach consists basically of two main components: the preprocessor and the DC-PARSER itself (see fig. 2 ).",
184
- "cite_spans": [],
185
- "ref_spans": [
186
- {
187
- "start": 363,
188
- "end": 369,
189
- "text": "fig. 2",
190
- "ref_id": null
191
- }
192
- ],
193
- "eq_spans": [],
194
- "section": "fig. 1)",
195
- "sec_num": null
196
- },
197
- {
198
- "text": "The DC-PARSER relies on a suitably configured preprocessing strategy in order to achieve the desired simplicity and performance. It consists of the following main steps:",
199
- "cite_spans": [],
200
- "ref_spans": [],
201
- "eq_spans": [],
202
- "section": "Preprocessor",
203
- "sec_num": "2"
204
- },
205
- {
206
- "text": "Tokenization The tokenizer maps sequences of consecutive characters into larger units called tokens and identifies their types. Currently we use more than 50 domain-independent token classes including generic classes for semantically ambiguous tokens (e.g., \"10:15\" could be a time expression or volleyball result, hence we classify this token as numberdot compound) and complex classes like abbreviations or complex compounds (e.g., \"AT&T-Chief\"). It proved that such variety of token classes simplifies the processing of subsequent submodules significantly.",
207
- "cite_spans": [],
208
- "ref_spans": [],
209
- "eq_spans": [],
210
- "section": "Preprocessor",
211
- "sec_num": "2"
212
- },
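
As a concrete illustration of the token classification just described, a small regex-based classifier in Python; the class names and patterns are assumptions made for this sketch, not the system's actual inventory of more than 50 classes:

    import re

    TOKEN_CLASSES = [
        ("numberdot_compound", re.compile(r"^\d+:\d+$")),          # "10:15": time or score
        ("complex_compound",   re.compile(r"^[A-Z&]+-\w+$")),      # "AT&T-Chief"
        ("abbreviation",       re.compile(r"^(?:[A-Za-z]\.)+$")),  # "z.B."
        ("number",             re.compile(r"^\d+(?:[.,]\d+)*$")),
        ("word",               re.compile(r"^\w+$")),
    ]

    def classify(token):
        # First matching class wins, so more specific patterns come first.
        for name, pattern in TOKEN_CLASSES:
            if pattern.match(token):
                return name
        return "other"

    for t in ["10:15", "AT&T-Chief", "z.B.", "1999", "Unternehmen"]:
        print(t, "->", classify(t))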
213
- {
214
- "text": "Morphology Each token identified as a potential wordform is submitted to the morphological analysis including on-line recognition of compounds (which is crucial since compounding is a very productive process of the German language) and hyphen coordination (e.g., in \"An-und Verkauf\" (purchase and sale) \"An-\" is resolved to \"Ankauf\" (purchase)). Each token recognized as a valid word form is associated with the list of its possible readings, characterized by stem, inflection information and part of speech category.",
215
- "cite_spans": [],
216
- "ref_spans": [],
217
- "eq_spans": [],
218
- "section": "Preprocessor",
219
- "sec_num": "2"
220
- },
221
- {
222
- "text": "POS-filtering Since a high amount of German word forms is ambiguous, especially word forms with a verb reading 1 and due to the fact that the quality of the results of the DC-PARSER relies essentially on the proper recognition of verb groups, efficient disambiguation strategies are needed. Using case-sensitive rules is straightforward since generally only nouns (and proper names) are written in standard German with a capitalized initial letter (e.g., \"das Unternehmen\" -the enterprise vs. \"wir unternehmen\" -we undertake). However for disambiguation of word forms appearing at the beginning of the sentence local contextual filtering rules are applied. For instance, the rule which forbids the verb written with a capitalized initial letter to be followed by a finite verb would filter out the verb reading of the word \"unternehmen\" in the sentence 130% of the wordforms in the test corpus \"Wirtschaftswoche\" (business news journal), which have a verb reading, turned to have at least one other non-verb reading.",
223
- "cite_spans": [],
224
- "ref_spans": [],
225
- "eq_spans": [],
226
- "section": "Preprocessor",
227
- "sec_num": "2"
228
- },
229
- {
230
- "text": "\"Unternehmen sind an Gewinnmaximierung interesiert.\" (Enterprises are interested in maximizing their profits). A major subclass of ambiguous wordforms are those which have an adjective or attributivly used participle reading beside the verb reading. For instance, in the sentence \"Sie bekannten, die bekannten Bilder gestohlen zu haben.\" (They confessed they have stolen the famous paintings.) the wordform \"bekannten\" is firstly used as a verb (confessed) and secondly as an adjective (famous). Since adjectives and attributively used participles are in most cases part of a nominal phrase a convenient rule would reject the verb reading if the previous word form is a determiner or the next word form is a noun. It is important to notice that such rules are based on some regularities, but they may yield false results, like for instance the rule for filtering out the verb reading of some word forms extremely rarely used as verbs (e.g., \"recht\" -right, to rake (3rd person,sg)). All rules are compiled into a single finite-state transducer according to the approach described in (Roche and Schabes, 1995) . 2 Named entity finder Named entities such as organizations, persons, locations and time expressions are identified using finite-state grammars. Since some named entities (e.g. company names) may appear in the text either with or without a designator, we use a dynamic lexicon to store recognized named entities without their designators (e.g., \"Braun AG\" vs. \"Braun\") in order to identify subsequent occurrences correctly. However a named entity, consisting solely of one word, may be also a valid word form (e.g., \"Braun\" -brown). Hence we classify such words as candidates for named entities since generally such ambiguities cannot be resolved at this level. Recognition of named entities could be postponed and integrated into the fragment recognizer, but performing this task at this stage of processing seems to be more appropriate. Firstly because the results of POS-filtering could be partially verified and improved and secondly the amount of the word forms to be processed by subsequent modules could be considerably reduced. For instance the verb reading of the word form \"achten\" (watch vs. eight) in the time expression \"am achten Oktober 1995\" (at the eight of the October 1995) could be filtered out if not done yet.",
231
- "cite_spans": [
232
- {
233
- "start": 1083,
234
- "end": 1108,
235
- "text": "(Roche and Schabes, 1995)",
236
- "ref_id": "BIBREF8"
237
- }
238
- ],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "Preprocessor",
242
- "sec_num": "2"
243
- },
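
The two contextual filter rules described above can be pictured as follows. In the real system such rules are compiled into a single finite-state transducer (Roche and Schabes, 1995); this plain-Python rendering, with invented tag names, only shows the filtering logic:

    def filter_readings(tagged):
        """tagged: list of (word, set-of-POS-readings) pairs."""
        out = []
        for i, (word, readings) in enumerate(tagged):
            nxt = tagged[i + 1][1] if i + 1 < len(tagged) else set()
            prv = tagged[i - 1][1] if i > 0 else set()
            r = set(readings)
            # Rule 1: a capitalized sentence-initial word form cannot be a verb
            # if it is immediately followed by a finite verb.
            if i == 0 and word[:1].isupper() and "FIN-VERB" in nxt:
                r.discard("VERB")
            # Rule 2: between a determiner and a noun, prefer the available
            # adjective/participle reading over the verb reading.
            if "DET" in prv and "NOUN" in nxt and "ADJ" in r:
                r.discard("VERB")
            out.append((word, r or set(readings)))  # never drop the last reading
        return out

    sent = [("Unternehmen", {"NOUN", "VERB"}),
            ("sind", {"FIN-VERB"}),
            ("interessiert", {"ADJ", "VERB"})]
    print(filter_readings(sent))  # verb reading of "Unternehmen" is filtered out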
244
- {
245
- "text": "The DC-PARSER consists of two major domainindependent modules based on finite state technol-2The manually constructed rules proved to be a useful means for disambiguation, however not sufficient enough to filter out all unplausible readings. Hence supplementary rules determined by Brill's tagger were used in order to achieve broader coverage. ogy: 1) construction of the topological sentence structure, and 2) application of phrasal grammars on each determined subclause (see also fig. 3 ). In this paper we will concentrate on the first step, because it is the more novel part of the DC-PARSER, and will only briefly describe the second step in section 3.2.",
246
- "cite_spans": [],
247
- "ref_spans": [
248
- {
249
- "start": 483,
250
- "end": 489,
251
- "text": "fig. 3",
252
- "ref_id": null
253
- }
254
- ],
255
- "eq_spans": [],
256
- "section": "A Shallow Divide-and-Conquer Strategy",
257
- "sec_num": "3"
258
- },
259
- {
260
- "text": "The DC-PARSER applies cascades of finite-state grammars to the stream of tokens and named entitles delivered by the preprocessor in order to determine the topological structure of the sentence according to the linguistic field theory (Engel, 1988) . 3",
261
- "cite_spans": [
262
- {
263
- "start": 234,
264
- "end": 247,
265
- "text": "(Engel, 1988)",
266
- "ref_id": "BIBREF3"
267
- }
268
- ],
269
- "ref_spans": [],
270
- "eq_spans": [],
271
- "section": "Topological structure",
272
- "sec_num": "3.1"
273
- },
274
- {
275
- "text": "Based on the fact that in German a verb group (like \"h~tte fiberredet werden mfissen\" --*have convinced been should meaning should have been convinced) can be split into a left and a right verb part (\"h\u00a3tte\" and \"fiberredet werden miissen\") these parts (abbreviated as LVP and RVP) are used for the segmentation of a main sentence into several parts: the front field (VF), the left verb part, middle field (MF), right verb part, and rest field (RF). Subclauses can also be expressed in that way such that the left\" verb part is either empty or occupied by a relative pronoun or a sub junction element, and the complete verb group is placed in the right verb part, cf. figure 3. Note that each separated field can be arbitrarily complex with very few restrictions on the ordering of the phrases inside a field.",
276
- "cite_spans": [],
277
- "ref_spans": [],
278
- "eq_spans": [],
279
- "section": "Topological structure",
280
- "sec_num": "3.1"
281
- },
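
A toy rendering of this field segmentation, assuming the verb-group recognizer has already tagged the tokens belonging to the left and right verb parts (the real segmentation is done by finite-state grammars; the names here are illustrative):

    def split_fields(tagged):
        """Split a clause into VF [LVP] MF [RVP] RF, given (word, tag) pairs."""
        fields = {"VF": [], "LVP": [], "MF": [], "RVP": [], "RF": []}
        state = "VF"
        for word, tag in tagged:
            if tag == "LVP":
                fields["LVP"].append(word); state = "MF"
            elif tag == "RVP":
                fields["RVP"].append(word); state = "RF"
            else:
                fields[state].append(word)
        return fields

    # "Der Mann hätte gestern überredet werden müssen."
    clause = [("Der", "-"), ("Mann", "-"), ("hätte", "LVP"), ("gestern", "-"),
              ("überredet", "RVP"), ("werden", "RVP"), ("müssen", "RVP")]
    print(split_fields(clause))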
282
- {
283
- "text": "Recognition of the topological structure of a sentence can be described by the following four phases realized as cascade of finite state grammars (see also fig. 2; fig. 4 shows the different steps in action). 4 Initially, the stream of tokens and named entities is separated into a list of sentences based on punctuation signs. 5",
284
- "cite_spans": [],
285
- "ref_spans": [
286
- {
287
- "start": 156,
288
- "end": 170,
289
- "text": "fig. 2; fig. 4",
290
- "ref_id": null
291
- }
292
- ],
293
- "eq_spans": [],
294
- "section": "Topological structure",
295
- "sec_num": "3.1"
296
- },
297
- {
298
- "text": "Verb groups A verb grammar recognizes all single occurrences of verbforms (in most cases corresponding to LVP) and all closed verbgroups (i.e., sequences of verbforms, corresponding to RVP). The parts of discontinuous verb groups (e.g., separated LvP and RVP or separated verbs and verb-prefixes) cannot be put together at that step of processing because one needs contextual information which will only be available in the next steps. The major problem at this phase is not a structural one but the 3Details concerning the implementation of the topological parsing strategy can be found in (Braun, 1999) . Details concerning the representation and compilation of the used finite state machinery can be found in (Neumann et al., 1997) 4In this paper we can give only a brief overview of the current coverage of the individual steps. An exhaustive description of the covered phenomena can be found in (Braun, 1999) .",
299
- "cite_spans": [
300
- {
301
- "start": 591,
302
- "end": 604,
303
- "text": "(Braun, 1999)",
304
- "ref_id": "BIBREF1"
305
- },
306
- {
307
- "start": 712,
308
- "end": 734,
309
- "text": "(Neumann et al., 1997)",
310
- "ref_id": "BIBREF5"
311
- },
312
- {
313
- "start": 900,
314
- "end": 913,
315
- "text": "(Braun, 1999)",
316
- "ref_id": "BIBREF1"
317
- }
318
- ],
319
- "ref_spans": [],
320
- "eq_spans": [],
321
- "section": "Topological structure",
322
- "sec_num": "3.1"
323
- },
324
- {
325
- "text": "5 Performing this step after preprocessing has the advantage that the tokenizer and named entity finder already have determined abbreviation signs, so that this sort of disambiguation is resolved. and simply be recognized on the basis of commas, initial elements (like complementizer, interrogative or relative item -see also fig. 4 , where SUBCONJ-CL and REL-CL are tags for subclauses) and verb fragments. The different types of subclauses are described very compactly as finite state expressions. Figure 6 shows a (simplified) BC-structure in feature matrix notation. Clause combination It is very often the case that base clauses are recursively embedded as in the following example:",
326
- "cite_spans": [],
327
- "ref_spans": [
328
- {
329
- "start": 326,
330
- "end": 332,
331
- "text": "fig. 4",
332
- "ref_id": null
333
- },
334
- {
335
- "start": 500,
336
- "end": 508,
337
- "text": "Figure 6",
338
- "ref_id": null
339
- }
340
- ],
341
- "eq_spans": [],
342
- "section": "Topological structure",
343
- "sec_num": "3.1"
344
- },
345
- {
346
- "text": "... well der Hund den Braten gefressen hatte, den die Frau, nachdem sie ihn zubereitet hatte, auf die Fensterbank gestellt hatte.",
347
- "cite_spans": [],
348
- "ref_spans": [],
349
- "eq_spans": [],
350
- "section": "Topological structure",
351
- "sec_num": "3.1"
352
- },
353
- {
354
- "text": "Because the dog ate the beef which was put on the window sill after it had been prepared by the woman.",
355
- "cite_spans": [],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "Topological structure",
359
- "sec_num": "3.1"
360
- },
361
- {
362
- "text": "Two sorts of recursion can be distinguished: 1) middle field (MF) recursion, where the embedded base clause is framed by the left and right verb parts of the embedding sentence, and 2) the rest field (RF) recursion, where the embedded clause follows the right verb part of the embedding sentence. In order to express and handle this sort of recursion using a finite state approach, both recursions are treated as iterations such that they destructively substitute recognized embedded base clauses with their type. Hence, the complexity of the recognized structure of the sentence is reduced successively. However, because subclauses of MF-recursion may have their own embedded RF-recursion the CLAUSE COMBINA-TION (CC) is used for bundling subsequent base clauses before they would be combined with subclauses identified by the outer MF-recursion. The BC and CC module are called until no more base clauses can be reduced. If the CC module would not be used, then the following incorrect segmentation could not be avoided: ",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Topological structure",
367
- "sec_num": "3.1"
368
- },
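
The iteration that replaces embedded base clauses by their type can be sketched like this; the two regular expressions are toy stand-ins for the real finite-state subclause grammars:

    import re

    REL_CL  = re.compile(r", (?:der|die|das|denen)[^,.]* V-FIN")
    SUBJ_CL = re.compile(r", (?:weil|nachdem|als|dass)[^,.]* V-FIN")

    def reduce_clauses(sentence):
        """Destructively replace innermost base clauses until a fixpoint."""
        changed = True
        while changed:
            changed = False
            for name, pat in (("Rel-Cl", REL_CL), ("Subj-Cl", SUBJ_CL)):
                new = pat.sub(" [%s]" % name, sentence, count=1)
                if new != sentence:
                    sentence, changed = new, True
        return sentence

    s = "die Frau, nachdem sie ihn zubereitet V-FIN, stellte den Braten weg"
    print(reduce_clauses(s))  # -> "die Frau [Subj-Cl], stellte den Braten weg"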
369
- {
370
- "text": "After the topological structure of a sentence has been identified, each substring is passed to the FRAG-MENT RECOGNIZER in order to determine the internal phrasal structure. Note that processing of a substring might still be partial in the sense that no complete structure need be found (e.g., if we cannot combine sequences of phrases to one larger unit). The FRAGMENT RECOGNIZER uses finite state grammars in order to extract nominal and prepositional phrases, where the named entities recognized by the preprocessor are integrated into appropriate places (unplausibte phrases are rejected by agreement checking; see (Neumann et al., 1997) for more details)). The phrasal recognizer currently only considers processing of simple, non-recursive structures (see fig. 3 ; here, *NP* and *PP* are used for denoting phrasal types). Note that because of the high degree of modularity of our shallow parsing architecture, it is very easy to exchange the currently domain-independent fragment recognizer with a domain-specific one, without effecting the domainindependent DC-PARSER.",
371
- "cite_spans": [
372
- {
373
- "start": 619,
374
- "end": 641,
375
- "text": "(Neumann et al., 1997)",
376
- "ref_id": "BIBREF5"
377
- }
378
- ],
379
- "ref_spans": [
380
- {
381
- "start": 762,
382
- "end": 768,
383
- "text": "fig. 3",
384
- "ref_id": null
385
- }
386
- ],
387
- "eq_spans": [],
388
- "section": "Phrase recognition",
389
- "sec_num": "3.2"
390
- },
391
- {
392
- "text": "The final output of the parser for a sentence is an underspecified dependence structure UDS. An UDS is a flat dependency-based structure of a sentence, where only upper bounds for attachment and scoping of modifiers are expressed. This is achieved by collecting all NPs and PPs of a clause into separate sets as long as they are not part of some subclauses. This means that although the exact attachment point of each individual PP is not known it is guaranteed that a PP can only be attached to phrases which are dominated by the main verb of the sentence (which is the root node of the clause's tree). However, the exact point of attachment is a matter of domain-specific knowledge and hence should be defined as part of the domain knowledge of an application.",
393
- "cite_spans": [],
394
- "ref_spans": [],
395
- "eq_spans": [],
396
- "section": "Phrase recognition",
397
- "sec_num": "3.2"
398
- },
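
Such an underspecified dependency structure can be pictured as a small record type: the main verb dominates unordered sets of NPs and PPs, and PP attachment is only bounded, not resolved. The field names below are illustrative, not the paper's:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class UDS:
        verb: str                                       # root of the clause
        nps: List[str] = field(default_factory=list)
        pps: List[str] = field(default_factory=list)    # attachment underspecified
        subclauses: List["UDS"] = field(default_factory=list)

    main = UDS(verb="musste verkaufen",
               nps=["sie", "Aktien"],
               subclauses=[UDS(verb="erlitten hat",
                               nps=["die Siemens GmbH", "Verluste"],
                               subclauses=[UDS(verb="lebt", pps=["vom Export"])])])
    print(main)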
399
- {
400
- "text": "Due to the limited space, we concentrate on the evaluation of the topological structure. An evaluation of the other components (based on a subset of 20.000 tokens of the mentioned corpus from the \"Wirtschaftswoche\", see below) yields: From the 93,89% of the tokens which were identified by the morphological component as valid word forms, 95,23% got a unique POS-assignment with an accuracy of 97,9%. An initial evaluation on the same subset yielded a precision of 95.77% and a recall of 85% (90.1% F-measure) for our current named entity finder. Evaluation of the compound analysis of nouns, i.e. how often a morphosyntactical correct segmentation was found yield: Based on the 20.000 tokens, 1427 compounds are found, where 1417 have the correct segmentation (0.9929% precision). On a smaller subset of 1000 tokens containing 102 compounds, 101 correct segmentations where found (0.9901% recall), which is a quite promising result. An evaluation of simple NPs yielded a recall of 0.7611% and precision of 0.9194%. The low recall was mainly because of unknown words.",
401
- "cite_spans": [],
402
- "ref_spans": [],
403
- "eq_spans": [],
404
- "section": "Evaluation",
405
- "sec_num": "4"
406
- },
407
- {
408
- "text": "During the 2nd and 5th of July 1999 a test corpus of 43 messages from different press releases (viz. DEUTSCHE PREESSEAGENTUR (dpa), ASSOCIATED PRESS (ap) and REUTERS) and different domains (equal distribution of politics, business, sensations) was collected. 6 The corpus contains 400 sentences 6This data collection and evaluation was carried out by (Braun, 1999) . with a total of 6306 words. Note that it also was created after the DC-PARSER and all grammars were finally implemented. Table 1 shows the result of the evaluations (the F-measure was computed with /3=1). We used the correctness criteria as defined in figure 7 .",
409
- "cite_spans": [
410
- {
411
- "start": 351,
412
- "end": 364,
413
- "text": "(Braun, 1999)",
414
- "ref_id": "BIBREF1"
415
- }
416
- ],
417
- "ref_spans": [
418
- {
419
- "start": 488,
420
- "end": 495,
421
- "text": "Table 1",
422
- "ref_id": null
423
- },
424
- {
425
- "start": 619,
426
- "end": 627,
427
- "text": "figure 7",
428
- "ref_id": null
429
- }
430
- ],
431
- "eq_spans": [],
432
- "section": "Evaluation",
433
- "sec_num": "4"
434
- },
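
For reference, the F-measure used in Table 1 (computed with β=1 from precision P and recall R) is

    F_\beta = \frac{(1+\beta^2)\,P\,R}{\beta^2\,P + R}, \qquad F_1 = \frac{2PR}{P+R},

so F_1 is simply the harmonic mean of precision and recall.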
435
- {
436
- "text": "The evaluation of each component was measured on the basis of the result of all previous components. For the BC and MC module we also measured the performance by manually correcting the errors of the previous components (denoted as \"isolated evaluation\"). In most cases the difference between the precision and recall values is quite small, meaning that the modules keep a good balance between coverage and correctness. Only in the case of the MC-module the difference is about 5%. However, the result for the isolated evaluation of the MC-module suggests that this is mainly due to errors caused by previous components.",
437
- "cite_spans": [],
438
- "ref_spans": [],
439
- "eq_spans": [],
440
- "section": "Evaluation",
441
- "sec_num": "4"
442
- },
443
- {
444
- "text": "A more detailed analysis showed that the majority of errors were caused by mistakes in the preprocessing phase. For example ten errors were caused by an ambiguity between different verb stems (only the first reading is chosen) and ten errors because of wrong POS-filtering. Seven errors were caused by unknown verb forms, and in eight cases the parser failed because it could not properly handle the ambiguities of some word forms being either a separated verb prefix or adverb.",
445
- "cite_spans": [],
446
- "ref_spans": [],
447
- "eq_spans": [],
448
- "section": "Evaluation",
449
- "sec_num": "4"
450
- },
451
- {
452
- "text": "The evaluation has been performed with the Lisp-based version of SMES (cf. (Neumann et al., 1997) ) by replacing the original bidirectional shallow buttom-up parsing module with the DC-PARSER. The average run-time per sentence (average length 26 words) is 0.57 sec. A C++-version is nearly finished missing only the re-implementation of the base and main clause recognition phases, cf. (Piskorski and Neumann, 2000) . The run-time behavior is already encouraging: processing of a German text document (a collection of business news articles from the \"Wirtschaftswoche\") of 197118 tokens (1.26 MB) needs 45 seconds on a PentiumII, 266 MHz, 128 RAM, which corresponds to 4380 tokens per second. Since this is an increase in speed-up by a factor > 20 compared to the Lisp-version, we expect to be able to process 75-100 sentences per second.",
453
- "cite_spans": [
454
- {
455
- "start": 75,
456
- "end": 97,
457
- "text": "(Neumann et al., 1997)",
458
- "ref_id": "BIBREF5"
459
- },
460
- {
461
- "start": 386,
462
- "end": 415,
463
- "text": "(Piskorski and Neumann, 2000)",
464
- "ref_id": "BIBREF7"
465
- }
466
- ],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Evaluation",
470
- "sec_num": "4"
471
- },
472
- {
473
- "text": "To our knowledge, there are only very few other systems described which process free German texts. The new shallow text processor is a direct successor of the one used in the SMES-system, an IE-core system for real world German text processing (Neumann et al., 1997) . Here, a bidirectional verb-driven bottom-up parser was used, where the problems described in this paper concerning parsing of longer sentences were encountered. Another similar divide- Table 1 : Results of the evaluation of the topological structure and-conquer approach using a chart-based parser for analysis of German text documents was presented by (Wauschkuhn, 1996) . Nevertheless, comparing its performance with our approach seems to be rather difficult since he only measures for an unannotated test corpus how often his parser finds at least one result (where he reports 85.7% \"coverage\" of a test corpus of 72.000 sentences) disregarding to measure the accuracy of the parser. In this sense, our parser achieved a \"coverage\" of 94.25% (computing faund/total), ahnost certainly because we use more advanced lexical and phrasal components, e.g., pos-filter, compound and named entity processing. (Peh and Ting, 1996) also describe a divideand-conquer approach based on statistical methods, where the segmentation of the sentence is done by identifying so called link words (solely punctuations, conjunctions and prepositions) and disambiguating their specific role in the sentence. On an annotated test corpus of 600 English sentences they report an accuracy of 85.1% based on the correct recognition of part-of-speech, comma and conjunction disambiguation, and exact noun phrase recognition.",
474
- "cite_spans": [
475
- {
476
- "start": 244,
477
- "end": 266,
478
- "text": "(Neumann et al., 1997)",
479
- "ref_id": "BIBREF5"
480
- },
481
- {
482
- "start": 622,
483
- "end": 640,
484
- "text": "(Wauschkuhn, 1996)",
485
- "ref_id": "BIBREF12"
486
- },
487
- {
488
- "start": 1173,
489
- "end": 1193,
490
- "text": "(Peh and Ting, 1996)",
491
- "ref_id": "BIBREF6"
492
- }
493
- ],
494
- "ref_spans": [
495
- {
496
- "start": 454,
497
- "end": 461,
498
- "text": "Table 1",
499
- "ref_id": null
500
- }
501
- ],
502
- "eq_spans": [],
503
- "section": "Related Work",
504
- "sec_num": "5"
505
- },
506
- {
507
- "text": "We have presented a divide-and-conquer strategy for shallow analysis of German texts which is supported by means of powerful morphological processing, efficient POS-filtering and named entity recognition. Especially for the divide-and-conquer parsing strategy we obtained an F-measure of 87.14% on unseen data. Our shallow parsing strategy has a high degree of modularity which allows the integration of the domain-independent sentence recognition part with arbitrary domain-dependent subcomponents (e.g., specific named entity finders and fragment recognizers).",
508
- "cite_spans": [],
509
- "ref_spans": [],
510
- "eq_spans": [],
511
- "section": "Conclusion and future work",
512
- "sec_num": "6"
513
- },
514
- {
515
- "text": "Considered from an application-oriented point of view, our main experience is that even if we are only interested in some parts of a text (e.g., only in those linguistic entities which verbalize certain aspects of a domain-concept) we have to unfold the structural relationship between all elements of a large enough area (a paragraph or more) up to a certain level of depth in which the relevant information is embedded. Beside continuing the improvement of the whole approach we also started investigations towards the integration of deep processing into the DC-PARSER. The core idea is to call a deep parser only to the separated field elements which contain sequences of simple NPs and PPs (already determined by the shallow parser). Thus seen the shallow parser is used as an efficient preprocessor for dividing a sentence into syntactically valid smaller units, where the deep parser's task would be to identify the exact constituent structure only on demand.",
516
- "cite_spans": [],
517
- "ref_spans": [],
518
- "eq_spans": [],
519
- "section": "Conclusion and future work",
520
- "sec_num": "6"
521
- }
522
- ],
523
- "back_matter": [
524
- {
525
- "text": "The research underlying this paper was supported by a research grant from the German Bundesministerium fiir Bildung, Wissenschaft, Forschung und Technologie (BMBF) to the DFKI project PARADIME, FKZ ITW 9704. Many thanks to Thierry Declerck and Milena Valkova for their support during the evaluation of the system.",
526
- "cite_spans": [],
527
- "ref_spans": [],
528
- "eq_spans": [],
529
- "section": "Acknowledgments",
530
- "sec_num": null
531
- }
532
- ],
533
- "bib_entries": {
534
- "BIBREF0": {
535
- "ref_id": "b0",
536
- "title": "Partial parsing via finite-state cascades",
537
- "authors": [
538
- {
539
- "first": "S",
540
- "middle": [],
541
- "last": "Abney",
542
- "suffix": ""
543
- }
544
- ],
545
- "year": 1996,
546
- "venue": "Proceedings of the ESSLLI 96 Robust Parsing Workshop",
547
- "volume": "",
548
- "issue": "",
549
- "pages": "",
550
- "other_ids": {},
551
- "num": null,
552
- "urls": [],
553
- "raw_text": "S. Abney. 1996. Partial parsing via finite-state cas- cades. Proceedings of the ESSLLI 96 Robust Pars- ing Workshop.",
554
- "links": null
555
- },
556
- "BIBREF1": {
557
- "ref_id": "b1",
558
- "title": "Flaches und robustes Parsen Deutscher Satzgeffige. Master's thesis",
559
- "authors": [
560
- {
561
- "first": "C",
562
- "middle": [],
563
- "last": "Braun",
564
- "suffix": ""
565
- }
566
- ],
567
- "year": 1999,
568
- "venue": "",
569
- "volume": "",
570
- "issue": "",
571
- "pages": "",
572
- "other_ids": {},
573
- "num": null,
574
- "urls": [],
575
- "raw_text": "C. Braun. 1999. Flaches und robustes Parsen Deutscher Satzgeffige. Master's thesis, University of the Saarland.",
576
- "links": null
577
- },
578
- "BIBREF2": {
579
- "ref_id": "b2",
580
- "title": "Facile: Classifying texts integrating pattern matching and information extraction",
581
- "authors": [
582
- {
583
- "first": "F",
584
- "middle": [],
585
- "last": "Ciravegna",
586
- "suffix": ""
587
- },
588
- {
589
- "first": "A",
590
- "middle": [],
591
- "last": "Lavelli",
592
- "suffix": ""
593
- },
594
- {
595
- "first": "N",
596
- "middle": [],
597
- "last": "Mana",
598
- "suffix": ""
599
- },
600
- {
601
- "first": "L",
602
- "middle": [],
603
- "last": "Gilardoni",
604
- "suffix": ""
605
- },
606
- {
607
- "first": "S",
608
- "middle": [],
609
- "last": "Mazza",
610
- "suffix": ""
611
- },
612
- {
613
- "first": "M",
614
- "middle": [],
615
- "last": "Ferraro",
616
- "suffix": ""
617
- },
618
- {
619
- "first": "J",
620
- "middle": [],
621
- "last": "Matiasek",
622
- "suffix": ""
623
- },
624
- {
625
- "first": "W",
626
- "middle": [],
627
- "last": "Black",
628
- "suffix": ""
629
- },
630
- {
631
- "first": "F",
632
- "middle": [],
633
- "last": "Rinaldi",
634
- "suffix": ""
635
- },
636
- {
637
- "first": "D",
638
- "middle": [],
639
- "last": "Mowatt",
640
- "suffix": ""
641
- }
642
- ],
643
- "year": 1999,
644
- "venue": "Proceedings of IJCAI-99",
645
- "volume": "",
646
- "issue": "",
647
- "pages": "",
648
- "other_ids": {},
649
- "num": null,
650
- "urls": [],
651
- "raw_text": "F. Ciravegna, A. Lavelli, N. Mana, L. Gilardoni, S. Mazza, M. Ferraro, J. Matiasek, W. Black, F. Rinaldi, and D. Mowatt. 1999. Facile: Clas- sifying texts integrating pattern matching and in- formation extraction. In Proceedings of IJCAI-99, Stockholm.",
652
- "links": null
653
- },
654
- "BIBREF3": {
655
- "ref_id": "b3",
656
- "title": "Deutsche Grammatik",
657
- "authors": [
658
- {
659
- "first": "Ulrich",
660
- "middle": [],
661
- "last": "Engel",
662
- "suffix": ""
663
- }
664
- ],
665
- "year": 1988,
666
- "venue": "",
667
- "volume": "",
668
- "issue": "",
669
- "pages": "",
670
- "other_ids": {},
671
- "num": null,
672
- "urls": [],
673
- "raw_text": "Ulrich Engel. 1988. Deutsche Grammatik. Julius Groos Verlag, Heidelberg, 2., improved edition.",
674
- "links": null
675
- },
676
- "BIBREF4": {
677
- "ref_id": "b4",
678
- "title": "Shallow parsing and text chunking: A view on um derspecification in syntax",
679
- "authors": [
680
- {
681
- "first": "S",
682
- "middle": [],
683
- "last": "Federici",
684
- "suffix": ""
685
- },
686
- {
687
- "first": "S",
688
- "middle": [],
689
- "last": "Monyemagni",
690
- "suffix": ""
691
- },
692
- {
693
- "first": "V",
694
- "middle": [],
695
- "last": "Pirrelli",
696
- "suffix": ""
697
- }
698
- ],
699
- "year": 1996,
700
- "venue": "Workshop on Robust Parsing, 8th ESSLLI",
701
- "volume": "",
702
- "issue": "",
703
- "pages": "35--44",
704
- "other_ids": {},
705
- "num": null,
706
- "urls": [],
707
- "raw_text": "S. Federici, S. Monyemagni, and V. Pirrelli. 1996. Shallow parsing and text chunking: A view on um derspecification in syntax. In Workshop on Robust Parsing, 8th ESSLLI, pages 35-44.",
708
- "links": null
709
- },
710
- "BIBREF5": {
711
- "ref_id": "b5",
712
- "title": "An information extraction core system for real world german text processing",
713
- "authors": [
714
- {
715
- "first": "G",
716
- "middle": [],
717
- "last": "Neumann",
718
- "suffix": ""
719
- },
720
- {
721
- "first": "R",
722
- "middle": [],
723
- "last": "Backofen",
724
- "suffix": ""
725
- },
726
- {
727
- "first": "J",
728
- "middle": [],
729
- "last": "Baur",
730
- "suffix": ""
731
- },
732
- {
733
- "first": "M",
734
- "middle": [],
735
- "last": "Becker",
736
- "suffix": ""
737
- },
738
- {
739
- "first": "C",
740
- "middle": [],
741
- "last": "Braun",
742
- "suffix": ""
743
- }
744
- ],
745
- "year": 1997,
746
- "venue": "5th International Conference of Applied Natural Language",
747
- "volume": "",
748
- "issue": "",
749
- "pages": "208--215",
750
- "other_ids": {},
751
- "num": null,
752
- "urls": [],
753
- "raw_text": "G. Neumann, R. Backofen, J. Baur, M. Becker, and C. Braun. 1997. An information extraction core system for real world german text processing. In 5th International Conference of Applied Natu- ral Language, pages 208-215, Washington, USA, March.",
754
- "links": null
755
- },
756
- "BIBREF6": {
757
- "ref_id": "b6",
758
- "title": "A divideand-conquer strategy for parsing",
759
- "authors": [
760
- {
761
- "first": "L",
762
- "middle": [],
763
- "last": "Peh",
764
- "suffix": ""
765
- },
766
- {
767
- "first": "Christopher",
768
- "middle": [
769
- "H"
770
- ],
771
- "last": "Ting",
772
- "suffix": ""
773
- }
774
- ],
775
- "year": 1996,
776
- "venue": "Proceedings of the ACL/SIGPARSE 5th International Workshop on Parsing Technologies",
777
- "volume": "",
778
- "issue": "",
779
- "pages": "57--66",
780
- "other_ids": {},
781
- "num": null,
782
- "urls": [],
783
- "raw_text": "L. Peh and Christopher H. Ting. 1996. A divide- and-conquer strategy for parsing. In Proceedings of the ACL/SIGPARSE 5th International Work- shop on Parsing Technologies, pages 57-66.",
784
- "links": null
785
- },
786
- "BIBREF7": {
787
- "ref_id": "b7",
788
- "title": "An intelligent text extraction and navigation system",
789
- "authors": [
790
- {
791
- "first": "J",
792
- "middle": [],
793
- "last": "Piskorski",
794
- "suffix": ""
795
- },
796
- {
797
- "first": "G",
798
- "middle": [],
799
- "last": "Neumann",
800
- "suffix": ""
801
- }
802
- ],
803
- "year": 2000,
804
- "venue": "6th International Conference on Computer-Assisted Information Retrieval (RIAO-2000)",
805
- "volume": "18",
806
- "issue": "",
807
- "pages": "",
808
- "other_ids": {},
809
- "num": null,
810
- "urls": [],
811
- "raw_text": "J. Piskorski and G. Neumann. 2000. An intelligent text extraction and navigation system. In 6th In- ternational Conference on Computer-Assisted In- formation Retrieval (RIAO-2000). Paris, April. 18 pages.",
812
- "links": null
813
- },
814
- "BIBREF8": {
815
- "ref_id": "b8",
816
- "title": "Deterministic partof-speech tagging with finite state transducers",
817
- "authors": [
818
- {
819
- "first": "E",
820
- "middle": [],
821
- "last": "Roche",
822
- "suffix": ""
823
- },
824
- {
825
- "first": "Y",
826
- "middle": [],
827
- "last": "Schabes",
828
- "suffix": ""
829
- }
830
- ],
831
- "year": 1995,
832
- "venue": "Computational Linguistics",
833
- "volume": "21",
834
- "issue": "2",
835
- "pages": "227--253",
836
- "other_ids": {},
837
- "num": null,
838
- "urls": [],
839
- "raw_text": "E. Roche and Y. Schabes. 1995. Deterministic part- of-speech tagging with finite state transducers. Computational Linguistics, 21(2):227-253.",
840
- "links": null
841
- },
842
- "BIBREF9": {
843
- "ref_id": "b9",
844
- "title": "Seventh Message Understanding Conference (MUC-7",
845
- "authors": [
846
- {
847
- "first": "",
848
- "middle": [],
849
- "last": "Saic",
850
- "suffix": ""
851
- }
852
- ],
853
- "year": 1998,
854
- "venue": "",
855
- "volume": "",
856
- "issue": "",
857
- "pages": "",
858
- "other_ids": {},
859
- "num": null,
860
- "urls": [],
861
- "raw_text": "SAIC, editor. 1998. Seventh Message Understanding Conference (MUC-7),",
862
- "links": null
863
- },
864
- "BIBREF10": {
865
- "ref_id": "b10",
866
- "title": "An information extraction system and a customization tool",
867
- "authors": [
868
- {
869
- "first": "S",
870
- "middle": [],
871
- "last": "Sekine",
872
- "suffix": ""
873
- },
874
- {
875
- "first": "C",
876
- "middle": [],
877
- "last": "Nobata",
878
- "suffix": ""
879
- }
880
- ],
881
- "year": 1998,
882
- "venue": "Proceedings of Hitachi workshop-98",
883
- "volume": "",
884
- "issue": "",
885
- "pages": "",
886
- "other_ids": {},
887
- "num": null,
888
- "urls": [],
889
- "raw_text": "S. Sekine and C. Nobata. 1998. An infor- mation extraction system and a customization tool. In Proceedings of Hitachi workshop-98, http://cs.nyu.edu/cs/projects/proteus/sekine/.",
890
- "links": null
891
- },
892
- "BIBREF11": {
893
- "ref_id": "b11",
894
- "title": "Sixth Message Understanding Conference (MUC-6)",
895
- "authors": [],
896
- "year": 1995,
897
- "venue": "",
898
- "volume": "",
899
- "issue": "",
900
- "pages": "",
901
- "other_ids": {},
902
- "num": null,
903
- "urls": [],
904
- "raw_text": "B. Sundheim, editor. 1995. Sixth Message Un- derstanding Conference (MUC-6), Washington. Distributed by Morgan Kaufmann Publishers, Inc.,San Mateo, California.",
905
- "links": null
906
- },
907
- "BIBREF12": {
908
- "ref_id": "b12",
909
- "title": "Ein Werkzeug zur partiellen syntaktischen Analyse deutscher Textkorpora",
910
- "authors": [
911
- {
912
- "first": "O",
913
- "middle": [],
914
- "last": "Wauschkuhn",
915
- "suffix": ""
916
- }
917
- ],
918
- "year": 1996,
919
- "venue": "Natural Language Processing and Speech Technology. Results of the Third KONVENS Conference",
920
- "volume": "",
921
- "issue": "",
922
- "pages": "356--368",
923
- "other_ids": {},
924
- "num": null,
925
- "urls": [],
926
- "raw_text": "O. Wauschkuhn. 1996. Ein Werkzeug zur par- tiellen syntaktischen Analyse deutscher Textko- rpora. In Dafydd Gibbon, editor, Natural Lan- guage Processing and Speech Technology. Results of the Third KONVENS Conference, pages 356- 368. Mouton de Gruyter, Berlin.",
927
- "links": null
928
- }
929
- },
930
- "ref_entries": {
931
- "FIGREF0": {
932
- "text": "An example of a topological structure.",
933
- "type_str": "figure",
934
- "num": null,
935
- "uris": null
936
- },
937
- "FIGREF1": {
938
- "text": "The result of the DC-PARSER for the sentence \"Weil die Siemens GmbH, die vom Export lebt, Verluste erlitten hat, musste sie Aktien verkaufen.\" (Because the Siemens GmbH which strongly depends on exports suffered from losses they had to sell some of the shares.) abbreviated where convenient. It shows the separation of a sentence into the front field (vF), the verb group (VERB), and the middle field (MF). The elements of different fields have been computed by means of fragment recognition which takes place after the (possibly recursive) topological structure has been computed. Note that the front field consists only of one but complex subclause which itself has an internal field structure.Well die Siemens GmbH, die vom Export lebt, Verluste erlitt, musste sie Aktien verkaufen. Well die Siemens GmbH, die ...[Verb-Fin]The different steps of the DC-PARSER for the sentence of figure 3.massive morphosyntactic ambiguity of verbs (for example, most plural verb forms can also be non-finite or imperative forms). This kind of ambiguity cannot be resolved without taking into account a wider context. Therefore these verb forms are assigned disjunctive types, similar to the underspecified chunk categories proposed by(Federici et al., 1996). These types, like for example Fin-Inf-PP or Fin-PP, reflect the different readings of the verbform and en-able following modules to use these verb fonns according to the wider context, thereby removing the ambiguity. In addition to a type each recognized verb form is assigned a set of features which represent various properties of the form like tense and mode information. (cf. figure 5).Base clauses (BC) are subclauses of type subjunctive and subordinate. Although they are embedded into a larger structure they can independently",
939
- "type_str": "figure",
940
- "num": null,
941
- "uris": null
942
- },
943
- "FIGREF2": {
944
- "text": "The structure of the verb fragment \"nicht gelobt haben kann\" -*not praised have could-been meaning could not have been praised",
945
- "type_str": "figure",
946
- "num": null,
947
- "uris": null
948
- },
949
- "FIGREF3": {
950
- "text": "... *[daft das Gliick [, das JochenKroehne ernpfunden haben sollte Rel-C1] [, als ihm jiingst sein Groflaktion/ir die Ubertragungsrechte bescherte Sub j-elf, nicht mehr so recht erwKrmt Sub j-C1]In the correct reading the second subclause \"... als ihm jiingst sein ...\" is embedded into the first one \"... das Jochen Kroehne ...\".Main clauses (MC) Finally the MC module builds the complete topological structure of the input sentence on the basis of the recognized (remaining) verb groups and base clauses, as well as on the word form information not yet consumed. The latter includes basically punctuations and coordinations. The following figure schematically describes the current coverage of the implemented MC-module (see figure 1 for an example structure): . [RVP] ... ::= LVP ...[RVP] ... ::= CSent ( , CSent)* Coord CSent ] ::= CSent (, SSent)* Coord SSent ::= CSent , CSent ::= CSent , SSent I CSent , CSent ::= SSent , SSent",
951
- "type_str": "figure",
952
- "num": null,
953
- "uris": null
954
- },
955
- "TABREF0": {
956
- "content": "<table><tr><td>This information couldn't be verified by the Border</td></tr><tr><td>Police, Kinkel spoke of horrible figures that he didn't</td></tr><tr><td>believe.</td></tr></table>",
957
- "type_str": "table",
958
- "html": null,
959
- "text": "Diese Angaben konnte der Bundesgrenzschutz aber nicht best~itigen], [ssent Kinkel sprach von Horrorzahlen, [relct denen er keinen Glauben schenke]]].\"",
960
- "num": null
961
- }
962
- }
963
- }
964
- }
Full_text_JSON/prefixA/json/A00/A00-1034.json DELETED
@@ -1,719 +0,0 @@
1
- {
2
- "paper_id": "A00-1034",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:37.558941Z"
6
- },
7
- "title": "A Hybrid Approach for Named Entity and Sub-Type Tagging*",
8
- "authors": [
9
- {
10
- "first": "Rohini",
11
- "middle": [],
12
- "last": "Srihari",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Cymfony Net, Inc",
17
- "location": {
18
- "addrLine": "5500 Main Street Williamsville",
19
- "postCode": "14260",
20
- "region": "NY"
21
- }
22
- },
23
- "email": "[email protected]"
24
- },
25
- {
26
- "first": "Cheng",
27
- "middle": [],
28
- "last": "Niu",
29
- "suffix": "",
30
- "affiliation": {},
31
- "email": "[email protected]"
32
- },
33
- {
34
- "first": "Wei",
35
- "middle": [],
36
- "last": "Li",
37
- "suffix": "",
38
- "affiliation": {},
39
- "email": ""
40
- }
41
- ],
42
- "year": "",
43
- "venue": null,
44
- "identifiers": {},
45
- "abstract": "This paper presents a hybrid approach for named entity (NE) tagging which combines Maximum Entropy Model (MaxEnt), Hidden Markov Model (HMM) and handcrafted grammatical rules. Each has innate strengths and weaknesses; the combination results in a very high precision tagger. MaxEnt includes external gazetteers in the system. Sub-category generation is also discussed.",
46
- "pdf_parse": {
47
- "paper_id": "A00-1034",
48
- "_pdf_hash": "",
49
- "abstract": [
50
- {
51
- "text": "This paper presents a hybrid approach for named entity (NE) tagging which combines Maximum Entropy Model (MaxEnt), Hidden Markov Model (HMM) and handcrafted grammatical rules. Each has innate strengths and weaknesses; the combination results in a very high precision tagger. MaxEnt includes external gazetteers in the system. Sub-category generation is also discussed.",
52
- "cite_spans": [],
53
- "ref_spans": [],
54
- "eq_spans": [],
55
- "section": "Abstract",
56
- "sec_num": null
57
- }
58
- ],
59
- "body_text": [
60
- {
61
- "text": "Named entity (NE) tagging is a task in which location names, person names, organization names, monetary amounts, time and percentage expressions are recognized and classified in unformatted text documents. This task provides important semantic information, and is a critical first step in any information extraction system.",
62
- "cite_spans": [],
63
- "ref_spans": [],
64
- "eq_spans": [],
65
- "section": "Introduction",
66
- "sec_num": null
67
- },
68
- {
69
- "text": "Intense research has been focused on improving NE tagging accuracy using several different techniques. These include rule-based systems [Krupka 1998 ], Hidden Markov Models (HMM) [Bikel et al. 1997] and Maximum Entropy Models (MaxEnt) [Borthwick 1998 ]. A system based on manual rules may provide the best performance; however these require painstaking intense skilled labor.. Furthermore, shifting domains involves significant effort and may result in performance degradation. The strength of HMM models lie in their capacity for modeling local contextual information. HMMs have been widely used in continuous speech recognition, part-of-speech tagging, OCR, etc., and are generally regarded as the most successful statistical modelling paradigm in these domains. MaxEnt is a powerful tool to be used in situations where several ambiguous information sources need to be combined. Since statistical techniques such as HMM are only as good as the data they are trained on, they are required to use back-off models to compensate for unreliable statistics. In contrast to empirical back-off models used in HMMs, MaxEnt provides a systematic method by which a statistical model consistent with all obtained knowledge can be trained. [Borthwick et al. 1998 ] discuss a technique for combining the output of several NE taggers in a black box fashion by using MaxEnt. They demonstrate the superior performance of this system; however, the system is computationally inefficient since many taggers need to be run.",
70
- "cite_spans": [
71
- {
72
- "start": 136,
73
- "end": 148,
74
- "text": "[Krupka 1998",
75
- "ref_id": "BIBREF0"
76
- },
77
- {
78
- "start": 179,
79
- "end": 198,
80
- "text": "[Bikel et al. 1997]",
81
- "ref_id": "BIBREF1"
82
- },
83
- {
84
- "start": 235,
85
- "end": 250,
86
- "text": "[Borthwick 1998",
87
- "ref_id": "BIBREF2"
88
- },
89
- {
90
- "start": 1229,
91
- "end": 1251,
92
- "text": "[Borthwick et al. 1998",
93
- "ref_id": "BIBREF2"
94
- }
95
- ],
96
- "ref_spans": [],
97
- "eq_spans": [],
98
- "section": "Introduction",
99
- "sec_num": null
100
- },
101
- {
102
- "text": "In this paper we propose a hybrid method for NE tagging which combines all the modelling techniques mentioned above. NE tagging is a complex task and high-performance systems are required in order to be practically usable. Furthermore, the task demonstrates characteristics that can be exploited by all three techniques. For example, time and monetary expressions are fairly predictable and hence processed most efficiently with handcrafted grammar rules. Name, location and organization entities are highly variable and thus lend themselves to statistical training algorithms such as HMMs. Finally, many conflicting pieces of information regarding the class of a tag are frequently present. This includes information from less than perfect gazetteers. For this, a MaxEnt approach works well in utilizing diverse sources of information in determining the final tag. The structure of our system is shown in The first module is a rule-based tagger containing pattern match rules, or templates, for time, date, percentage, and monetary expressions. These tags include the standard MUC tags [Chinchor 1998 ], as well as several other sub-categories defined by our organization. More details concerning the sub-categories are presented later. The pattern matcher is based on Finite State Transducer (FST) technology [Roches & Schabes 1997] that has been implemented in-house. The subsequent modules are focused on location, person and organization names. The second module assigns tentative person and location tags based on external person and location gazetteers. Rather than relying on simple lookup of the gazetteer which is very error prone, this module employs MaxEnt to build a statistical model that incorporates gazetteers with common contextual information.",
103
- "cite_spans": [
104
- {
105
- "start": 1087,
106
- "end": 1101,
107
- "text": "[Chinchor 1998",
108
- "ref_id": null
109
- },
110
- {
111
- "start": 1311,
112
- "end": 1334,
113
- "text": "[Roches & Schabes 1997]",
114
- "ref_id": null
115
- }
116
- ],
117
- "ref_spans": [],
118
- "eq_spans": [],
119
- "section": "Introduction",
120
- "sec_num": null
121
- },
122
- {
123
- "text": "The core module of the system is a bigram-based HMM [Bikel et a1.1997] . Rules designed to correct errors in NE segmentation are incorporated into a constrained HMM network. These rules serve as constraints on the HMM model and enable it to utilize information beyond bigrams and remove obvious errors due to the limitation of the training corpus. HMM generates the standard MUC tags, person, location and organization. Based on MaxEnt, the last module derives sub-categories such as city, airport, government, etc. from the basic tags.",
124
- "cite_spans": [
125
- {
126
- "start": 52,
127
- "end": 70,
128
- "text": "[Bikel et a1.1997]",
129
- "ref_id": null
130
- }
131
- ],
132
- "ref_spans": [],
133
- "eq_spans": [],
134
- "section": "Introduction",
135
- "sec_num": null
136
- },
137
- {
138
- "text": "Section 1 describes the FST rule module. Section 2 discusses combining gazetteer information using MaxEnt. The constrained HMM is described in Section 3. Section 4 discusses sub-type generation by MaxEnt. The experimental results and conclusion are presented finally.",
139
- "cite_spans": [],
140
- "ref_spans": [],
141
- "eq_spans": [],
142
- "section": "Introduction",
143
- "sec_num": null
144
- },
145
- {
146
- "text": "The most attractive feature of the FST (Finite State Transducer) formalism lies in its superior time and space efficiency [Mohri 1997 ] [Roche & Schabes 1997 . Applying a deterministic FST depends linearly only on the input size of the text. Our experiments also show that an FST rule system is extraordinarily robust. In addition, it has been verified by many research programs [Krupka & Hausman 1998 2000] , that FST is also a convenient tool for capturing linguistic phenomena, especially for idioms and semi-productive expressions like time NEs and numerical NEs.",
147
- "cite_spans": [
148
- {
149
- "start": 122,
150
- "end": 133,
151
- "text": "[Mohri 1997",
152
- "ref_id": null
153
- },
154
- {
155
- "start": 134,
156
- "end": 157,
157
- "text": "] [Roche & Schabes 1997",
158
- "ref_id": null
159
- },
160
- {
161
- "start": 379,
162
- "end": 401,
163
- "text": "[Krupka & Hausman 1998",
164
- "ref_id": "BIBREF0"
165
- },
166
- {
167
- "start": 402,
168
- "end": 407,
169
- "text": "2000]",
170
- "ref_id": null
171
- }
172
- ],
173
- "ref_spans": [],
174
- "eq_spans": [],
175
- "section": "FST-based Pattern Matching Rules for Textract NE",
176
- "sec_num": "1"
177
- },
178
- {
179
- "text": "The rules which we have currently implemented include a grammar for temporal expressions (time, date, duration, frequency, age, etc.), a grammar for numerical expressions (money, percentage, length, weight, etc.), and a grammar for other non-MUC NEs (e.g. contact information like address, email).",
180
- "cite_spans": [],
181
- "ref_spans": [],
182
- "eq_spans": [],
183
- "section": "FST-based Pattern Matching Rules for Textract NE",
184
- "sec_num": "1"
185
- },
186
- {
187
- "text": "The following sample pattern rules give an idea of what our NE grammars look like. These rules capture typical US addresses, like: 5500 Main St., Williamsville, NY14221; 12345 Xyz Avenue, Apt. 678, Los Angeles, CA98765-4321. The following notation is used: @ for macro; I for logical OR; + for one or more; (...) for optionality. (\",\") .... + zip = @0_9 @0_9 @09 @0_9 @0_9",
188
- "cite_spans": [
189
- {
190
- "start": 146,
191
- "end": 169,
192
- "text": "Williamsville, NY14221;",
193
- "ref_id": null
194
- },
195
- {
196
- "start": 170,
197
- "end": 224,
198
- "text": "12345 Xyz Avenue, Apt. 678, Los Angeles, CA98765-4321.",
199
- "ref_id": null
200
- }
201
- ],
202
- "ref_spans": [],
203
- "eq_spans": [],
204
- "section": "FST-based Pattern Matching Rules for Textract NE",
205
- "sec_num": "1"
206
- },
207
- {
208
- "text": "(\"-\" @0_9 @0_9 @0_9 @0_9) street = [APT C.\") I Apt (\".\") [ Apartment] @number local_addr = @ street_addr (@delimiter @apt_addr) address = @ local_addr @delimiter @city @delimiter @state @zip (@delimiter @us)",
209
- "cite_spans": [],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "FST-based Pattern Matching Rules for Textract NE",
213
- "sec_num": "1"
214
- },
215
- {
216
- "text": "Our work is similar to the research on FST local grammars at LADL/University Paris VII [Silberztein 1998 ] 1, but that research was not turned into a functional rule based NE system.",
217
- "cite_spans": [
218
- {
219
- "start": 87,
220
- "end": 104,
221
- "text": "[Silberztein 1998",
222
- "ref_id": null
223
- }
224
- ],
225
- "ref_spans": [],
226
- "eq_spans": [],
227
- "section": "FST-based Pattern Matching Rules for Textract NE",
228
- "sec_num": "1"
229
- },
230
- {
231
- "text": "The rules in our NE grammars cover expressions with very predictable patterns. They were designed to address the weaknesses of our statistical NE tagger. For example, the following missings (underlined) and mistagging originally made by our statistical NE tagger have all been correctly identified by our temporal NE grammar. We use two gazetteers in our system, one for person and one for location. The person gazetteer consists of 3,000 male names, 5,000 female names and 14,000 family names. The location gazetteer consists of 250,000 location names with their categories such as CITY, PROVINCE, COUNTRY, AIRPORT, etc. The containing and being-contained relationship among locations is also provided.",
232
- "cite_spans": [],
233
- "ref_spans": [],
234
- "eq_spans": [],
235
- "section": "FST-based Pattern Matching Rules for Textract NE",
236
- "sec_num": "1"
237
- },
238
- {
239
- "text": "The following is a sample line in the location gazetteer, which denotes \"Aberdeen\" as a city in \"California\", and \"California\" as a province of \"United States\".",
240
- "cite_spans": [],
241
- "ref_spans": [],
242
- "eq_spans": [],
243
- "section": "FST-based Pattern Matching Rules for Textract NE",
244
- "sec_num": "1"
245
- },
246
- {
247
- "text": "Although gazetteers obviously contain useful name entity information, a straightforward word match approach may even degrade the system performance since the information from gazetteers is too ambiguous. There are a lot of common words that exist in the gazetteers, such as 'T', \"A\", \"Friday\", \"June\", \"Friendship\", etc. Also, there is large overlap between person names and location names, such as \"Clinton\", \"Jordan\", etc.",
248
- "cite_spans": [],
249
- "ref_spans": [],
250
- "eq_spans": [],
251
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
252
- "sec_num": null
253
- },
254
- {
255
- "text": "Here we propose a machine learning approach to incorporate the gazetteer information with other common contextual information based on MaxEnt. Using MaxEnt, the system may learn under what situation the occurrence in gazetteers is a reliable evidence for a name entity.",
256
- "cite_spans": [],
257
- "ref_spans": [],
258
- "eq_spans": [],
259
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
260
- "sec_num": null
261
- },
262
- {
263
- "text": "We first define \"LFEATURE\" based on occurrence in the location gazetteer as follows: COEXIST(A,B) is true iff A and B are in the same US state, or in the same foreign country) OTHER",
264
- "cite_spans": [],
265
- "ref_spans": [
266
- {
267
- "start": 85,
268
- "end": 97,
269
- "text": "COEXIST(A,B)",
270
- "ref_id": null
271
- }
272
- ],
273
- "eq_spans": [],
274
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
275
- "sec_num": null
276
- },
277
- {
278
- "text": "There is precedence from the first LFEATURE to the last one. Each token in the input document is assigned a unique \"LFEATURE\". We also define \"NFEATURE\" based on occurrence in the name gazetteer as follows:",
279
- "cite_spans": [],
280
- "ref_spans": [],
281
- "eq_spans": [],
282
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
283
- "sec_num": null
284
- },
285
- {
286
- "text": "FAMILY MALE FEMALE FAMILYANDMALE name) FAMILYANDFEMALE name) OTHER",
287
- "cite_spans": [],
288
- "ref_spans": [],
289
- "eq_spans": [],
290
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
291
- "sec_num": null
292
- },
293
- {
294
- "text": "(family name) (male name) (female name) (family and male (family and female With these two extra features, every token in the document is regarded as a three-component vector (word, LFEATURE, NFEATURE). We can build a statistical model to evaluate the conditional probability based on these contextual and gazetteer features. Here \"tag\" represents one of the three possible tags (Person, Location, Other), and history represents any possible contextual history. Generally, we have:",
295
- "cite_spans": [],
296
- "ref_spans": [],
297
- "eq_spans": [],
298
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
299
- "sec_num": null
300
- },
301
- {
302
- "text": "p (tag, history) tag (1)",
303
- "cite_spans": [],
304
- "ref_spans": [],
305
- "eq_spans": [],
306
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
307
- "sec_num": null
308
- },
309
- {
310
- "text": "A maximum entropy solution for probability has the form [Rosenfeld 1994 ",
311
- "cite_spans": [
312
- {
313
- "start": 56,
314
- "end": 71,
315
- "text": "[Rosenfeld 1994",
316
- "ref_id": "BIBREF3"
317
- }
318
- ],
319
- "ref_spans": [],
320
- "eq_spans": [],
321
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
322
- "sec_num": null
323
- },
324
- {
325
- "text": "where fi (history, tag) are binary-valued feature functions that are dependent on whether the feature is applicable to the current contextual history. Here is an example of our feature function:",
326
- "cite_spans": [],
327
- "ref_spans": [],
328
- "eq_spans": [],
329
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
330
- "sec_num": null
331
- },
332
- {
333
- "text": "EQUATION",
334
- "cite_spans": [],
335
- "ref_spans": [],
336
- "eq_spans": [
337
- {
338
- "start": 0,
339
- "end": 8,
340
- "text": "EQUATION",
341
- "ref_id": "EQREF",
342
- "raw_str": "f(history,tag)={~ ifcurrenttokenisaeountryname, andtagisloeatiOnotherwise",
343
- "eq_num": "(4)"
344
- }
345
- ],
346
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
347
- "sec_num": null
348
- },
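(Aside: the exponential form in (2)-(4) is easy to make concrete. The sketch below is a minimal illustration, not the paper's implementation: both feature predicates, their hand-set weights and the toy lookups are assumptions; only the functional form p(tag | history) = exp(Σ_i α_i f_i(history, tag)) / Z(history) follows the equations above.)

```python
import math

# Toy binary feature functions in the spirit of (4); the predicates are invented.
def f_country_location(history, tag):
    return 1 if history["word"] in {"Denmark", "France"} and tag == "Location" else 0

def f_male_person(history, tag):
    return 1 if history["nfeature"] == "MALE" and tag == "Person" else 0

FEATURES = [f_country_location, f_male_person]
ALPHAS = [1.7, 1.2]  # weights alpha_i, set by hand here; IIS would estimate them

def p_tag_given_history(tag, history, tags=("Person", "Location", "Other")):
    # p(tag | history) = exp(sum_i alpha_i * f_i(history, tag)) / Z(history)
    def score(t):
        return math.exp(sum(a * f(history, t) for a, f in zip(ALPHAS, FEATURES)))
    return score(tag) / sum(score(t) for t in tags)

print(round(p_tag_given_history("Location", {"word": "Denmark", "nfeature": "OTHER"}), 3))
```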
349
- {
350
- "text": "In (2) and(3) a i are weights associated to feature functions.",
351
- "cite_spans": [],
352
- "ref_spans": [],
353
- "eq_spans": [],
354
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
355
- "sec_num": null
356
- },
357
- {
358
- "text": "The weight evaluation scheme is as follows: We first compute the average value of each feature function according to a specific training corpus. The obtained average observations are set as constraints, and the Improved Iterative Scaling (IIS) algorithm [Pietra et al. 1995] is employed to evaluate the weights. The resulting probability distribution (2) possesses the maximum entropy among all the probability distributions consistent with the constraints imposed by feature function average values.",
359
- "cite_spans": [
360
- {
361
- "start": 254,
362
- "end": 274,
363
- "text": "[Pietra et al. 1995]",
364
- "ref_id": null
365
- }
366
- ],
367
- "ref_spans": [],
368
- "eq_spans": [],
369
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
370
- "sec_num": null
371
- },
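(Aside: to make the weight-evaluation step concrete, here is a simplified scaling loop. It uses the classic generalized-iterative-scaling update α_i += (1/C)·log(observed/expected) as a stand-in for full IIS; the features, training events and constant C are toy assumptions.)

```python
import math

TAGS = ("Person", "Location", "Other")

# Two toy binary feature functions f_i(history, tag); invented for illustration.
feats = [
    lambda h, t: 1 if h.get("in_loc_gaz") and t == "Location" else 0,
    lambda h, t: 1 if h.get("in_name_gaz") and t == "Person" else 0,
]

# Toy training events: (history, gold tag).
data = [({"in_loc_gaz": True}, "Location"),
        ({"in_loc_gaz": True}, "Other"),
        ({"in_name_gaz": True}, "Person"),
        ({"in_name_gaz": True}, "Person"),
        ({"in_name_gaz": True}, "Other")]

C = 1.0  # scaling constant: max number of features active on any (history, tag)
alphas = [0.0] * len(feats)

def p(tag, h):
    s = {t: math.exp(sum(a * f(h, t) for a, f in zip(alphas, feats))) for t in TAGS}
    return s[tag] / sum(s.values())

for _ in range(100):
    emp = [sum(f(h, y) for h, y in data) for f in feats]           # observed counts
    mod = [sum(p(t, h) * f(h, t) for h, _ in data for t in TAGS)   # expected counts
           for f in feats]
    alphas = [a + math.log(e / m) / C for a, e, m in zip(alphas, emp, mod)]

print([round(a, 2) for a in alphas])  # weights that make the model match the constraints
```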
372
- {
373
- "text": "In the training stage, our gazetteer module contains two sub-modules: feature function induction and weight evaluation [Pietra et al. 1995] . The structure is shown in Figure 2 . where the symbol .... denotes any possible values which may be inserted into that field. Different fields will be filled different values.",
374
- "cite_spans": [
375
- {
376
- "start": 119,
377
- "end": 139,
378
- "text": "[Pietra et al. 1995]",
379
- "ref_id": null
380
- }
381
- ],
382
- "ref_spans": [
383
- {
384
- "start": 168,
385
- "end": 176,
386
- "text": "Figure 2",
387
- "ref_id": null
388
- }
389
- ],
390
- "eq_spans": [],
391
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
392
- "sec_num": null
393
- },
394
- {
395
- "text": "Then, using a training corpus containing 230,000 tokens, we set up a feature function candidate space based on the feature function templates. The \"Feature Function Induction Module\" can select next feature function that reduces the Kullback-Leibler divergence the most [Pietra et al. 1995] . To make the weight evaluation computation tractable at the feature function induction stage, when trying a new feature function, all previous computed weights are held constant, and we only fit one new constraint that is imposed by the candidate feature function. Once the next feature function is selected, we recalculate the weights by IIS to satisfy all the constraints, and thus obtain the next tentative probability. The feature function induction module will stop when the Log-likelihood gain is less than a pre-set threshold.",
396
- "cite_spans": [
397
- {
398
- "start": 270,
399
- "end": 290,
400
- "text": "[Pietra et al. 1995]",
401
- "ref_id": null
402
- }
403
- ],
404
- "ref_spans": [],
405
- "eq_spans": [],
406
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
407
- "sec_num": null
408
- },
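(Aside: the greedy induction loop can be sketched as follows. This is only a schematic stand-in: the one-dimensional fit of a candidate's weight is done by coarse grid search rather than the closed-form machinery of Pietra et al., the gain criterion is log-likelihood gain, and the data and candidate features are toy values.)

```python
import math

TAGS = ("Person", "Location", "Other")

def prob(tag, hist, feats, alphas):
    s = {t: math.exp(sum(a * f(hist, t) for a, f in zip(alphas, feats))) for t in TAGS}
    return s[tag] / sum(s.values())

def loglik(data, feats, alphas):
    return sum(math.log(prob(tag, h, feats, alphas)) for h, tag in data)

def best_new_weight(data, feats, alphas, cand):
    # Hold existing weights fixed; fit only the candidate's weight (1-D problem),
    # here by a coarse grid search for simplicity.
    grid = [x / 4 for x in range(-12, 13)]
    return max(grid, key=lambda a: loglik(data, feats + [cand], alphas + [a]))

def induce(data, candidates, threshold=0.01):
    feats, alphas = [], []
    base = loglik(data, feats, alphas)
    while candidates:
        scored = [(best_new_weight(data, feats, alphas, c), c) for c in candidates]
        gains = [(loglik(data, feats + [c], alphas + [a]) - base, a, c) for a, c in scored]
        gain, a, c = max(gains, key=lambda g: g[0])
        if gain < threshold:
            break  # stop when the log-likelihood gain falls below the preset threshold
        feats.append(c); alphas.append(a)  # (a full IIS re-fit would go here)
        candidates.remove(c)
        base = loglik(data, feats, alphas)
    return feats, alphas

data = [({"gaz": "CITY"}, "Location"), ({"gaz": "NONE"}, "Other")]
cands = [lambda h, t: 1 if h["gaz"] == "CITY" and t == "Location" else 0]
_, weights = induce(data, cands)
print(weights)  # one feature selected, with its fitted weight
```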
409
- {
410
- "text": "The gazetteer module recognizes the person and location names in the document despite the fact that some of them may be embedded in an organization name. For example, \"New York Fire Department\" may be tagged as <LOCATION> New York </NE> Fire Department. In the input stream for HMM, each token being tagged as location is accordingly transformed into one of the built-in tokens \"CITY\", \"PROVINCE\", \"COUNTRY\". The HMM may group \"CITY Fire Department\" into an organization name. A similar technique is applied for person names.",
411
- "cite_spans": [],
412
- "ref_spans": [],
413
- "eq_spans": [],
414
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
415
- "sec_num": null
416
- },
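(Aside: a sketch of the token hand-off described above, with an invented three-entry category map standing in for the 250,000-entry location gazetteer.)

```python
# Tokens the gazetteer module tagged as locations are replaced by built-in
# class tokens before HMM decoding, so the bigram HMM can generalise
# ("CITY Fire Department" -> organization).
LOC_CATEGORY = {"New York": "CITY", "California": "PROVINCE", "Denmark": "COUNTRY"}

def substitute_locations(tokens, loc_spans):
    """loc_spans: (start, end, surface) triples produced by the gazetteer module."""
    out = list(tokens)
    # Replace right-to-left so earlier span indices stay valid.
    for start, end, surface in sorted(loc_spans, reverse=True):
        out[start:end] = [LOC_CATEGORY.get(surface, "CITY")]  # default is an assumption
    return out

print(substitute_locations(["New", "York", "Fire", "Department"], [(0, 2, "New York")]))
# -> ['CITY', 'Fire', 'Department']
```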
417
- {
418
- "text": "Since the tagged tokens from the gazetteer module are regarded by later modules as either person or location names, we require that the current module generates results with the highest possible precision. For each tagged token we will compute the entropy of the answer. If the entropy is higher than a pre-set threshold, the system will not be certain enough about the answer, and the word will be untagged. The missed location or person names may be recognized by the following HMM module.",
419
- "cite_spans": [],
420
- "ref_spans": [],
421
- "eq_spans": [],
422
- "section": "Aberdeen (CITY) California (PROVINCE) United States (COUNTRY)",
423
- "sec_num": null
424
- },
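(Aside: the entropy-based back-off can be stated in a few lines; the threshold value below is an assumption, not the system's actual setting.)

```python
import math

ENTROPY_THRESHOLD = 0.8  # nats; illustrative value

def confident_tag(dist):
    """dist: dict mapping tag -> p(tag|history) from the MaxEnt module."""
    h = -sum(p * math.log(p) for p in dist.values() if p > 0)
    if h > ENTROPY_THRESHOLD:
        return None  # too uncertain: leave untagged, defer to the HMM module
    return max(dist, key=dist.get)

print(confident_tag({"Person": 0.9, "Location": 0.05, "Other": 0.05}))   # 'Person'
print(confident_tag({"Person": 0.4, "Location": 0.35, "Other": 0.25}))   # None
```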
425
- {
426
- "text": "Our original HMM is similar to the Nymble [Bikel et al. 1997 ] system that is based on bigram statistics. To correct some of the leading errors, we incorporate manual segmentation rules with HMM.",
427
- "cite_spans": [
428
- {
429
- "start": 42,
430
- "end": 60,
431
- "text": "[Bikel et al. 1997",
432
- "ref_id": "BIBREF1"
433
- }
434
- ],
435
- "ref_spans": [],
436
- "eq_spans": [],
437
- "section": "Improving NE Segmentation through constrained HMM",
438
- "sec_num": "3"
439
- },
440
- {
441
- "text": "These syntactic rules may provide information beyond bigram and balance the limitation of the training corpus.",
442
- "cite_spans": [],
443
- "ref_spans": [],
444
- "eq_spans": [],
445
- "section": "Improving NE Segmentation through constrained HMM",
446
- "sec_num": "3"
447
- },
448
- {
449
- "text": "Our manual rules focus on improving the NE segmentation.",
450
- "cite_spans": [],
451
- "ref_spans": [],
452
- "eq_spans": [],
453
- "section": "Improving NE Segmentation through constrained HMM",
454
- "sec_num": "3"
455
- },
456
- {
457
- "text": "For example, in the token sequence \"College of William and Mary\", we have rules based on global sequence checking to determine if the words \"and\" or \"of\" are common words or parts of organization name.",
458
- "cite_spans": [],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "Improving NE Segmentation through constrained HMM",
462
- "sec_num": "3"
463
- },
464
- {
465
- "text": "The output of the rules are some constraints on the HMM transition network, such as \"same tags for tokens A, B\", or \"common word for token A\". The Viterbi algorithm will select the optimized path that is consistent with such constraints.",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Improving NE Segmentation through constrained HMM",
470
- "sec_num": "3"
471
- },
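(Aside: to illustrate constraint-consistent decoding. A real constrained Viterbi prunes the transition lattice; the brute-force sketch below, with a tiny tag set and toy scores, makes the selection criterion explicit: among all tag sequences satisfying the rule constraints, take the highest-scoring one.)

```python
from itertools import product

TAGS = ["PER", "ORG", "COMMON"]

def score(tokens, tags, log_emit, log_trans):
    s = log_emit(tokens[0], tags[0])
    for i in range(1, len(tokens)):
        s += log_trans(tags[i - 1], tags[i]) + log_emit(tokens[i], tags[i])
    return s

def decode(tokens, log_emit, log_trans, same_tag=(), common=()):
    def ok(tags):
        # Constraints produced by the manual rules.
        return all(tags[i] == tags[j] for i, j in same_tag) and \
               all(tags[i] == "COMMON" for i in common)
    paths = [t for t in product(TAGS, repeat=len(tokens)) if ok(t)]
    return max(paths, key=lambda t: score(tokens, t, log_emit, log_trans))

# Toy model: capitalized words prefer NE tags; "and" in "William and Mary" is
# forced to share a tag with its neighbours, as in the example above.
emit = lambda w, t: 0.0 if w[0].isupper() == (t != "COMMON") else -2.0
trans = lambda a, b: 0.0 if a == b else -0.5
print(decode(["William", "and", "Mary"], emit, trans, same_tag=[(0, 1), (1, 2)]))
```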
472
- {
473
- "text": "The manual rules are divided into three categories: (i) preposition disambiguation, (ii) spurious capitalized word disambiguation, and (iii) spurious NE sequence disambiguation.",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "Improving NE Segmentation through constrained HMM",
478
- "sec_num": "3"
479
- },
480
- {
481
- "text": "The rules of preposition disambiguation are responsible for determination of boundaries involving prepositions (\"of\", \"and\", \"'s\", etc.). For example, for the sequence \"A of B\", we have the following rule: A and B have same tags if the lowercase of A and B both occur in OXFD dictionary. A \"global word sequence checking\" [Mikheev, 1999] is also employed. For the sequence \"Sprint and MCI\", we search the document globally. If the word \"Sprint\" or \"MCI\" occurs individually somewhere else, we mark \"and\" as a common word.",
482
- "cite_spans": [
483
- {
484
- "start": 322,
485
- "end": 337,
486
- "text": "[Mikheev, 1999]",
487
- "ref_id": null
488
- }
489
- ],
490
- "ref_spans": [],
491
- "eq_spans": [],
492
- "section": "Improving NE Segmentation through constrained HMM",
493
- "sec_num": "3"
494
- },
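(Aside: the global check for "Sprint and MCI" can be sketched directly; the neighbourhood test used here is an assumption about what "occurs individually" means.)

```python
def and_is_common(doc_tokens, i):
    """i is the index of 'and' in a capitalized sequence A and B."""
    a, b = doc_tokens[i - 1], doc_tokens[i + 1]
    for j, w in enumerate(doc_tokens):
        if w in (a, b) and j not in (i - 1, i + 1):
            neighbours = doc_tokens[max(0, j - 1):j] + doc_tokens[j + 1:j + 2]
            if all(n.lower() != "and" for n in neighbours):
                return True  # A or B seen standing alone: treat "and" as common
    return False

doc = "Sprint and MCI cut prices . Sprint said ...".split()
print(and_is_common(doc, 1))  # True: 'Sprint' occurs individually later on
```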
495
- {
496
- "text": "The rules of spurious capitalized word disambiguation are designed to recognize the first word in the sentence. If the first word is unknown in the training corpus, but occurs in OXFD as a common word in lowercase, HHM's unknown word model may be not accurate enough. The rules in the following paragraph are designed to treat such a situation.",
497
- "cite_spans": [],
498
- "ref_spans": [],
499
- "eq_spans": [],
500
- "section": "Improving NE Segmentation through constrained HMM",
501
- "sec_num": "3"
502
- },
503
- {
504
- "text": "If the second word of the same sentence is in lowercase, the first word is tagged as a common word since it never occurs as an isolated NE token in the training corpus unless it has been recognized as a NE elsewhere in the document. If the second word is capitalized, we will check globally if the same sequence occurs somewhere else. If so, the HMM is constrained to assign the same tag to the two tokens. Otherwise, the capitalized token is tagged as a common word.",
505
- "cite_spans": [],
506
- "ref_spans": [],
507
- "eq_spans": [],
508
- "section": "Improving NE Segmentation through constrained HMM",
509
- "sec_num": "3"
510
- },
511
- {
512
- "text": "The rules of spurious NE sequence disambiguation are responsible for finding spurious NE output from HMM, adding constraints, and re-computing NE by HMM. For example, in a sequence \"Person Organization\", we will require the same output tag for these two tokens and run HMM again.",
513
- "cite_spans": [],
514
- "ref_spans": [],
515
- "eq_spans": [],
516
- "section": "Improving NE Segmentation through constrained HMM",
517
- "sec_num": "3"
518
- },
519
- {
520
- "text": "The output document from constrained HMM contains MUC-standard NE.tags such as person, location and organization. However, for a real information extraction system, the MUC-standard NE tag may not be enough and further detailed NE information might be necessary. We have predefined the following sub-types for person, location and organization: If a NE is not covered by any of the above sub-categories, it should remain a MUC-standard tag. Obviously, the sub-categorization requires much more information beyond bigram than MUC-standard tagging. For example, it is hard to recognize CNN as a Mass Media company by bigram if the token \"CNN\" never occurs in the training corpus. External gazetteer information is critical for some sub-category recognition, and trigger word models may also play an important role.",
521
- "cite_spans": [],
522
- "ref_spans": [],
523
- "eq_spans": [],
524
- "section": "NE Sub-Type Tagging using Maximum Entropy Model",
525
- "sec_num": "4"
526
- },
527
- {
528
- "text": "With such considerations, we use the Maximum entropy model for sub-categorization, since MaxEnt is powerful enough to incorporate into the system gazetteer or other information sources which might become available at some later time.",
529
- "cite_spans": [],
530
- "ref_spans": [],
531
- "eq_spans": [],
532
- "section": "NE Sub-Type Tagging using Maximum Entropy Model",
533
- "sec_num": "4"
534
- },
535
- {
536
- "text": "Similar to the gazetteer module in Section 2, the sub-categorization module in the training stage contains two sub-modules, (i) feature function induction and (ii) weight evaluation. We have the following seven feature function templates: We have trained 1,000 feature functions by the feature function induction module according to the above templates.",
537
- "cite_spans": [],
538
- "ref_spans": [],
539
- "eq_spans": [],
540
- "section": "NE Sub-Type Tagging using Maximum Entropy Model",
541
- "sec_num": "4"
542
- },
543
- {
544
- "text": "Because much more external gazetteer information is necessary for the sub-categorization and there is an overlap between male and female name gazetteers, the result from the current MaxEnt module is not sufficiently accurate. Therefore, a conservative strategy has been applied. If the entropy of the output answer is higher than a threshold, we will back-off to the MUC-standard tags. Unlike MUC NE categories, local contextual information is not sufficient for sub-categorization. In the future more external gazetteers focusing on recognition of government, company, army, etc. will be incorporated into our system.",
545
- "cite_spans": [],
546
- "ref_spans": [],
547
- "eq_spans": [],
548
- "section": "NE Sub-Type Tagging using Maximum Entropy Model",
549
- "sec_num": "4"
550
- },
551
- {
552
- "text": "And we are considering using trigger words [Rosenfeld, 1994] to recognize some sub-categories. For example, \"psalms\" may be a trigger word for \"religious person\", and \"Navy\" may be a trigger word for \"military person\".",
553
- "cite_spans": [
554
- {
555
- "start": 43,
556
- "end": 60,
557
- "text": "[Rosenfeld, 1994]",
558
- "ref_id": "BIBREF3"
559
- }
560
- ],
561
- "ref_spans": [],
562
- "eq_spans": [],
563
- "section": "NE Sub-Type Tagging using Maximum Entropy Model",
564
- "sec_num": "4"
565
- },
566
- {
567
- "text": "We have tested our system on MUC-7 dry run data; this data consists of 22,000 words and represents articles from The New York Times. Since a key was provided with the data, it is possible to properly evaluate the performance of our NE tagger. The scoring program computes both the precision and recall, and combines these two measures into f-measure as the weighted harmonic mean [Chinchor, 1998] If the gazetteer module is removed from our system, and the constrained HMM is restored to the standard HMM, the f-measures for person, location, and organization are as follows: Obviously, our gazetteer model and constrained HMM have greatly increased the system accuracy on the recognition of persons, locations, and organizations. Currently, there are some errors in our gazetteers. Some common words such as \"Changes\", \"USER\", \"Administrator\", etc. are mistakenly included in the person name gazetteer. Also, too many person names are included into the location gazetteer. By cleaning up the gazetteers, we can continue improving the precision on person name and locations.",
568
- "cite_spans": [
569
- {
570
- "start": 380,
571
- "end": 396,
572
- "text": "[Chinchor, 1998]",
573
- "ref_id": null
574
- }
575
- ],
576
- "ref_spans": [],
577
- "eq_spans": [],
578
- "section": "Experiment and Conclusion",
579
- "sec_num": null
580
- },
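(Aside: for reference, the weighted harmonic mean used by the MUC scorer can be computed as below; the counts are toy values, not the paper's results.)

```python
# Precision, recall and their weighted harmonic mean (f-measure);
# beta = 1 weights precision and recall equally.
def f_measure(correct, guessed, actual, beta=1.0):
    precision = correct / guessed
    recall = correct / actual
    return ((beta**2 + 1) * precision * recall) / (beta**2 * precision + recall)

# Toy counts: 88 correct tags out of 95 proposed, against 100 tags in the key.
print(round(f_measure(88, 95, 100), 3))  # 0.903
```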
581
- {
582
- "text": "We also ran our NE tagger on the formal test files of MUC-7. The following are the results: ",
583
- "cite_spans": [],
584
- "ref_spans": [],
585
- "eq_spans": [],
586
- "section": "Recall Precision",
587
- "sec_num": null
588
- }
589
- ],
590
- "back_matter": [
591
- {
592
- "text": "There is some performance degradation in the formal test. This decrease is because that the formal test is focused on satellite and rocket domains in which our system has not been trained. There are some person/location names used as spacecraft or robot names (ex. Mir, Alvin, Columbia...), and there are many high-tech company names which do not occur in our HMM training corpus. Since the finding of organization names totally relies on the HMM model, it suffers most from domain shift (10% degradation). This difference implies that gazetteer information may be useful in overcoming the domain dependency. This paper has demonstrated improved performance in an NE tagger by combining symbolic and statistical approaches. MaxEnt has been demonstrated to be a viable technique for integrating diverse sources of information and has been used in NE sub-categorization. ",
593
- "cite_spans": [],
594
- "ref_spans": [],
595
- "eq_spans": [],
596
- "section": "annex",
597
- "sec_num": null
598
- }
599
- ],
600
- "bib_entries": {
601
- "BIBREF0": {
602
- "ref_id": "b0",
603
- "title": "IsoQuest Inc: Description of the NetOwl \"Fext Extraction System as used for MUC-7",
604
- "authors": [
605
- {
606
- "first": "G",
607
- "middle": [],
608
- "last": "Krupka",
609
- "suffix": ""
610
- },
611
- {
612
- "first": "K",
613
- "middle": [],
614
- "last": "Hausman",
615
- "suffix": ""
616
- }
617
- ],
618
- "year": 1998,
619
- "venue": "Proceedings of Seventh Machine Understanding Conference",
620
- "volume": "",
621
- "issue": "",
622
- "pages": "",
623
- "other_ids": {},
624
- "num": null,
625
- "urls": [],
626
- "raw_text": "G. R Krupka and K. Hausman, \"IsoQuest Inc: Description of the NetOwl \"Fext Extraction System as used for MUC-7\" in Proceedings of Seventh Machine Understanding Conference (MUC-7) (1998)",
627
- "links": null
628
- },
629
- "BIBREF1": {
630
- "ref_id": "b1",
631
- "title": "Nymble: a high-performance learning name-finder",
632
- "authors": [
633
- {
634
- "first": "D",
635
- "middle": [
636
- "M"
637
- ],
638
- "last": "Bikel",
639
- "suffix": ""
640
- }
641
- ],
642
- "year": 1997,
643
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
644
- "volume": "",
645
- "issue": "",
646
- "pages": "194--201",
647
- "other_ids": {},
648
- "num": null,
649
- "urls": [],
650
- "raw_text": "D. M. Bikel, \"Nymble: a high-performance learning name-finder\" in Proceedings of the Fifth Conference on Applied Natural Language Processing, 1997, pp. 194-201, Morgan Kaufmann Publishers.",
651
- "links": null
652
- },
653
- "BIBREF2": {
654
- "ref_id": "b2",
655
- "title": "Description of the MENE named Entity System",
656
- "authors": [
657
- {
658
- "first": "A",
659
- "middle": [],
660
- "last": "Borthwick",
661
- "suffix": ""
662
- }
663
- ],
664
- "year": 1998,
665
- "venue": "Proceedings of the Seventh Machine Understanding Conference",
666
- "volume": "",
667
- "issue": "",
668
- "pages": "",
669
- "other_ids": {},
670
- "num": null,
671
- "urls": [],
672
- "raw_text": "A. Borthwick, et al., Description of the MENE named Entity System, In Proceedings of the Seventh Machine Understanding Conference (MUC-7) (1998)",
673
- "links": null
674
- },
675
- "BIBREF3": {
676
- "ref_id": "b3",
677
- "title": "Adaptive Statistical language Modeling, PHD thesis",
678
- "authors": [
679
- {
680
- "first": "R",
681
- "middle": [],
682
- "last": "Rosenfeld",
683
- "suffix": ""
684
- }
685
- ],
686
- "year": 1994,
687
- "venue": "",
688
- "volume": "",
689
- "issue": "",
690
- "pages": "",
691
- "other_ids": {},
692
- "num": null,
693
- "urls": [],
694
- "raw_text": "R. Rosenfeld, Adaptive Statistical language Modeling, PHD thesis, Carnegie Mellon University, (1994)",
695
- "links": null
696
- }
697
- },
698
- "ref_entries": {
699
- "FIGREF1": {
700
- "type_str": "figure",
701
- "num": null,
702
- "uris": null,
703
- "text": "[[St l ST I Rd I RD I Dr I DRI Ave[AVE ] C.\")] I Street[ Road[Drive[Avenue city = @word (@word) state = @uppercase (\".\") @uppercase (\".\") us--USA IU.S.AIUSIU.S.I (The) United States (of America) street_addr = @number @word @street apt_addr ="
704
- },
705
- "FIGREF2": {
706
- "type_str": "figure",
707
- "num": null,
708
- "uris": null,
709
- "text": "Rule ~|ect|on Module [ ~elect next rule reduce the entropy most \"-~ Evaluate weiEht for each Selected rule IteraUve $\u00a2atinB (US) t Fig.2, Structure of MaxEnt learning Process We predefine twenty-four feature function templates. The following are some examples and others have similar structures: 10 if LFEATURE = , and tag = _ f (history, tag) = else f(history, tag)={lo f(history, tag)={~ f(history,tag)={lo f(history, tag)={;"
710
- },
711
- "FIGREF3": {
712
- "type_str": "figure",
713
- "num": null,
714
- "uris": null,
715
- "text": "10 if MUC_tag = _, and tag = _ f (history, tag) = else { 10 if MUC_tag = _, LFEATURE = _, and tag = _ f (history, tag) = else 1 if contain word(__), MUC tag(history) = _,and tag = f (history, tag ) = _, MUC_tag = _,and tag = _ f (history, tag)= else f(history, tag)= {10 if following_Word= _,MUC_tag = _,andelse tag=_ f(history, tag)={lo ifMUC_tag= ,contain_male_name, and tag"
716
- }
717
- }
718
- }
719
- }
Full_text_JSON/prefixA/json/A00/A00-1035.json DELETED
@@ -1,1010 +0,0 @@
1
- {
2
- "paper_id": "A00-1035",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:07.309709Z"
6
- },
7
- "title": "Spelling and Grammar Correction for Danish in SCARRIE",
8
- "authors": [
9
- {
10
- "first": "Patrizia",
11
- "middle": [],
12
- "last": "Paggio",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "patrizia@[email protected]"
16
- }
17
- ],
18
- "year": "",
19
- "venue": null,
20
- "identifiers": {},
21
- "abstract": "This paper reports on work carried out to develop a spelling and grammar corrector for Danish, addressing in particular the issue of how a form of shallow parsing is combined with error detection and correction for the treatment of context-dependent spelling errors. The syntactic grammar for Danish used by the system has been developed with the aim of dealing with the most frequent error types found in a parallel corpus of unedited and proofread texts specifically collected by the project's end users. By focussing on certain grammatical constructions and certain error types, it has been possible to exploit the linguistic 'intelligence' provided by syntactic parsing and yet keep the system robust and efficient. The system described is thus superior to other existing spelling checkers for Danish in its ability to deal with contextdependent errors.",
22
- "pdf_parse": {
23
- "paper_id": "A00-1035",
24
- "_pdf_hash": "",
25
- "abstract": [
26
- {
27
- "text": "This paper reports on work carried out to develop a spelling and grammar corrector for Danish, addressing in particular the issue of how a form of shallow parsing is combined with error detection and correction for the treatment of context-dependent spelling errors. The syntactic grammar for Danish used by the system has been developed with the aim of dealing with the most frequent error types found in a parallel corpus of unedited and proofread texts specifically collected by the project's end users. By focussing on certain grammatical constructions and certain error types, it has been possible to exploit the linguistic 'intelligence' provided by syntactic parsing and yet keep the system robust and efficient. The system described is thus superior to other existing spelling checkers for Danish in its ability to deal with contextdependent errors.",
28
- "cite_spans": [],
29
- "ref_spans": [],
30
- "eq_spans": [],
31
- "section": "Abstract",
32
- "sec_num": null
33
- }
34
- ],
35
- "body_text": [
36
- {
37
- "text": "In her much-quoted and still relevant review of technologies for automatic word correction (Kukich, 1992) , Kukich observes that \"research in context-dependent spelling correction is in its infancy\" (p. 429), and that the task of treating context-dependent errors is still an elusive one due to the complexity of the linguistic knowledge often necessary to analyse the context in sufficient depth to find and correct such errors. But progress in parsing technology and the growing speed of computers seem to have made the task less of a chimera. The '90s have in fact seen a renewed interest in grammar checking, and proposals have been made for systems covering English (Bernth, 1997) and other languages such as Italian (Bolioli et al., 1992) , Spanish and Greek (Bustamante and Ldon, 1996) , Czech (Holan et al., 1997) and Swedish (Hein, 1998) .",
38
- "cite_spans": [
39
- {
40
- "start": 91,
41
- "end": 105,
42
- "text": "(Kukich, 1992)",
43
- "ref_id": "BIBREF6"
44
- },
45
- {
46
- "start": 671,
47
- "end": 685,
48
- "text": "(Bernth, 1997)",
49
- "ref_id": "BIBREF0"
50
- },
51
- {
52
- "start": 722,
53
- "end": 744,
54
- "text": "(Bolioli et al., 1992)",
55
- "ref_id": "BIBREF1"
56
- },
57
- {
58
- "start": 747,
59
- "end": 792,
60
- "text": "Spanish and Greek (Bustamante and Ldon, 1996)",
61
- "ref_id": null
62
- },
63
- {
64
- "start": 801,
65
- "end": 821,
66
- "text": "(Holan et al., 1997)",
67
- "ref_id": "BIBREF5"
68
- },
69
- {
70
- "start": 826,
71
- "end": 846,
72
- "text": "Swedish (Hein, 1998)",
73
- "ref_id": null
74
- }
75
- ],
76
- "ref_spans": [],
77
- "eq_spans": [],
78
- "section": "Introduction",
79
- "sec_num": "1"
80
- },
81
- {
82
- "text": "This paper describes the prototype of a spelling and grammar corrector for Danish which combines traditional spelling checking functionalities with the ability to carry out compound analysis and to detect and correct certain types of context-dependent spelling errors (hereafter simply \"grammar errors\"). Grammar correction is carried out by parsing the text, making use of feature overriding and error weights to accommodate the errors. Although a full parse of each sentence is attempted, the grammar has been developed with the aim of dealing only with the most frequent error types found in a parallel corpus of unedited and proofread texts specifically collected by the project's end users. By focussing on certain grammatical constructions and certain error types, it has been possible to exploit the linguistic 'intelligence' provided by syntactic parsing and yet keep the system robust and efficient. The system described is thus superior to other existing spelling checkers for Danish in its ability to deal with certain types of grammar errors.",
83
- "cite_spans": [],
84
- "ref_spans": [],
85
- "eq_spans": [],
86
- "section": "Introduction",
87
- "sec_num": "1"
88
- },
89
- {
90
- "text": "We begin by giving an overview of the system's components in Section 2. In Section 3 we describe the error types we want to deal with: Section 4 gives an overview of the grammar: in particular, the methods adopted for treating feature mismatches and structural errors are explained. Finally, in Section 5 evaluation results are presented and a conclusion is drawn.",
91
- "cite_spans": [],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "Introduction",
95
- "sec_num": "1"
96
- },
97
- {
98
- "text": "The prototype is a system for high-quality proofreading for Danish which has been developed in the context of a collaborative EUproject 1. Together with the Danish prototype, 1Main contractors in the consortium were: WordFinder Software AB (Sweden), Center for the project has also produced similar systems for Swedish and Norwegian, all of them tailored to meet the specific needs of the Scandinavian publishing industry. They all provide writing support in the form of word and grammar checking.",
99
- "cite_spans": [],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "The prototype",
103
- "sec_num": "2"
104
- },
105
- {
106
- "text": "The Danish version of the system 2 constitutes a further development of the CORRie prototype (Vosse, 1992) (Vosse, 1994) , adapted to deal with the Danish language, and to the needs of the project's end users. The system processes text in batch mode and produces an annotated output text where errors are flagged and replacements suggested where possible. Text correction is performed in two steps: first the system deals with spelling errors and typos resulting in invalid words, and then with grammar errors.",
107
- "cite_spans": [
108
- {
109
- "start": 93,
110
- "end": 106,
111
- "text": "(Vosse, 1992)",
112
- "ref_id": null
113
- },
114
- {
115
- "start": 107,
116
- "end": 120,
117
- "text": "(Vosse, 1994)",
118
- "ref_id": "BIBREF16"
119
- }
120
- ],
121
- "ref_spans": [],
122
- "eq_spans": [],
123
- "section": "The prototype",
124
- "sec_num": "2"
125
- },
126
- {
127
- "text": "Invalid words are identified on the basis of dictionary lookup. The dictionary presently consists of 251,000 domain-relevant word forms extracted from a collection of 68,000 newspaper articles. A separate idiom list allowing for the identification of multi-word expressions is also available. Among the words not found in the dictionary or the idiom list, those occurring most frequently in the text (where frequency is assessed relative to the length of the text) are taken to be new words or proper names 3. The remaining unknown words are passed on to the compound analysis grammar, which is a set of regular expressions covering the most common types of compound nominals in Danish. This is an important feature, as in Danish compounding is very productive, and compounds are written as single words.",
128
- "cite_spans": [],
129
- "ref_spans": [],
130
- "eq_spans": [],
131
- "section": "The prototype",
132
- "sec_num": "2"
133
- },
134
- {
135
- "text": "Words still unknown at this point are taken to be spelling errors. The System flags them as Sprogteknologi (Denmark), Department of Linguistics at Uppsala University (Sweden), Institutt for lingvistikk og litteraturvitenskab at the University of Bergen (Norway), and Svenska Dagbladet (Sweden). A number of subcontractors also contributed to the project. Subcontractors in Denmark were: Munksgaard International Publishers, Berlingske Tidende, Det Danske Sprog-og Litteraturselskab, and Institut for Almen og Anvendt Sprogvidenskab at the University of Copenhagen.",
136
- "cite_spans": [],
137
- "ref_spans": [],
138
- "eq_spans": [],
139
- "section": "The prototype",
140
- "sec_num": "2"
141
- },
142
- {
143
- "text": "2In addition to the author of the present paper, tlle Danish SCARRIE team at CST consisted of Claus Povlsen, Bart Kongejan and Bradley Music.",
144
- "cite_spans": [],
145
- "ref_spans": [],
146
- "eq_spans": [],
147
- "section": "The prototype",
148
- "sec_num": "2"
149
- },
150
- {
151
- "text": "3The system also checks whether a closely matching alternative can be found in the dictionary, to avoid mistaking a consistently misspelt word for a new word. such and tries to suggest a replacement. The algorithm used is based on trigram and triphone analysis (van Berkel and Smedt, 1988) , and takes into account the orthographic strings corresponding to the invalid word under consideration and its possible replacement, as well as the phonetic representations of the same two words. Phonetic representations are generated by a set of grapheme-to-phoneme rules (Hansen, 1999) the aim of which is to assign phonetically motivated misspellings and their correct counterparts identical or similar phonetic representations.",
152
- "cite_spans": [
153
- {
154
- "start": 261,
155
- "end": 289,
156
- "text": "(van Berkel and Smedt, 1988)",
157
- "ref_id": "BIBREF13"
158
- },
159
- {
160
- "start": 564,
161
- "end": 578,
162
- "text": "(Hansen, 1999)",
163
- "ref_id": "BIBREF3"
164
- }
165
- ],
166
- "ref_spans": [],
167
- "eq_spans": [],
168
- "section": "The prototype",
169
- "sec_num": "2"
170
- },
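(Aside: the trigram half of this matching can be sketched as follows. Dice overlap of padded letter trigrams is one standard formulation of the van Berkel and Smedt idea; the triphone half over phonetic representations is omitted here, and the candidate list is a toy assumption.)

```python
def trigrams(word):
    padded = f"##{word.lower()}##"
    return {padded[i:i + 3] for i in range(len(padded) - 2)}

def dice(a, b):
    # Dice coefficient over the two words' letter-trigram sets.
    ta, tb = trigrams(a), trigrams(b)
    return 2 * len(ta & tb) / (len(ta) + len(tb))

candidates = ["bevare", "bevarer", "bedre"]
misspelt = "bevae"
print(max(candidates, key=lambda c: dice(misspelt, c)))  # 'bevare'
```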
171
- {
172
- "text": "Then the system tries to identify contextdependent spelling errors. This is done by parsing the text. Parsing results are passed on to a corrector to find replacements for the errors found. The parser is an implementation of the Tomita algorithm with a component for error recognition whose job is to keep track of error weights and feature mismatches as described in (Vosse, 1991) . Each input sentence is assigned the analysis with the lowest error weight. If the error is due to a feature mismatch, the offending feature is overridden, and if a dictionary entry satisfying the grammar constraints expressed by the context is found in the dictionary, it is offered as a replacement. If the structure is incomplete, on the other hand, an error message is generated. Finally, if the system identifies an error as a split-up or a run-on, it will suggest either a possible concatenation, or a sequence of valid words into which the misspelt word can be split up. Figure 1: Error distribution in the Danish corpus grammar development was then to enable the system to identify and analyse the grammatical constructions in which errors typically occur, whilst to some extent disregarding the remainder of the text.",
173
- "cite_spans": [
174
- {
175
- "start": 368,
176
- "end": 381,
177
- "text": "(Vosse, 1991)",
178
- "ref_id": null
179
- }
180
- ],
181
- "ref_spans": [],
182
- "eq_spans": [],
183
- "section": "The prototype",
184
- "sec_num": "2"
185
- },
186
- {
187
- "text": "The errors occurring in the corlbus have been analysed according to the taxonomy in (Rambell, 1997) . Figure 1 shows the distribution of the various error types into the five top-level categories of the taxonomy. As can be seen, grammar errors account for 30~0 of the errors. Of these, 70% fall into one of the following categories (Povlsen, 1998) Another way of grouping the errors is by the kind of parsing failure they generate: they can then be viewed as either feature mismatches, or as structural errors. Agreement errors are typical examples of feature mismatches. In the following nominal phrase, for example:",
188
- "cite_spans": [
189
- {
190
- "start": 84,
191
- "end": 99,
192
- "text": "(Rambell, 1997)",
193
- "ref_id": "BIBREF12"
194
- },
195
- {
196
- "start": 332,
197
- "end": 347,
198
- "text": "(Povlsen, 1998)",
199
- "ref_id": "BIBREF11"
200
- }
201
- ],
202
- "ref_spans": [
203
- {
204
- "start": 102,
205
- "end": 110,
206
- "text": "Figure 1",
207
- "ref_id": null
208
- }
209
- ],
210
- "eq_spans": [],
211
- "section": "The prototype",
212
- "sec_num": "2"
213
- },
214
- {
215
- "text": "(1) de *interessant projekter (the interesting projects) _the error can be formalised as a mismatch between the definiteness of the determiner de (the) and the indefiniteness of the adjective interessant (interesting). Adjectives have in fact both an indefinite and a definite form in Danish.",
216
- "cite_spans": [],
217
- "ref_spans": [],
218
- "eq_spans": [],
219
- "section": "The prototype",
220
- "sec_num": "2"
221
- },
222
- {
223
- "text": "The sentence below, on the other hand, is an example of structural error.",
224
- "cite_spans": [],
225
- "ref_spans": [],
226
- "eq_spans": [],
227
- "section": "The prototype",
228
- "sec_num": "2"
229
- },
230
- {
231
- "text": "(2) i sin tid *skabet han skulpturer over atomkraften (during his time wardrobe/created he sculptures about nuclear power)",
232
- "cite_spans": [],
233
- "ref_spans": [],
234
- "eq_spans": [],
235
- "section": "The prototype",
236
- "sec_num": "2"
237
- },
238
- {
239
- "text": "Since the finite verb skabte (created) has been misspelt as skabet (the wardrobe), the syntactic structure corresponding to the sentence is missing a verbal head.",
240
- "cite_spans": [],
241
- "ref_spans": [],
242
- "eq_spans": [],
243
- "section": "The prototype",
244
- "sec_num": "2"
245
- },
246
- {
247
- "text": "Run-ons and split-ups are structural errors of a particular kind, having to do with leaves in the syntactic tree. In some cases they can only be detected on the basis of the context, because the misspelt word has the wrong category or bears some other grammatical feature that is incorrect in the context. Examples are given in (3) and (4) below, which like the preceding examples are taken from the project's corpus. In both cases, the error would be a valid word in a different context. More specifically, rigtignok (indeed) is an adverb, whilst rigtig nok (actually correct) is a modified adjective; and inden .for (inside) is a preposition, whilst indenfor (indoors) is an adverb. In both examples the correct alternative is indicated in parentheses.",
248
- "cite_spans": [],
249
- "ref_spans": [],
250
- "eq_spans": [],
251
- "section": "The prototype",
252
- "sec_num": "2"
253
- },
254
- {
255
- "text": "(3) ... studerede rain gruppe *rigtig nok (rigtignok) under temaoverskrifter (studied my group indeed on the basis of topic headings) (4) *indenfor (inden for) de gule mute (inside the yellow walls)",
256
- "cite_spans": [],
257
- "ref_spans": [],
258
- "eq_spans": [],
259
- "section": "The prototype",
260
- "sec_num": "2"
261
- },
262
- {
263
- "text": "Although the system has a facility for identifying and correcting split-ups and run-ons based on a complex interaction between the dictionary, the idiom list, the compound grammar and the syntactic grammar, this facility has not been fully developed yet, and will therefore not be described any further here. More details can be found in (Paggio, 1999) .",
264
- "cite_spans": [
265
- {
266
- "start": 338,
267
- "end": 352,
268
- "text": "(Paggio, 1999)",
269
- "ref_id": "BIBREF9"
270
- }
271
- ],
272
- "ref_spans": [],
273
- "eq_spans": [],
274
- "section": "The prototype",
275
- "sec_num": "2"
276
- },
277
- {
278
- "text": "The grammar is an augmented context-free grammar consisting of rewrite rules where symbols are associated with features. Error weights and error messages can also be attached to either rules or single features. The rules are applied by unification, but in cases where one or more features do not unify, the offending features will be overridden.",
279
- "cite_spans": [],
280
- "ref_spans": [],
281
- "eq_spans": [],
282
- "section": "The grammar",
283
- "sec_num": "4"
284
- },
285
- {
286
- "text": "In the current version of the grammar~ only the structures relevant to the error types we want the system to deal with -in other words nominal phrases and verbal groups -are accounted for in detail. The analysis produced is thus a kind of shallow syntactic analysis where the various sentence constituents are attached under the topmost S node as fragments.",
287
- "cite_spans": [],
288
- "ref_spans": [],
289
- "eq_spans": [],
290
- "section": "The grammar",
291
- "sec_num": "4"
292
- },
293
- {
294
- "text": "For example, adjective phrases can be analysed as fragments, as shown in the following rule:",
295
- "cite_spans": [],
296
- "ref_spans": [],
297
- "eq_spans": [],
298
- "section": "The grammar",
299
- "sec_num": "4"
300
- },
301
- {
302
- "text": "Fragment -> AP \"?Fragment AP rule\":2",
303
- "cite_spans": [],
304
- "ref_spans": [],
305
- "eq_spans": [],
306
- "section": "The grammar",
307
- "sec_num": "4"
308
- },
309
- {
310
- "text": "To indicate that the fragment analysis is not optimal, it is associated with an error weight, as well as an error message to be used for debugging purposes (the message is not visible to the end user). The weight penalises parse trees built by applying the rule. The rule is used e.g. to analyse an AP following a copula verb as in:",
311
- "cite_spans": [],
312
- "ref_spans": [],
313
- "eq_spans": [],
314
- "section": "The grammar",
315
- "sec_num": "4"
316
- },
317
- {
318
- "text": "(5) De projekter er ikke interessante.",
319
- "cite_spans": [],
320
- "ref_spans": [],
321
- "eq_spans": [],
322
- "section": "The grammar",
323
- "sec_num": "4"
324
- },
325
- {
326
- "text": "(Those projects are not interesting)",
327
- "cite_spans": [],
328
- "ref_spans": [],
329
- "eq_spans": [],
330
- "section": "The grammar",
331
- "sec_num": "4"
332
- },
333
- {
334
- "text": "The main motivation for implementing a grammar based on the idea of fragments was efficiency. Furthermore, the fragment strategy could be implemented very quickly. However, as will be clarified in Section 5, this strategy is sometimes responsible for bad flags.",
335
- "cite_spans": [],
336
- "ref_spans": [],
337
- "eq_spans": [],
338
- "section": "The grammar",
339
- "sec_num": "4"
340
- },
341
- {
342
- "text": "As an alternative to the fragment analysis, APs can be attached as daughters in NPs. This is of course necessary for the treatment of agreement in NPs, one of the error types targeted in our application. This is shown in the following rule:",
343
- "cite_spans": [],
344
- "ref_spans": [],
345
- "eq_spans": [],
346
- "section": "Feature mismatches",
347
- "sec_num": "4.1"
348
- },
349
- {
350
- "text": "NP(def Gender PersNumber) -> Det (def Gender PersNumber) AP(def _ _) N(indef Gender:9-PersNumber)",
351
- "cite_spans": [],
352
- "ref_spans": [],
353
- "eq_spans": [],
354
- "section": "Feature mismatches",
355
- "sec_num": "4.1"
356
- },
357
- {
358
- "text": "The rule will parse a correct definite NP such as:",
359
- "cite_spans": [],
360
- "ref_spans": [],
361
- "eq_spans": [],
362
- "section": "Feature mismatches",
363
- "sec_num": "4.1"
364
- },
365
- {
366
- "text": "but also (7) (S) de interessante projekter (the interesting projects)",
367
- "cite_spans": [],
368
- "ref_spans": [],
369
- "eq_spans": [],
370
- "section": "Feature mismatches",
371
- "sec_num": "4.1"
372
- },
373
- {
374
- "text": "de *interessant projekter de interessante *projekterne",
375
- "cite_spans": [],
376
- "ref_spans": [],
377
- "eq_spans": [],
378
- "section": "Feature mismatches",
379
- "sec_num": "4.1"
380
- },
381
- {
382
- "text": "The feature overriding mechanism makes it possible for the system to suggest interessante as the correct replacement in (7), and projekter in (8). Let us see how this is done in more detail for example (7). The parser tries to apply the NP rule to the input string. The rule states that the adjective phrase must be definite (AP (def _ _)). But the dictionary entry corresponding to interessant bears the feature 'indef'. The parser will override this feature and build an NP according to the constraints expressed by the rule. At this point, a new dictionary lookup is performed, and the definite form of the adjective can be suggested as a replacement.",
383
- "cite_spans": [],
384
- "ref_spans": [],
385
- "eq_spans": [],
386
- "section": "Feature mismatches",
387
- "sec_num": "4.1"
388
- },
389
- {
390
- "text": "Weights are used to control rule interaction as well as to establish priorities among features that may have to be overridden. For example in our NP rule, a weight has been attached to the Gender feature in the N node. The weight expresses the fact that it costs more to override gender on the head noun than on the determiner or adjective. The rationale behind this is the fact that if there is a gender mismatch, the parser should not try to find an alternative \u2022 form of the noun (which does not exist), but if necessary override the gender feature either on the adjective or the determiner.",
391
- "cite_spans": [],
392
- "ref_spans": [],
393
- "eq_spans": [],
394
- "section": "Feature mismatches",
395
- "sec_num": "4.1"
396
- },
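(Aside: a minimal sketch of weighted feature overriding, assuming a flat feature representation and toy weights; the real parser applies this over the full parse lattice and picks the analysis with the lowest accumulated error weight.)

```python
# Unify a rule's feature constraints with a lexical entry: each mismatch is
# overridden but costs the weight attached to that feature in the rule
# (1 if unstated). Lower total cost wins.
def unify_with_override(rule_feats, entry_feats, weights):
    cost, overridden = 0, []
    for feat, wanted in rule_feats.items():
        got = entry_feats.get(feat)
        if got is not None and got != wanted:
            cost += weights.get(feat, 1)   # override, but pay the feature's weight
            overridden.append(feat)
    return cost, overridden

# The NP rule wants a definite AP; 'interessant' is indefinite: cheap override.
# Overriding gender on the head noun would instead cost 9 (cf. Gender:9 above).
rule_ap = {"def": "def"}
entry = {"def": "indef", "gender": "neuter"}
print(unify_with_override(rule_ap, entry, {"gender": 9}))  # (1, ['def'])
```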
397
- {
398
- "text": "To capture structural errors, the formalism allows the grammar writer to write so-called error rules. The syntax of error rules is very similar to that used in 'normal' rules, the only difference being that an error rule must have an er-\u2022 ror weight and an error message attached to it. The purpose of the weight is to ensure that error rules are applied only if 'normal' rules are not applicable. The error message can serve two purposes. Depending on whether it is stated as an implicit or an explicit message (i.e. whether it is preceded by a question mark or not), it will appear in the log file where it can be used for debugging purposes, or in the output text as a message to the end user.",
399
- "cite_spans": [],
400
- "ref_spans": [],
401
- "eq_spans": [],
402
- "section": "Capturing structural errors in grammar rules",
403
- "sec_num": "4.2."
404
- },
405
- {
406
- "text": "The following is an error rule example.",
407
- "cite_spans": [],
408
- "ref_spans": [],
409
- "eq_spans": [],
410
- "section": "Capturing structural errors in grammar rules",
411
- "sec_num": "4.2."
412
- },
413
- {
414
- "text": "VGroup(_ finite Tense) -> V(_ finite:4 Tense) V(_ finite:4 _) \"Sequence of two finite verbs\":4 A weight of 4 is attached to the rule as a whole, but there are also weights attached to the 'finiteness' feature on the daughters: their function is to make it costly for the system to apply the rule to non-finite forms. In other words, the feature specification 'finite' is made difficult to override to ensure that it is indeed a sequence of finite verbal forms the rule applies to and flags.",
415
- "cite_spans": [],
416
- "ref_spans": [],
417
- "eq_spans": [],
418
- "section": "Capturing structural errors in grammar rules",
419
- "sec_num": "4.2."
420
- },
421
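
The interaction between an error rule's weight and the ordinary rules can be pictured as a competition between analyses ranked by penalty. The sketch below (Python) uses an invented rule encoding; only the weight 4 and the error message come from the rule above.

```python
# Illustrative sketch of how an error rule with an attached weight and
# message competes with 'normal' rules; the encoding is invented here and
# is not the project's actual formalism.
NORMAL_RULES = [
    # (name, daughter categories, penalty)
    ("VGroup -> V(finite) V(infinite)", ["V:finite", "V:infinite"], 0),
]
ERROR_RULES = [
    # The error weight 4 makes this rule fire only when no cheaper analysis exists.
    ("VGroup -> V(finite) V(finite)", ["V:finite", "V:finite"], 4,
     "Sequence of two finite verbs"),
]

def parse_vgroup(tokens):
    """tokens: list like ['V:finite', 'V:finite'] for 'vil bevarer'."""
    analyses = []
    for name, pattern, penalty in NORMAL_RULES:
        if tokens == pattern:
            analyses.append((penalty, name, None))
    for name, pattern, penalty, message in ERROR_RULES:
        if tokens == pattern:
            analyses.append((penalty, name, message))
    return min(analyses) if analyses else None

# 'Jeg vil *bevarer ...': the modal is finite and so is the (wrong) main verb.
print(parse_vgroup(["V:finite", "V:finite"]))
# -> (4, 'VGroup -> V(finite) V(finite)', 'Sequence of two finite verbs')
```
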
- {
422
- "text": "The rule will for example parse the verbal sequence in the following sentence:",
423
- "cite_spans": [],
424
- "ref_spans": [],
425
- "eq_spans": [],
426
- "section": "Capturing structural errors in grammar rules",
427
- "sec_num": "4.2."
428
- },
429
- {
430
- "text": "(9) Jeg vil *bevarer (berate) rain frihed.",
431
- "cite_spans": [],
432
- "ref_spans": [],
433
- "eq_spans": [],
434
- "section": "Capturing structural errors in grammar rules",
435
- "sec_num": "4.2."
436
- },
437
- {
438
- "text": "(*I want keep my freedom)",
439
- "cite_spans": [],
440
- "ref_spans": [],
441
- "eq_spans": [],
442
- "section": "Capturing structural errors in grammar rules",
443
- "sec_num": "4.2."
444
- },
445
- {
446
- "text": "As a result of parsing, the system in this case will not attempt to correct the wrong verbal form, but issue the error message \"Sequence of two finite verbs\".",
447
- "cite_spans": [],
448
- "ref_spans": [],
449
- "eq_spans": [],
450
- "section": "Capturing structural errors in grammar rules",
451
- "sec_num": "4.2."
452
- },
453
- {
454
- "text": "Error rules can thus be used to explicitly describe an error and to issue error messages. However, so far we have made very limited use of them, as controlling their interaction with 'normal' rules and with the feature overriding mechanism is not entirely easy. In fact, they are consistently used only to identify incorrect sequences of finite verbal forms or sentences missing a finite verb. To this sparse use of error rules corresponds, on the other hand, an extensive exploitation of the feature overriding mechanism. This strategy allows us to keep the number of rules in the grammar relatively low, but relies on a careful manual adjustment of the weights attached to the various features in the rules.",
455
- "cite_spans": [],
456
- "ref_spans": [],
457
- "eq_spans": [],
458
- "section": "Capturing structural errors in grammar rules",
459
- "sec_num": "4.2."
460
- },
461
- {
462
- "text": "The project's access to a set of parallel unedited and proofread texts has made it possible to automate the evaluation of the system's linguistic functionality. A tool has been implemented to compare the results obtained by the system with the corrections suggested by the publisher's human proofreaders in order to derive measures telling us how well the system performed on recall (lexical coverage as well as coverage of errors), precision (percentage of correct flaggings), as well as suggestion adequacy (hits, misses and no suggestions offered). The reader is referred to (Paggio and Music, 1998) for more details on the evaluation methodology. The automatic procedure was used to evaluate the system during development, and in connection with the user validation. Testing was done on constructed test suites displaying examples of the errors targeted in the project and with text excerpts from the parallel corpora~ Figure 2 shows error recall and suggestion adequacy figures for the various error types represented in the test suites. These figures are very positive, especially with regard to the treatment of grammar errors. To make a comparison with a commercial product, the Danish version of the spelling and grammar checker provided by Microsoft Word does not flag any of the grammar errors. Figure 3 shows how the system performed on one of the test corpora. The corpus was assembled by mixing short excerpts containing relevant grammar errors and randomly chosen text. Since unlike test suites, the corpus also contains correct text, the figure this time also shows lexical coverage and precision figures. The corpus consists of 278 sentences, with an average length of 15.18 words per sentence. It may be surprising to see that it contains a limited number of errors, but it must be remembered that the texts targeted in the project are written by experienced journalists.",
463
- "cite_spans": [
464
- {
465
- "start": 578,
466
- "end": 602,
467
- "text": "(Paggio and Music, 1998)",
468
- "ref_id": "BIBREF8"
469
- }
470
- ],
471
- "ref_spans": [
472
- {
473
- "start": 923,
474
- "end": 931,
475
- "text": "Figure 2",
476
- "ref_id": null
477
- },
478
- {
479
- "start": 1306,
480
- "end": 1314,
481
- "text": "Figure 3",
482
- "ref_id": "FIGREF1"
483
- }
484
- ],
485
- "eq_spans": [],
486
- "section": "Evaluation and Conclusion",
487
- "sec_num": "5"
488
- },
489
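
The evaluation measures named in this passage are straightforward to compute once system flags and proofreader corrections are aligned. The sketch below assumes a reduced (position, suggested form) representation; the project's actual comparison tool is described in (Paggio and Music, 1998), not here.

```python
# Schematic evaluation against proofread text. Flags and corrections are
# assumed to be reduced to position -> suggested-form mappings; this is an
# illustration, not the project's tool.
def evaluate(system_flags, proof_corrections):
    flagged = set(system_flags)
    errors = set(proof_corrections)
    true_flags = flagged & errors
    recall = len(true_flags) / len(errors) if errors else 1.0
    precision = len(true_flags) / len(flagged) if flagged else 1.0
    hits = sum(1 for p in true_flags
               if system_flags[p] == proof_corrections[p])
    return {"error recall": recall, "precision": precision,
            "suggestion hits": hits, "suggestion misses": len(true_flags) - hits}

system = {12: "interessante", 40: "projekter", 77: "Danmark"}
proofread = {12: "interessante", 40: "projekterne"}
print(evaluate(system, proofread))
# -> error recall 1.0, precision 0.67, one suggestion hit and one miss
```
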
- {
490
- "text": "The corpus was processed in 58 cpu-seconds on an HP 9000/B160. As expected, the system performs less well than on the test suites, and in general precision is clearly too low. However, we still consider these results encouraging given the relatively small resources the project has been able to spend on grammar development, and we We regard error coverage as quite satisfactory for a research prototype. In a comparative test made on a similar (slightly smaller) corpus, SCARR/E obtained 58.1% error coverage, and Word 53.5%. To quote a figure from another recently published test (Martins et al., 1998), the ReGra system is reported to miss 48.1% real errors. It is worth noting that ReGra has much more extensive linguistic resources available than SCARRIE, i.e. a dictionary of 1.5 million words and a grammar of 600 production rules. Most of the errors not found by SCAR-RIE in the test have to do with punctuation and other stylistic matters not treated in the project. There are also, however, agreement errors which go unnoticed. These failures are due to one of two reasons: either that no parse has been produced for the sentence in question, or that the grammar has produced a wrong analysis.",
491
- "cite_spans": [],
492
- "ref_spans": [],
493
- "eq_spans": [],
494
- "section": "Evaluation and Conclusion",
495
- "sec_num": "5"
496
- },
497
- {
498
- "text": "The precision obtained is at least at first sight much too low. On the same test corpus, however, Word only reached 15.9% precision. On closer inspection, 72 of the bad flags produced by SCARRIE turned out to be due to unrecognised proper names. Disregarding those, precision goes up to 34.9%. As was mentioned early, SCARRIE has a facility for guessing unknown proper names on the basis of their frequency of occurrence in the text. But since the test corpus consists of Short unrelated excerpts, a large number of proper names only occur once or twice. To get an impression of how the system would perform in a situation where the same proper names and unknown words had a higher frequency of occurrence, we doubled the test corpus by simply repeating the same text twice. As expected, precision increased. The system produced 178 flags, 60 of which were correct (39.7%). This compares well with the 40% precision reported for instance for ReGra.",
499
- "cite_spans": [],
500
- "ref_spans": [],
501
- "eq_spans": [],
502
- "section": "Evaluation and Conclusion",
503
- "sec_num": "5"
504
- },
505
- {
506
- "text": "In addition to the problem of unkown proper names, false flags are related to unrecognised acronyms and compounds (typically forms containing acronyms or dashes), and a not very precise treatment of capitalisation. Only 13 false flags are due to wrong grammar analyses caused either by the fragment approach or by the grammar's limited coverage. In particular, genitive phrases, which are not treated at the moment, are responsible for most of these false alarms.",
507
- "cite_spans": [],
508
- "ref_spans": [],
509
- "eq_spans": [],
510
- "section": "Evaluation and Conclusion",
511
- "sec_num": "5"
512
- },
513
- {
514
- "text": "In conclusion, we consider the results obtained so far promising, and the problems revealed by the evaluation tractable within the current system design. In particular, future development should focus on treating stylistic matters such as capitalisation and punctuation which have not been in focus in the current prototype. The coverage of the grammar, in particular the treatment of genitive phrases, should also be further developed. The data pro-vided by the evaluation reported on in this paper, however, are much too limited to base further development on. Therefore, more extensive testing and debugging should also be carried out.",
515
- "cite_spans": [],
516
- "ref_spans": [],
517
- "eq_spans": [],
518
- "section": "Evaluation and Conclusion",
519
- "sec_num": "5"
520
- },
521
- {
522
- "text": "In addition, two aspects of the system that have only be touched on in this paper would be worth further attention: one is the mechanism for the treatment of split-ups and run-ons, which as mentioned earlier is not well-integrated at the moment; the other is the weight adjustment process, which is done manually at the moment, and for which the adoption of a semiautomatic tool could be considered.",
523
- "cite_spans": [],
524
- "ref_spans": [],
525
- "eq_spans": [],
526
- "section": "Evaluation and Conclusion",
527
- "sec_num": "5"
528
- },
529
- {
530
- "text": "The errorsTo ensure the coverage of relevant error types, a set of parallel unedited and proofread texts provided by the Danish end users has been collected. This text collection consists of newspaper and magazine articles published in 1997 for a total of 270,805 running words. The articles have been collected in their raw version, as well as in the edited version provided by the publisher's own proofreaders. Although not very large in number of words, th@ corpus consists of excerpts from 450 different articles to ensure a good spread of lexical domains and error types. The corpus has been used to construct test suites for progress evaluation, and also to guide grammar development. The aim set for",
531
- "cite_spans": [],
532
- "ref_spans": [],
533
- "eq_spans": [],
534
- "section": "",
535
- "sec_num": null
536
- }
545
- ],
546
- "back_matter": [],
547
- "bib_entries": {
548
- "BIBREF0": {
549
- "ref_id": "b0",
550
- "title": "EasyEnglish: a tool for improving document quality",
551
- "authors": [
552
- {
553
- "first": "A",
554
- "middle": [],
555
- "last": "Bernth",
556
- "suffix": ""
557
- }
558
- ],
559
- "year": 1997,
560
- "venue": "Proceedings of \u2022 the Fifth Conference on Applied Natural Language Processing",
561
- "volume": "",
562
- "issue": "",
563
- "pages": "",
564
- "other_ids": {},
565
- "num": null,
566
- "urls": [],
567
- "raw_text": "A. Bernth. 1997. EasyEnglish: a tool for im- proving document quality. In Proceedings of \u2022 the Fifth Conference on Applied Natural Lan- guage Processing.",
568
- "links": null
569
- },
570
- "BIBREF1": {
571
- "ref_id": "b1",
572
- "title": "JDII: Parsing Italian with a robust constraint grammar",
573
- "authors": [
574
- {
575
- "first": "A",
576
- "middle": [],
577
- "last": "Bolioli",
578
- "suffix": ""
579
- },
580
- {
581
- "first": "L",
582
- "middle": [],
583
- "last": "Dini",
584
- "suffix": ""
585
- },
586
- {
587
- "first": "G",
588
- "middle": [],
589
- "last": "Malnati",
590
- "suffix": ""
591
- }
592
- ],
593
- "year": 1992,
594
- "venue": "Proceedings of COLING:92",
595
- "volume": "",
596
- "issue": "",
597
- "pages": "1003--1007",
598
- "other_ids": {},
599
- "num": null,
600
- "urls": [],
601
- "raw_text": "A. Bolioli, L. Dini, and G. Malnati. 1992. JDII: Parsing Italian with a robust constraint grammar. In Proceedings of COLING:92, pages 1003-1007.",
602
- "links": null
603
- },
604
- "BIBREF2": {
605
- "ref_id": "b2",
606
- "title": "GramCheck: A grammar and style checker",
607
- "authors": [
608
- {
- "first": "Flora",
- "middle": [
- "Ram\u00edrez"
- ],
- "last": "Bustamante",
- "suffix": ""
- },
- {
- "first": "Fernando",
- "middle": [
- "S\u00e1nchez"
- ],
- "last": "Le\u00f3n",
- "suffix": ""
- }
632
- ],
633
- "year": 1996,
634
- "venue": "Proceedings of COLING-96",
635
- "volume": "",
636
- "issue": "",
637
- "pages": "175--181",
638
- "other_ids": {},
639
- "num": null,
640
- "urls": [],
641
- "raw_text": "Flora Ram~rez Bustamante and Fer- nando S\u00a3nchez L@on. 1996. GramCheck: A grammar and style checker. In Proceedings of COLING-96, pages 175-181, Copenhagen, Denmark.",
642
- "links": null
643
- },
644
- "BIBREF3": {
645
- "ref_id": "b3",
646
- "title": "Grapheme-tophoneme rules for the Danish component of the SCARRIE project",
647
- "authors": [
648
- {
649
- "first": "",
650
- "middle": [],
651
- "last": "Peter Molb~ek Hansen",
652
- "suffix": ""
653
- }
654
- ],
655
- "year": 1999,
656
- "venue": "Datalingvistisk Forenings drsmcde 1998 i Kcbehavn, Proceedings, number 25 in LAMBDA",
657
- "volume": "",
658
- "issue": "",
659
- "pages": "79--91",
660
- "other_ids": {},
661
- "num": null,
662
- "urls": [],
663
- "raw_text": "Peter Molb~ek Hansen. 1999. Grapheme-to- phoneme rules for the Danish component of the SCARRIE project. In Hanne E. Thomsen and Sabine'Kirchmeier-Andersen, editors, Datalingvistisk Forenings drsmcde 1998 i Kcbehavn, Proceedings, number 25 in LAMBDA, pages 79-91. Institut for datal- ingvistik, Handelshcjskolen i Kcbenhaven.",
664
- "links": null
665
- },
666
- "BIBREF4": {
667
- "ref_id": "b4",
668
- "title": "A chart-based framework for grammar checking: Initial studies",
669
- "authors": [
670
- {
671
- "first": "Anna",
672
- "middle": [],
673
- "last": "S\u00a3gvall Hein",
674
- "suffix": ""
675
- }
676
- ],
677
- "year": 1998,
678
- "venue": "Proceedings of Nodalida-98",
679
- "volume": "",
680
- "issue": "",
681
- "pages": "",
682
- "other_ids": {},
683
- "num": null,
684
- "urls": [],
685
- "raw_text": "Anna S\u00a3gvall Hein. 1998. A chart-based frame- work for grammar checking: Initial studies. In Proceedings of Nodalida-98.",
686
- "links": null
687
- },
688
- "BIBREF5": {
689
- "ref_id": "b5",
690
- "title": "A prototype of a grammar checker for Czech",
691
- "authors": [
692
- {
693
- "first": "Tom~",
694
- "middle": [],
695
- "last": "Holan",
696
- "suffix": ""
697
- },
698
- {
699
- "first": "Vladislav",
700
- "middle": [],
701
- "last": "Kubofi",
702
- "suffix": ""
703
- }
704
- ],
705
- "year": 1997,
706
- "venue": "Proceedings of ANLP'97",
707
- "volume": "",
708
- "issue": "",
709
- "pages": "",
710
- "other_ids": {},
711
- "num": null,
712
- "urls": [],
713
- "raw_text": "Tom~ Holan, Vladislav Kubofi, and Mar- tin Pl\u00a3tek. 1997. A prototype of a gram- mar checker for Czech. In Proceedings of ANLP'97.",
714
- "links": null
715
- },
716
- "BIBREF6": {
717
- "ref_id": "b6",
718
- "title": "Techniques for automatically correcting words in text",
719
- "authors": [
720
- {
721
- "first": "Karen",
722
- "middle": [],
723
- "last": "Kukich",
724
- "suffix": ""
725
- }
726
- ],
727
- "year": 1992,
728
- "venue": "A CM Comput-_ ing Surveys",
729
- "volume": "24",
730
- "issue": "4",
731
- "pages": "377--439",
732
- "other_ids": {},
733
- "num": null,
734
- "urls": [],
735
- "raw_text": "Karen Kukich. 1992. Techniques for automati- cally correcting words in text. A CM Comput- _ ing Surveys, 24(4):377-439.",
736
- "links": null
737
- },
738
- "BIBREF7": {
739
- "ref_id": "b7",
740
- "title": "Linguistic issues in the development ofReGra: a grammar Checker for Brazilian Portuguese",
741
- "authors": [
742
- {
- "first": "Ronaldo",
- "middle": [
- "Teixeira"
- ],
- "last": "Martins",
- "suffix": ""
- },
- {
- "first": "Ricardo",
- "middle": [],
- "last": "Hasegawa",
- "suffix": ""
- },
- {
- "first": "Maria",
- "middle": [
- "Volpe"
- ],
- "last": "Nunes",
- "suffix": ""
- },
- {
- "first": "Gisele",
- "middle": [],
- "last": "Montilha",
- "suffix": ""
- },
- {
- "first": "Osvaldo",
- "middle": [
- "Novais",
- "De"
- ],
- "last": "Oliveira",
- "suffix": "Jr"
- }
774
- ],
775
- "year": 1998,
776
- "venue": "Natural Language Engineering",
777
- "volume": "4",
778
- "issue": "4",
779
- "pages": "287--307",
780
- "other_ids": {},
781
- "num": null,
782
- "urls": [],
783
- "raw_text": "Ronaldo Teixeira Martins, Ricardo Hasegawa, Maria Volpe Nunes, Gisele Monthila, and Os- valdo Novais De Oliveira Jr. 1998. Linguistic issues in the development ofReGra: a gram- mar Checker for Brazilian Portuguese. Natu- ral Language Engineering, 4(4):287-307, De- cember.",
784
- "links": null
785
- },
786
- "BIBREF8": {
787
- "ref_id": "b8",
788
- "title": "Evaluation in the SCARRIE project",
789
- "authors": [
790
- {
- "first": "Patrizia",
- "middle": [],
- "last": "Paggio",
- "suffix": ""
- },
- {
- "first": "Bradley",
- "middle": [],
- "last": "Music",
- "suffix": ""
- }
808
- ],
809
- "year": 1998,
810
- "venue": "Proceedings of the First International Conference on Language Resources ~ Evaluation",
811
- "volume": "",
812
- "issue": "",
813
- "pages": "277--282",
814
- "other_ids": {},
815
- "num": null,
816
- "urls": [],
817
- "raw_text": "Patrizia' Paggio and Bradley Music. 1998. Eval- uation in the SCARRIE project. In Pro- ceedings of the First International Conference on Language Resources ~ Evaluation, pages 277-282. Granada, Spain.",
818
- "links": null
819
- },
820
- "BIBREF9": {
821
- "ref_id": "b9",
822
- "title": "Treatment of grammatical errors and evaluation in SCARRIE",
823
- "authors": [
824
- {
825
- "first": "Patrizia",
826
- "middle": [],
827
- "last": "Paggio",
828
- "suffix": ""
829
- }
830
- ],
831
- "year": 1999,
832
- "venue": "",
833
- "volume": "",
834
- "issue": "",
835
- "pages": "",
836
- "other_ids": {},
837
- "num": null,
838
- "urls": [],
839
- "raw_text": "Patrizia Paggio. 1999. Treatment of grammat- ical errors and evaluation in SCARRIE. In Hanne E. Thomsen and Sabine Kirchmeier-",
840
- "links": null
841
- },
842
- "BIBREF10": {
843
- "ref_id": "b10",
844
- "title": "Datalingvistisk Forenings drsmCde 1998 i KCbehavn, Proceedings, number 25 in LAMBDA",
845
- "authors": [
846
- {
847
- "first": "",
848
- "middle": [],
849
- "last": "Andersen",
850
- "suffix": ""
851
- }
852
- ],
853
- "year": null,
854
- "venue": "",
855
- "volume": "",
856
- "issue": "",
857
- "pages": "65--78",
858
- "other_ids": {},
859
- "num": null,
860
- "urls": [],
861
- "raw_text": "Andersen, editors, Datalingvistisk Forenings drsmCde 1998 i KCbehavn, Proceedings, num- ber 25 in LAMBDA, pages 65-78. Insti- tut for datalingvistik, Handelshcjskolen i Kcbenhaven.",
862
- "links": null
863
- },
864
- "BIBREF11": {
865
- "ref_id": "b11",
866
- "title": "Three types of grammatical errors in Danish",
867
- "authors": [
868
- {
869
- "first": "Claus",
870
- "middle": [],
871
- "last": "Povlsen",
872
- "suffix": ""
873
- }
874
- ],
875
- "year": 1998,
876
- "venue": "",
877
- "volume": "",
878
- "issue": "",
879
- "pages": "",
880
- "other_ids": {},
881
- "num": null,
882
- "urls": [],
883
- "raw_text": "Claus Povlsen. 1998. Three types of gram- matical errors in Danish. Technical report, Copenhagen: Center for Sprogteknologi.",
884
- "links": null
885
- },
886
- "BIBREF12": {
887
- "ref_id": "b12",
888
- "title": "Error typology for automatic proof-reading purposes",
889
- "authors": [
890
- {
891
- "first": "Olga",
892
- "middle": [],
893
- "last": "Rambell",
894
- "suffix": ""
895
- }
896
- ],
897
- "year": 1997,
898
- "venue": "",
899
- "volume": "",
900
- "issue": "",
901
- "pages": "",
902
- "other_ids": {},
903
- "num": null,
904
- "urls": [],
905
- "raw_text": "Olga Rambell. 1997. Error typology for auto- matic proof-reading purposes. Technical re- port, Uppsala: Uppsala University. :",
906
- "links": null
907
- },
908
- "BIBREF13": {
909
- "ref_id": "b13",
910
- "title": "Triphone analysis: a combined method for the correction of orthographical and typographical errors",
911
- "authors": [
912
- {
913
- "first": "Brigitte",
914
- "middle": [],
915
- "last": "Van Berkel",
916
- "suffix": ""
917
- },
918
- {
919
- "first": "Koenra~d De",
920
- "middle": [],
921
- "last": "Smedt",
922
- "suffix": ""
923
- }
924
- ],
925
- "year": 1988,
926
- "venue": "Proceedings of the 2nd conference on Applied Natural Language Processing",
927
- "volume": "",
928
- "issue": "",
929
- "pages": "77--83",
930
- "other_ids": {},
931
- "num": null,
932
- "urls": [],
933
- "raw_text": "Brigitte van Berkel and Koenra~d De Smedt. 1988. Triphone analysis: a combined method for the correction of orthographical and ty- pographical errors. In Proceedings of the 2nd conference on Applied Natural Language Pro- cessing, pages 77-83. ACL, Austin.",
934
- "links": null
935
- },
936
- "BIBREF14": {
937
- "ref_id": "b14",
938
- "title": "Detection and correction of morpho-syntactic errors in shift-reduce parsing",
939
- "authors": [],
940
- "year": 1991,
941
- "venue": "Tomita's Algorithm: Extensions and Applications, number 91-68 in Memoranda Informatica",
942
- "volume": "",
943
- "issue": "",
944
- "pages": "69--78",
945
- "other_ids": {},
946
- "num": null,
947
- "urls": [],
948
- "raw_text": "Theo Vosse. 1991. Detection and correction of morpho-syntactic errors in shift-reduce parsing. In R. Heemels, A. Nijholt, and K. Sikkel, editors, Tomita's Algorithm: Ex- tensions and Applications, number 91-68 in Memoranda Informatica, pages 69-78. Uni- versity of Twente/",
949
- "links": null
950
- },
951
- "BIBREF15": {
952
- "ref_id": "b15",
953
- "title": "Detecting and correcting morpho-syntactic errors in real texts",
954
- "authors": [],
955
- "year": 1992,
956
- "venue": "Proceedings of the Third Conference on Applied Natural Language Processing",
957
- "volume": "",
958
- "issue": "",
959
- "pages": "111--118",
960
- "other_ids": {},
961
- "num": null,
962
- "urls": [],
963
- "raw_text": "Theo Vosse. 1992. Detecting and correcting morpho-syntactic errors in real texts. In Pro- ceedings of the Third Conference on Applied Natural Language Processing, pages 111-118, Trento, Italy.",
964
- "links": null
965
- },
966
- "BIBREF16": {
967
- "ref_id": "b16",
968
- "title": "The Word Connection -Grammar-based Spelling Error Correction in Dutch",
969
- "authors": [
970
- {
- "first": "Theo",
- "middle": [
- "G"
- ],
- "last": "Vosse",
- "suffix": ""
- }
982
- ],
983
- "year": 1994,
984
- "venue": "",
985
- "volume": "",
986
- "issue": "",
987
- "pages": "",
988
- "other_ids": {},
989
- "num": null,
990
- "urls": [],
991
- "raw_text": "Theo G. Vosse. 1994. The Word Connection - Grammar-based Spelling Error Correction in Dutch. Ph.D. thesis, Rijksuniversiteit at Lei- den: the Netherlands. ISBN 90-75296-01-0.",
992
- "links": null
993
- }
994
- },
995
- "ref_entries": {
996
- "FIGREF0": {
997
- "num": null,
998
- "text": "and run-ons.",
999
- "type_str": "figure",
1000
- "uris": null
1001
- },
1002
- "FIGREF1": {
1003
- "num": null,
1004
- "text": "Test corpus evaluation believe they can be improved.",
1005
- "type_str": "figure",
1006
- "uris": null
1007
- }
1008
- }
1009
- }
1010
- }
Full_text_JSON/prefixA/json/A00/A00-1036.json DELETED
@@ -1,806 +0,0 @@
1
- {
2
- "paper_id": "A00-1036",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:29.087893Z"
6
- },
7
- "title": "Linguistic Knowledge can Improve Information Retrieval",
8
- "authors": [
9
- {
10
- "first": "William",
11
- "middle": [
12
- "A"
13
- ],
14
- "last": "Woods",
15
- "suffix": "",
16
- "affiliation": {},
17
- "email": ""
18
- },
19
- {
20
- "first": "Lawrence",
21
- "middle": [
22
- "A"
23
- ],
24
- "last": "Bookman",
25
- "suffix": "",
26
- "affiliation": {},
27
- "email": ""
28
- },
29
- {
30
- "first": "Ann",
31
- "middle": [],
32
- "last": "Houston",
33
- "suffix": "",
34
- "affiliation": {},
35
- "email": ""
36
- },
37
- {
38
- "first": "Robert",
39
- "middle": [
40
- "J"
41
- ],
42
- "last": "Kuhns",
43
- "suffix": "",
44
- "affiliation": {},
45
- "email": ""
46
- },
47
- {
48
- "first": "Paul",
49
- "middle": [],
50
- "last": "Martin",
51
- "suffix": "",
52
- "affiliation": {},
53
- "email": ""
54
- },
55
- {
56
- "first": "Stephen",
57
- "middle": [],
58
- "last": "Green",
59
- "suffix": "",
60
- "affiliation": {},
61
- "email": ""
62
- }
63
- ],
64
- "year": "",
65
- "venue": null,
66
- "identifiers": {},
67
- "abstract": "This paper describes the results of some experiments using a new approach to information access that combines techniques from natural language processing and knowledge representation with a penaltybased technique for relevance estimation and passage retrieval. Unlike many attempts to combine natural language processing with information retrieval, these results show substantial benefit from using linguistic knowledge.",
68
- "pdf_parse": {
69
- "paper_id": "A00-1036",
70
- "_pdf_hash": "",
71
- "abstract": [
72
- {
73
- "text": "This paper describes the results of some experiments using a new approach to information access that combines techniques from natural language processing and knowledge representation with a penaltybased technique for relevance estimation and passage retrieval. Unlike many attempts to combine natural language processing with information retrieval, these results show substantial benefit from using linguistic knowledge.",
74
- "cite_spans": [],
75
- "ref_spans": [],
76
- "eq_spans": [],
77
- "section": "Abstract",
78
- "sec_num": null
79
- }
80
- ],
81
- "body_text": [
82
- {
83
- "text": "1 Introduction An online information seeker often fails to find what is wanted because the words used in the request are different from the words used in the relevant material. Moreover, the searcher usually spends a significant amount of time reading retrieved material in order to determine whether it contains the information sought. To address these problems, a system has been developed at Sun Microsystems Laboratories (Ambroziak and Woods, 1998) that uses techniques from natural language processing and knowledge representation, with a technique for dynamic passage selection and scoring, to significantly improve retrieval performance. This system is able to locate specific passages in the indexed material where the requested information appears to be, and to score those passages with a penalty-based score that is highly correlated with the likelihood that they contain relevant information. This ability, which we call \"Precision Content Retrieval\" is achieved by combining a system for Conceptual Indexing with an algorithm for Relaxation-Ranking Specific Passage Retrieval.",
84
- "cite_spans": [
85
- {
86
- "start": 425,
87
- "end": 452,
88
- "text": "(Ambroziak and Woods, 1998)",
89
- "ref_id": "BIBREF0"
90
- }
91
- ],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "",
95
- "sec_num": null
96
- },
97
- {
98
- "text": "In this paper, we show how linguistic knowledge is used to improve search effectiveness in this system. This is of particular interest, since many previous attempts to use linguistic knowledge to improve information retrieval have met with little or mixed success (Fagan, 1989; Lewis and Sparck Jones, 1996; Sparck Jones, 1998; Varile and Zampolli, 1997; Voorhees, 1993; Mandala et al., 1999) (but see the latter for some successes as well).",
99
- "cite_spans": [
100
- {
101
- "start": 264,
102
- "end": 277,
103
- "text": "(Fagan, 1989;",
104
- "ref_id": "BIBREF2"
105
- },
106
- {
107
- "start": 278,
108
- "end": 307,
109
- "text": "Lewis and Sparck Jones, 1996;",
110
- "ref_id": "BIBREF4"
111
- },
112
- {
113
- "start": 308,
114
- "end": 327,
115
- "text": "Sparck Jones, 1998;",
116
- "ref_id": "BIBREF8"
117
- },
118
- {
119
- "start": 328,
120
- "end": 354,
121
- "text": "Varile and Zampolli, 1997;",
122
- "ref_id": "BIBREF9"
123
- },
124
- {
125
- "start": 355,
126
- "end": 370,
127
- "text": "Voorhees, 1993;",
128
- "ref_id": "BIBREF10"
129
- },
130
- {
131
- "start": 371,
132
- "end": 392,
133
- "text": "Mandala et al., 1999)",
134
- "ref_id": "BIBREF5"
135
- }
136
- ],
137
- "ref_spans": [],
138
- "eq_spans": [],
139
- "section": "",
140
- "sec_num": null
141
- },
142
- {
143
- "text": "* Lawrence Bookman is now at Torrent Systems, Inc.",
144
- "cite_spans": [],
145
- "ref_spans": [],
146
- "eq_spans": [],
147
- "section": "",
148
- "sec_num": null
149
- },
150
- {
151
- "text": "The conceptual indexing and retrieval system used for these experiments automatically extracts words and phrases from unrestricted text and organizes them into a semantic network that integrates syntactic, semantic, and morphological relationships. The resulting conceptual taxonomy (Woods, 1997) is used by a specific passage-retrieval algorithm to deal with many paraphrase relationships and to find specific passages of text where the information sought is likely to occur. It uses a lexicon containing syntactic, semantic, and morphological information about words, word senses, and phrases to provide a base source of semantic and morphological relationships that are used to organize the taxonomy. In addition, it uses an extensive system of knowledge-based morphological rules and functions to analyze words that are not already in its lexicon, in order to construct new lexical entries for previously unknown words (Woods, 2000) . In addition to rules for handling derived and inflected forms of known words, the system includes rules for lexical compounds and rules that are capable of making reasonable guesses for totally unknown words.",
152
- "cite_spans": [
153
- {
154
- "start": 283,
155
- "end": 296,
156
- "text": "(Woods, 1997)",
157
- "ref_id": "BIBREF12"
158
- },
159
- {
160
- "start": 923,
161
- "end": 936,
162
- "text": "(Woods, 2000)",
163
- "ref_id": "BIBREF13"
164
- }
165
- ],
166
- "ref_spans": [],
167
- "eq_spans": [],
168
- "section": "Conceptual Indexing",
169
- "sec_num": null
170
- },
171
- {
172
- "text": "A pilot version of this indexing and retrieval system, implemented in Lisp, uses a collection of approximately 1200 knowledge-based morphological rules to extend a core lexicon of approximately 39,000 words to give coverage that exceeds that of an English lexicon of more than 80,000 base forms (or 150,000 base plus inflected forms). Later versions of the conceptual indexing and retrieval system, implemented in C++, use a lexicon of approximately 150,000 word forms that is automatically generated by the Lisp-based morphological analysis from its core lexicon and an input word list. The base lexicon is extended further by an extensive name dictionary and by further morphological analysis of unknown words at indexing time. This paper will describe some experiments using several versions of this system. In particular, it will focus on the role that the linguistic knowledge sources play in its operation.",
173
- "cite_spans": [],
174
- "ref_spans": [],
175
- "eq_spans": [],
176
- "section": "Conceptual Indexing",
177
- "sec_num": null
178
- },
179
- {
180
- "text": "The lexicon used by the conceptual indexing system contains syntactic information that can be used for the analysis of phrases, as well as morphological and semantic information that is used to relate more specific concepts to more general concepts in the conceptual taxonomy. This information is integrated into the conceptual taxonomy by considering base forms of words to subsume their derived and inflected forms (\"root subsumption\") and more general terms to subsume more specific terms. The system uses these relationships as the basis for inferring subsumption relationships between more general phrases and more specific phrases according to the intensional subsumption logic of Woods (Woods, 1991) .",
181
- "cite_spans": [
182
- {
183
- "start": 693,
184
- "end": 706,
185
- "text": "(Woods, 1991)",
186
- "ref_id": "BIBREF11"
187
- }
188
- ],
189
- "ref_spans": [],
190
- "eq_spans": [],
191
- "section": "Conceptual Indexing",
192
- "sec_num": null
193
- },
194
- {
195
- "text": "The largest base lexicon used by this system currently contains semantic subsumption information for something in excess of 15,000 words. This information consists of basic \"kind of\" and \"instance of\" information such as the fact that book is a kind of document and washing is a kind of cleaning. The lexicon also records morphological roots and affixes for words that are derived or inflected forms of other words, and information about different word senses and their interrelationships. For example, the conceptual indexing system is able to categorize becomes black as a kind of color change because becomes is an inflected form of become, become is a kind of change, and black is a color. Similarly, color disruption is recognized as a kind of color change, because the system recognizes disruption as a derived form of disrupt, which is known in the lexicon to be a kind of damage, which is known to be a kind of change.",
196
- "cite_spans": [],
197
- "ref_spans": [],
198
- "eq_spans": [],
199
- "section": "Conceptual Indexing",
200
- "sec_num": null
201
- },
202
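
A minimal sketch of the root-subsumption reasoning behind these examples, in Python; the two tables are invented stand-ins for the lexicon's kind-of and morphological-root information.

```python
# Sketch of root subsumption over a small conceptual taxonomy. The entries
# mirror examples in the text, but the data structures are invented here.
KIND_OF = {"book": "document", "washing": "cleaning", "become": "change",
           "disrupt": "damage", "damage": "change", "black": "color",
           "mail": "message"}
ROOT = {"becomes": "become", "disruption": "disrupt"}

def subsumers(term):
    """All concepts subsuming `term` via morphological roots and kind-of links."""
    result, current = {term}, term
    while True:
        current = ROOT.get(current) or KIND_OF.get(current)
        if current is None or current in result:
            return result
        result.add(current)

def subsumes(general, specific):
    return general in subsumers(specific)

print(subsumes("change", "becomes"))     # True: becomes -> become -> change
print(subsumes("change", "disruption"))  # True: disruption -> disrupt -> damage -> change
print(subsumes("message", "mail"))       # True: mail is a kind of message
```
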
- {
203
- "text": "When using root subsumption as a technique for information retrieval, it is important to have a core lexicon that knows correct morphological analyses for words that the rules would otherwise analyze incorrectly. For example, the following are some examples of words that could be analyzed incorrectly if the correct interpretations were not specified in the lexicon: Although they are not always as humorous as the above examples, there are over 3,000 words in the core lexicon of 39,000 English words that would receive false morphological analyses like the above examples, if the words were not already in the lexicon.",
204
- "cite_spans": [],
205
- "ref_spans": [],
206
- "eq_spans": [],
207
- "section": "Conceptual Indexing",
208
- "sec_num": null
209
- },
210
- {
211
- "text": "delegate (de4.",
212
- "cite_spans": [],
213
- "ref_spans": [],
214
- "eq_spans": [],
215
- "section": "Conceptual Indexing",
216
- "sec_num": null
217
- },
218
- {
219
- "text": "The system we are evaluating uses a technique called \"relaxation ranking\" to find specific passages where as many as possible of the different elements of a query occur near each other, preferably in the same form and word order and preferably closer together. Such passages are ranked by a penalty score that measures the degree of deviation from an exact match of the requested phrase, with smaller penalties being preferred. Differences in morphological form and formal subsumption of index terms by query terms introduce small penalties, while intervening words, unexplained permutations of word order, and crossing sentence boundaries introduce more significant penalties. Elements of a query that cannot be found nearby introduce substantial penalties that depend on the syntactic categories of the missing words.",
220
- "cite_spans": [],
221
- "ref_spans": [],
222
- "eq_spans": [],
223
- "section": "Relaxation Ranking and Specific Passage Retrieval",
224
- "sec_num": "3"
225
- },
226
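
A toy rendering of the penalty computation may help fix ideas. All penalty constants below are invented; the text only characterizes their relative sizes (morphological variants and subsumption cheap, gaps and reordering dearer, sentence boundaries and missing terms dearest).

```python
# Toy relaxation-ranking penalty; the constants are illustrative only.
GAP, ORDER, SENTENCE, MISSING, VARIANT = 0.2, 0.5, 1.0, 2.0, 0.1

def penalty(query_terms, passage):
    """passage: list of (token, matched_query_term_or_None, new_sentence)."""
    score, seen, gaps = 0.0, [], 0
    for token, match, new_sentence in passage:
        if new_sentence and seen:
            score += SENTENCE          # crossing a sentence boundary
        if match is None:
            gaps += 1                  # intervening, unmatched word
            continue
        score += gaps * GAP
        gaps = 0
        if token != match:             # inflected, derived or subsumed form
            score += VARIANT
        if seen and query_terms.index(match) < query_terms.index(seen[-1]):
            score += ORDER             # unexplained permutation of word order
        seen.append(match)
    score += MISSING * (len(query_terms) - len(set(seen)))
    return score

q = ["print", "message", "mail", "tool"]
p = [("print", "print", False), ("sends", None, False),
     ("mail", "message", False), ("mail", "mail", False),
     ("mailtool", "tool", False)]
print(round(penalty(q, p), 2))  # small penalty: all terms found nearby
```
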
- {
227
- "text": "When the conceptual indexing system is presented with a query, the relaxation-ranking retrieval algorithm searches through the conceptual taxonomy for appropriately related concepts and uses the positions of those concepts in the indexed material to find specific passages that are likely to address the information needs of the request. This search can find relationships from base forms of words to derived forms and from more general terms to more specific terms, by following paths in the conceptual taxonomy.",
228
- "cite_spans": [],
229
- "ref_spans": [],
230
- "eq_spans": [],
231
- "section": "Relaxation Ranking and Specific Passage Retrieval",
232
- "sec_num": "3"
233
- },
234
- {
235
- "text": "For example, the following is a passage retrieved by this system, when applied to the UNIX \u00ae operating system online documentation (the \"man pages\"):",
236
- "cite_spans": [],
237
- "ref_spans": [],
238
- "eq_spans": [],
239
- "section": "Relaxation Ranking and Specific Passage Retrieval",
240
- "sec_num": "3"
241
- },
242
- {
243
- "text": "Query: print a message from the mail tool",
244
- "cite_spans": [],
245
- "ref_spans": [],
246
- "eq_spans": [],
247
- "section": "Relaxation Ranking and Specific Passage Retrieval",
248
- "sec_num": "3"
249
- },
250
- {
251
- "text": "Print sends copies of all the selected mail items to your default printer. If there are no selected items, mailtool sends copies of those items you axe currently... The indicated passage is ranked 6th in a returned list of found passages, indicated by the 6 in the above display. The number -2.84 is the penalty score assigned to the passage, and the subsequent words print, mail, mail, and mailtool indicate the words in the text that are matched to the corresponding content words in the input query. In this case, print is matched to print, message to mail, mail to mail, and tool to mailtool, respectively. This is followed by the content of the actual passage located. The information provided in these hit displays gives the information seeker a clear idea of why the passage was retrieved and enables the searcher to quickly skip down the hit list with little time spent looking at irrelevant passages. In this case, it was easy to identify that the 6th ranked hit was the best one and contained the relevant information.",
252
- "cite_spans": [],
253
- "ref_spans": [],
254
- "eq_spans": [],
255
- "section": "-2.84 print mail mail mailtool",
256
- "sec_num": "6."
257
- },
258
- {
259
- "text": "The retrieval of this passage involved use of a semantic subsumption relationship to match message to mail, because the lexical entry for mail recorded that it was a kind of message. It used a morphological root subsumption to match tool to mailtool because the morphological analyzer analyzed the unknown word mailtool as a compound of mail and tool and recorded that its root was tool and that it was a kind of tool modified by mail. Taking away the ability to morphologically analyze unknown words would have blocked the retrieval of this passage, as would eliminating the lexical subsumption entry that recorded mail as a kind of message.",
260
- "cite_spans": [],
261
- "ref_spans": [],
262
- "eq_spans": [],
263
- "section": "-2.84 print mail mail mailtool",
264
- "sec_num": "6."
265
- },
266
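
A deliberately naive illustration of the compound analysis used for mailtool; the real morphological analyzer applies roughly 1200 knowledge-based rules, so the two-way split below is only a cartoon of one step.

```python
# Naive compound analysis in the spirit of the mailtool example; the
# decomposition heuristic and the KNOWN set are invented for illustration.
KNOWN = {"mail", "tool", "print", "message"}

def analyze_compound(word):
    """Split an unknown word into known parts; the last part is taken as the
    morphological root, the rest as a modifier ('mailtool' is a kind of 'tool')."""
    for i in range(1, len(word)):
        left, right = word[:i], word[i:]
        if left in KNOWN and right in KNOWN:
            return {"word": word, "root": right, "modifier": left}
    return None

print(analyze_compound("mailtool"))
# -> {'word': 'mailtool', 'root': 'tool', 'modifier': 'mail'}
```
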
- {
267
- "text": "Like other approaches to passage retrieval (Kaszkiel and Zobel, 1997; Salton et al., 1993; Callan, 1994) , the relaxation-ranking retrieval algorithm identifies relevant passages rather than simply identifying whole documents. However, unlike approaches that involve segmenting the material into paragraphs or other small passages before indexing, this algorithm dynamically constructs relevant passages in response to requests. When responding to a request, it uses information in the index about positions of concepts in the text to identify relevant passages. In response to a single request, identified passages may range in size from a single word or phrase to several sentences or paragraphs, depending on how much context is required to capture the various elements of the request.",
268
- "cite_spans": [
269
- {
270
- "start": 43,
271
- "end": 69,
272
- "text": "(Kaszkiel and Zobel, 1997;",
273
- "ref_id": "BIBREF3"
274
- },
275
- {
276
- "start": 70,
277
- "end": 90,
278
- "text": "Salton et al., 1993;",
279
- "ref_id": "BIBREF6"
280
- },
281
- {
282
- "start": 91,
283
- "end": 104,
284
- "text": "Callan, 1994)",
285
- "ref_id": "BIBREF1"
286
- }
287
- ],
288
- "ref_spans": [],
289
- "eq_spans": [],
290
- "section": "-2.84 print mail mail mailtool",
291
- "sec_num": "6."
292
- },
293
- {
294
- "text": "In a user interface to the specific passage retrieval system, retrieved passages are reported to the user in increasing order of penalty, together with the rank number, penalty score, information about which target terms match the corresponding query terms, and the content of the identified passage with some surrounding context as illustrated above. In one version of this technology, results are presented in a hypertext interface that allows the user to click on any of the presented items to see that passage in its entire context in the source document. In addition, the user can be presented with a display of portions of the conceptual taxonomy related to the terms in the request. This frequently reveals useful generalizations of the request that would find additional relevant information, and it also conveys an understanding of what concepts have been found in the material that will be matched by the query terms. For example, in one experiment, searching the online documentation for the Emacs text editor, the request jump to end of file resulted in feedback showing that jump was classified as a kind of move in the conceptual taxonomy. This led to a reformulated request, move to end of file, which successfully retrieved the passage 9o to end of buffer.",
295
- "cite_spans": [],
296
- "ref_spans": [],
297
- "eq_spans": [],
298
- "section": "-2.84 print mail mail mailtool",
299
- "sec_num": "6."
300
- },
301
- {
302
- "text": "In order to evaluate the effectiveness of the above techniques, a set of 90 queries was collected from a naive user of the UNIX operating system, 84 of which could be answered from the online documentation known as the man pages. A set of \"correct\" answers for each of these 84 queries was manually determined by an independent UNIX operating system expert, and a snapshot of the man pages collection was captured and indexed for retrieval. In order to compare this methodology with classical document retrieval techniques, we assign a ranking score to each document equal to the ra~king score of the best ranked passage that it contains.",
303
- "cite_spans": [],
304
- "ref_spans": [],
305
- "eq_spans": [],
306
- "section": "Experimental Evaluation",
307
- "sec_num": "4"
308
- },
309
- {
310
- "text": "In rating the performance of a given method, we compute average recall and precision values at 10 retrieved documents, and we also compute a \"success rate\" which is simply the percentage of queries for which an acceptable answer occurs in the top ten hits. The success rate is the principal factor on which we base our evaluations, since for this application, the user is not interested in subsequent answers once an acceptable answer has been found, and finding one answer for each of two requests is a substantially better result than finding two answers to one request and none for another.",
311
- "cite_spans": [],
312
- "ref_spans": [],
313
- "eq_spans": [],
314
- "section": "Experimental Evaluation",
315
- "sec_num": "4"
316
- },
317
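
Both the document-level score and the success rate described here are simple to state precisely; the hit-list format in this Python sketch is assumed for illustration.

```python
# Document scoring and success rate, as described in the two paragraphs above.
def document_scores(passage_hits):
    """passage_hits: list of (doc_id, penalty). A document's score is the
    smallest penalty among its passages (its best-ranked passage)."""
    best = {}
    for doc, pen in passage_hits:
        best[doc] = min(pen, best.get(doc, float("inf")))
    return sorted(best.items(), key=lambda kv: kv[1])

def success_rate(results_per_query, relevant_per_query, cutoff=10):
    """Fraction of queries with an acceptable answer in the top `cutoff` hits."""
    wins = sum(
        any(doc in relevant for doc, _ in docs[:cutoff])
        for docs, relevant in zip(results_per_query, relevant_per_query))
    return wins / len(results_per_query)

hits = [("mailtool.1", 2.84), ("lp.1", 1.2), ("mailtool.1", 5.0)]
ranked = document_scores(hits)                   # [('lp.1', 1.2), ('mailtool.1', 2.84)]
print(success_rate([ranked], [{"mailtool.1"}]))  # 1.0
```
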
- {
318
- "text": "These experiments were conducted using an experimental retrieval system that combined a Lispbased language processing stage with a C++ implementation of a conceptual indexer. The linguistic knowledge sources used in these experiments included a core lexicon of approximately 18,000 words, a substantial set of morphological rules, and specialized morphological algorithms covering inflections, prefixes, suffixes, lexical compounding, and a variety of special forms, including numbers, ordinals, Roman numerals, dates, phone numbers, and acronyms. In addition, they made use of a lexical subsumption taxonomy of approximately 3000 lexical subsumption relations, and a small set of semantic entailment axioms (e.g., display entails see, but is not a kind of see). This system is described in (Woods, 1997) . The database was a snapshot of the local man pages (frozen at the time of the experiment so that it wouldn't change during the experiment), consisting of approximately 1800 files of varying lengths and constituting a total of approximately 10 megabytes of text. Table 1 shows the results of comparing three versions of this technology with a textbook implementation of the standard tfid] algorithm (Salton, 1989) and with the SearchItWMsearch application developed at Sun Microsystems, Inc., which combines a Precision (10 docs) 2.9% 7.4% 7.3% not measured not measured simple morphological query expansion with a stateof-the-art commercial search engine. In the table, Recall II refers to the full conceptual indexing and search system with all of its knowledge sources and rules. The line labeled \"w/o morph\" refers to this system with its dynamic morphological rules turned off, and the line labeled \"w/o knowledge\" refers to this system with all of its knowledge sources and rules turned off. The table presents the success rate and the measured recall and precision values for 10 retrieved documents. We measured recall and precision at the 10 document level because internal studies of searching behavior had shown that users tended to give up if an answer was not found in the first ten ranked hits. We measured success rate, rather than recall and precision, for our ablation studies, because standard recall and precision measures are not sensitive to the distinction between finding multiple answers to a single request versus finding at least one answer for more requests. Table 1 shows that for this task, the relaxationranking passage retrieval algorithm without its supplementary knowledge sources (Recall II w/o knowledge) is roughly comparable in performance (42.9% versus 44.0% success rate) to a state-of-the-art commercial search engine (SearchIt) at the pure document retrieval task (neglecting the added benefit of locating the specific passages). Adding the knowledge in the core lexicon (which includes morphological relationships, semantic subsumption axioms, and entailment relationships), but without morphological analysis of unknown words (Recall II w/o morph), significantly improves these results (from 42.9% to 50.0%). Further adding the morphological analysis capability that automatically analyzes unknown words (deriving additional morphological relationships and some semantic subsumption relationships) significantly improves that result (from 50.0% to 60.7%). 
In contrast, we found that adding the same semantic subsumption relationships to the commercial search engine, using its provided thesaurus capability degraded its results, and results were still degraded when we added only those facts that we knew would help find relevant documents.",
319
- "cite_spans": [
320
- {
321
- "start": 791,
322
- "end": 804,
323
- "text": "(Woods, 1997)",
324
- "ref_id": "BIBREF12"
325
- },
326
- {
327
- "start": 1205,
328
- "end": 1219,
329
- "text": "(Salton, 1989)",
330
- "ref_id": "BIBREF7"
331
- }
332
- ],
333
- "ref_spans": [
334
- {
335
- "start": 1069,
336
- "end": 1076,
337
- "text": "Table 1",
338
- "ref_id": "TABREF1"
339
- },
340
- {
341
- "start": 1316,
342
- "end": 1336,
343
- "text": "Precision (10 docs)",
344
- "ref_id": "TABREF1"
345
- },
346
- {
347
- "start": 2392,
348
- "end": 2399,
349
- "text": "Table 1",
350
- "ref_id": "TABREF1"
351
- }
352
- ],
353
- "eq_spans": [],
354
- "section": "Experimental Evaluation",
355
- "sec_num": "4"
356
- },
357
- {
358
- "text": "It turned out that the additional relevant documents found were more than offset by additional irrelevant documents that were also ranked more highly.",
359
- "cite_spans": [],
360
- "ref_spans": [],
361
- "eq_spans": [],
362
- "section": "Discussion",
363
- "sec_num": "5"
364
- },
365
- {
366
- "text": "As mentioned above, comparing the relaxationranking algorithm with document retrieval systems measures only a part of the benefit of the specific passage retrieval methodology. Fully evaluating the quality and ranking of the retrieved passages involves a great many subtleties. However, two informal evaluations have been conducted that :shed some light on the benefits. The first of these was a pilot study of the technology at a telecommunications company. In that study, one user found that she could use a single query to the conceptual indexing system to find both of the items of information necessary to complete a task that formerly required searching two separate databases. The conclusion of that study was that the concept retrieval technology performs well enough to be useful to a person talking live with a customer. It was observed that the returned hits can be compared with one another easily and quickly by eye, and attention is taken directly to the relevant content of a large document: The automatic indexing was considered a plus compared with manual methods of content indexing. It was observed that an area of great potential may be in a form of knowledge management that involves organizing and providing intelligent access to small, unrelated \"nuggets\" of textual knowledge that are not amenable to conventional database archival or categorization.",
367
- "cite_spans": [],
368
- "ref_spans": [],
369
- "eq_spans": [],
370
- "section": "Anecdotal Evaluation of Specific Passage Retrieval Benefits",
371
- "sec_num": "6"
372
- },
373
- {
374
- "text": "A second experiment was conducted by the Human Resources Webmaster of a high-tech company, an experienced user of search engines who used this technology to index his company's internal HR web site. He then measured the time it took him to process 15 typical HR requests, first using conventional search tools that he had available, and then using the Conceptual Indexing technology. In both cases, he measured the time it took him to either find the answer or to conclude that the answer wasn't in the indexed material. His measured times for the total suite were 55 minutes using the conventional tools and 11 minutes using the conceptual indexing technology. Of course, this was an uncontrolled experiment, and there is some potential that information learned from searching with the traditional tools (which were apparently used first) might have provided some benefit when using the conceptual indexing technology. However, the fact that he found things with the latter that he did not find with the former and the magnitude of the time difference suggests that there is an effect, albeit perhaps not as great as the measurements. As a result of this experience, he concluded that he would expect many users to take much longer to find materials or give up, when using the traditional tools. He anticipated that after finding some initial materials, more time would be required, as users would end up having to call people for additional information. He estimated that users could spend up to an hour trying to get the information they needed...having to call someone, wait to make contact and finally get the information they needed. Using the conceptual indexing search engine, he expected that these times would be at least halved.",
375
- "cite_spans": [],
376
- "ref_spans": [],
377
- "eq_spans": [],
378
- "section": "Anecdotal Evaluation of Specific Passage Retrieval Benefits",
379
- "sec_num": "6"
380
- },
381
- {
382
- "text": "We have described some experiments using linguistic knowledge in an information retrieval system in which passages within texts are dynamically found in response to a query and are scored and ranked based on a relaxation of constraints. This is a different approach from previous methods of passage retrieval and from previous attempts to use linguistic knowledge in information retrieval. These experiments show that linguistic knowledge can significantly improve information retrieval performance when incorporated into a knowledge-based relaxation-ranking algorithm for specific passage retrieval.",
383
- "cite_spans": [],
384
- "ref_spans": [],
385
- "eq_spans": [],
386
- "section": "Conclusion",
387
- "sec_num": "7"
388
- },
389
- {
390
- "text": "The linguistic knowledge considered here includes the use of morphological relationships between words, taxonomic relationships between concepts, and general semantic entailment relationships between words and concepts. We have shown that the combination of these three knowledge sources can significantly improve performance in finding appropriate answers to specific queries when incorporated into a relaxation-ranking algorithm. It appears that the penalty-based relaxation-ranking algorithm figures crucially in this success, since the addition of such linguistic knowledge to traditional information retrieval models typically degrades retrieval performance rather than improving it, a pattern that was borne out in our own experiments.",
391
- "cite_spans": [],
392
- "ref_spans": [],
393
- "eq_spans": [],
394
- "section": "Conclusion",
395
- "sec_num": "7"
396
- }
397
- ],
398
- "back_matter": [
399
- {
400
- "text": "Many other people have been involved in creating the conceptual indexing and retrieval system de-scribed here. These include: Gary Adams, Jacek Ambroziak, Cookie Callahan, Chris Colby, Jim Flowers, Ellen Hays, Patrick Martin, Peter Norvig, Tony Passera, Philip Resnik, Robert Sproull, and Mark Torrance.Sun, Sun Microsystems, and SearchIt are trademarks or registered trademarks of Sun Microsystems, Inc. in the U.S. and other countries.UNIX is a registered trademark in the United States and other countries, exclusively licensed through X/Open Company, Ltd. UNIX est une marque enregistree aux Etats-Unis et dans d'autres pays et licenci~e exclusivement par X/Open Company Ltd.",
401
- "cite_spans": [],
402
- "ref_spans": [],
403
- "eq_spans": [],
404
- "section": "Acknowledgments",
405
- "sec_num": null
406
- }
407
- ],
408
- "bib_entries": {
409
- "BIBREF0": {
410
- "ref_id": "b0",
411
- "title": "Natural language technology in precision content retrieval",
412
- "authors": [
413
- {
414
- "first": "Jacek",
415
- "middle": [],
416
- "last": "Ambroziak",
417
- "suffix": ""
418
- },
419
- {
420
- "first": "A",
421
- "middle": [],
422
- "last": "William",
423
- "suffix": ""
424
- },
425
- {
426
- "first": "",
427
- "middle": [],
428
- "last": "Woods",
429
- "suffix": ""
430
- }
431
- ],
432
- "year": 1998,
433
- "venue": "International Conference on Natural Language Processing and Industrial Applications",
434
- "volume": "",
435
- "issue": "",
436
- "pages": "",
437
- "other_ids": {},
438
- "num": null,
439
- "urls": [],
440
- "raw_text": "Jacek Ambroziak and William A. Woods. 1998. Natural language technology in precision content retrieval. In International Conference on Natural Language Processing and Industrial Applications, Moncton, New Brunswick, Canada, August. www.stm.com/research/techrep/1998/abstract- 69.html.",
441
- "links": null
442
- },
443
- "BIBREF1": {
444
- "ref_id": "b1",
445
- "title": "Passage-level evidgnce in document retrieval",
446
- "authors": [
447
- {
448
- "first": "Jamie",
449
- "middle": [
450
- "P"
451
- ],
452
- "last": "Callan",
453
- "suffix": ""
454
- }
455
- ],
456
- "year": 1994,
457
- "venue": "SIGIR",
458
- "volume": "",
459
- "issue": "",
460
- "pages": "302--309",
461
- "other_ids": {},
462
- "num": null,
463
- "urls": [],
464
- "raw_text": "Jamie P. Callan. 1994. Passage-level evidgnce in document retrieval. SIGIR, pages 302-309.",
465
- "links": null
466
- },
467
- "BIBREF2": {
468
- "ref_id": "b2",
469
- "title": "The effectiveness of a nonsyntactic approach to automatic phrase indexing for document retrieval",
470
- "authors": [
471
- {
472
- "first": "J",
473
- "middle": [
474
- "L"
475
- ],
476
- "last": "Fagan",
477
- "suffix": ""
478
- }
479
- ],
480
- "year": 1989,
481
- "venue": "Journal of the American Society for Information Science",
482
- "volume": "40",
483
- "issue": "2",
484
- "pages": "115--132",
485
- "other_ids": {},
486
- "num": null,
487
- "urls": [],
488
- "raw_text": "J. L. Fagan. 1989. The effectiveness of a nonsyntac- tic approach to automatic phrase indexing for doc- ument retrieval. Journal of the American Society for Information Science, 40(2):115-132, March.",
489
- "links": null
490
- },
491
- "BIBREF3": {
492
- "ref_id": "b3",
493
- "title": "Passage retrieval revisited. SIGIR",
494
- "authors": [
495
- {
496
- "first": "Martin",
497
- "middle": [],
498
- "last": "Kaszkiel",
499
- "suffix": ""
500
- },
501
- {
502
- "first": "Justin",
503
- "middle": [],
504
- "last": "Zobel",
505
- "suffix": ""
506
- }
507
- ],
508
- "year": 1997,
509
- "venue": "",
510
- "volume": "",
511
- "issue": "",
512
- "pages": "302--309",
513
- "other_ids": {},
514
- "num": null,
515
- "urls": [],
516
- "raw_text": "Martin Kaszkiel and Justin Zobel. 1997. Passage retrieval revisited. SIGIR, pages 302-309.",
517
- "links": null
518
- },
519
- "BIBREF4": {
520
- "ref_id": "b4",
521
- "title": "Natural language processing for information retrieval",
522
- "authors": [
523
- {
524
- "first": "D",
525
- "middle": [],
526
- "last": "David",
527
- "suffix": ""
528
- },
529
- {
530
- "first": "Karen",
531
- "middle": [
532
- "Sparck"
533
- ],
534
- "last": "Lewis",
535
- "suffix": ""
536
- },
537
- {
538
- "first": "",
539
- "middle": [],
540
- "last": "Jones",
541
- "suffix": ""
542
- }
543
- ],
544
- "year": 1996,
545
- "venue": "CACM",
546
- "volume": "39",
547
- "issue": "1",
548
- "pages": "92--101",
549
- "other_ids": {},
550
- "num": null,
551
- "urls": [],
552
- "raw_text": "David D. Lewis and Karen Sparck Jones. 1996. Nat- ural language processing for information retrieval. CACM, 39(1):92-101.",
553
- "links": null
554
- },
555
- "BIBREF5": {
556
- "ref_id": "b5",
557
- "title": "Combining multiple evidence from different types of thesaurus for query expansion",
558
- "authors": [
559
- {
560
- "first": "Rila",
561
- "middle": [],
562
- "last": "Mandala",
563
- "suffix": ""
564
- },
565
- {
566
- "first": "Takenobu",
567
- "middle": [],
568
- "last": "Tokunaga",
569
- "suffix": ""
570
- },
571
- {
572
- "first": "Hozumi",
573
- "middle": [],
574
- "last": "Tanaka",
575
- "suffix": ""
576
- }
577
- ],
578
- "year": 1999,
579
- "venue": "Proceedings on the 22nd annual international A CM SIGIR conference on Research and development in information retrieval. ACM-SIGIR",
580
- "volume": "",
581
- "issue": "",
582
- "pages": "",
583
- "other_ids": {},
584
- "num": null,
585
- "urls": [],
586
- "raw_text": "Rila Mandala, Takenobu Tokunaga, and Hozumi Tanaka. 1999. Combining multiple evidence from different types of thesaurus for query expansion. In Proceedings on the 22nd annual international A CM SIGIR conference on Research and develop- ment in information retrieval. ACM-SIGIR.",
587
- "links": null
588
- },
589
- "BIBREF6": {
590
- "ref_id": "b6",
591
- "title": "Approaches to passage retrieval in full text information systems",
592
- "authors": [
593
- {
594
- "first": "Gerald",
595
- "middle": [],
596
- "last": "Salton",
597
- "suffix": ""
598
- },
599
- {
600
- "first": "James",
601
- "middle": [],
602
- "last": "Allan",
603
- "suffix": ""
604
- },
605
- {
606
- "first": "Chris",
607
- "middle": [],
608
- "last": "Buckley",
609
- "suffix": ""
610
- }
611
- ],
612
- "year": 1993,
613
- "venue": "SIGIR",
614
- "volume": "",
615
- "issue": "",
616
- "pages": "49--58",
617
- "other_ids": {},
618
- "num": null,
619
- "urls": [],
620
- "raw_text": "Gerald Salton, James Allan, and Chris Buckley. 1993. Approaches to passage retrieval in full text information systems. SIGIR, pages 49-58.",
621
- "links": null
622
- },
623
- "BIBREF7": {
624
- "ref_id": "b7",
625
- "title": "Automatic Text Processing",
626
- "authors": [
627
- {
628
- "first": "Gerard",
629
- "middle": [],
630
- "last": "Salton",
631
- "suffix": ""
632
- }
633
- ],
634
- "year": 1989,
635
- "venue": "",
636
- "volume": "",
637
- "issue": "",
638
- "pages": "",
639
- "other_ids": {},
640
- "num": null,
641
- "urls": [],
642
- "raw_text": "Gerard Salton. 1989. Automatic Text Processing. Addison Wesley, Reading, MA.",
643
- "links": null
644
- },
645
- "BIBREF8": {
646
- "ref_id": "b8",
647
- "title": "A look back and a look forward",
648
- "authors": [
649
- {
650
- "first": "Karen Sparck",
651
- "middle": [],
652
- "last": "Jones",
653
- "suffix": ""
654
- }
655
- ],
656
- "year": 1998,
657
- "venue": "SIGIR",
658
- "volume": "",
659
- "issue": "",
660
- "pages": "13--29",
661
- "other_ids": {},
662
- "num": null,
663
- "urls": [],
664
- "raw_text": "Karen Sparck Jones. 1998. A look back and a look forward. SIGIR, pages 13-29.",
665
- "links": null
666
- },
667
- "BIBREF9": {
668
- "ref_id": "b9",
669
- "title": "Survey of the State of the Art in Human Language Technology",
670
- "authors": [
671
- {
672
- "first": "Giovanni",
673
- "middle": [],
674
- "last": "Varile",
675
- "suffix": ""
676
- },
677
- {
678
- "first": "Antonio",
679
- "middle": [],
680
- "last": "Zampolli",
681
- "suffix": ""
682
- }
683
- ],
684
- "year": 1997,
685
- "venue": "",
686
- "volume": "",
687
- "issue": "",
688
- "pages": "",
689
- "other_ids": {},
690
- "num": null,
691
- "urls": [],
692
- "raw_text": "Giovanni Varile and Antonio Zampolli, editors. 1997. Survey of the State of the Art in Human Language Technology. Cambridge Univ. Press.",
693
- "links": null
694
- },
695
- "BIBREF10": {
696
- "ref_id": "b10",
697
- "title": "Using wordnet to disambiguate word senses for text retrieval",
698
- "authors": [
699
- {
700
- "first": "Ellen",
701
- "middle": [
702
- "M"
703
- ],
704
- "last": "Voorhees",
705
- "suffix": ""
706
- }
707
- ],
708
- "year": 1993,
709
- "venue": "Proceedings of 16th ACM SIGIR Conference",
710
- "volume": "",
711
- "issue": "",
712
- "pages": "",
713
- "other_ids": {},
714
- "num": null,
715
- "urls": [],
716
- "raw_text": "Ellen M. Voorhees. 1993. Using wordnet to disam- biguate word senses for text retrieval. In Pro- ceedings of 16th ACM SIGIR Conference. ACM- SIG1R.",
717
- "links": null
718
- },
719
- "BIBREF11": {
720
- "ref_id": "b11",
721
- "title": "Understanding subsumption and taxonomy: A framework for progress",
722
- "authors": [
723
- {
724
- "first": "William",
725
- "middle": [
726
- "A"
727
- ],
728
- "last": "Woods",
729
- "suffix": ""
730
- }
731
- ],
732
- "year": 1991,
733
- "venue": "Principles of Semantic Networks: Explorations in the Representation o/ Knowledge",
734
- "volume": "",
735
- "issue": "",
736
- "pages": "45--94",
737
- "other_ids": {},
738
- "num": null,
739
- "urls": [],
740
- "raw_text": "William A. Woods. 1991. Understanding subsump- tion and taxonomy: A framework for progress. In John Sowa, editor, Principles of Semantic Networks: Explorations in the Representation o/ Knowledge, pages 45-94. Morgan Kaufmann, San Mateo, CA.",
741
- "links": null
742
- },
743
- "BIBREF12": {
744
- "ref_id": "b12",
745
- "title": "Conceptual indexing: A better way to organize knowledge",
746
- "authors": [
747
- {
748
- "first": "William",
749
- "middle": [
750
- "A"
751
- ],
752
- "last": "Woods",
753
- "suffix": ""
754
- }
755
- ],
756
- "year": 1997,
757
- "venue": "",
758
- "volume": "",
759
- "issue": "",
760
- "pages": "",
761
- "other_ids": {},
762
- "num": null,
763
- "urls": [],
764
- "raw_text": "William A. Woods. 1997. Conceptual indexing: A better way to organize knowledge. Technical Report SMLI TR-97-61, Sun Microsystems Laboratories, Mountain View, CA, April. www.sun.com/research/techrep/1997/abstract- 61.html.",
765
- "links": null
766
- },
767
- "BIBREF13": {
768
- "ref_id": "b13",
769
- "title": "Aggressive morphology for robust ]exical coverage",
770
- "authors": [
771
- {
772
- "first": "A",
773
- "middle": [],
774
- "last": "William",
775
- "suffix": ""
776
- },
777
- {
778
- "first": "",
779
- "middle": [],
780
- "last": "Woods",
781
- "suffix": ""
782
- }
783
- ],
784
- "year": 2000,
785
- "venue": "",
786
- "volume": "",
787
- "issue": "",
788
- "pages": "",
789
- "other_ids": {},
790
- "num": null,
791
- "urls": [],
792
- "raw_text": "William A. Woods. 2000. Aggressive morphology for robust ]exical coverage. In (these proceedings).",
793
- "links": null
794
- }
795
- },
796
- "ref_entries": {
797
- "TABREF1": {
798
- "content": "<table><tr><td/><td/><td>Recall</td></tr><tr><td>System tfidf</td><td>Success Rate 28.6%</td><td>(10 docs) 14.8%</td></tr><tr><td>SearchIt system</td><td>44.0%</td><td>28.5%</td></tr><tr><td>Recall II</td><td>60.7%</td><td>38.6%</td></tr><tr><td>w/o morph</td><td>50.0%</td><td>not measured</td></tr><tr><td>w/o knowledge</td><td>42.9%</td><td>not measured</td></tr></table>",
799
- "num": null,
800
- "text": "A comparison of different retrieval techniques.",
801
- "type_str": "table",
802
- "html": null
803
- }
804
- }
805
- }
806
- }
Full_text_JSON/prefixA/json/A00/A00-1037.json DELETED
@@ -1,1123 +0,0 @@
1
- {
2
- "paper_id": "A00-1037",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:36.515000Z"
6
- },
7
- "title": "Domain-Specific Knowledge Acquisition from Text",
8
- "authors": [
9
- {
10
- "first": "Dan",
11
- "middle": [],
12
- "last": "Moldovan",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Southern Methodist University Dallas",
17
- "location": {
18
- "postCode": "75275-0122",
19
- "settlement": "Texas"
20
- }
21
- },
22
- "email": "[email protected]"
23
- },
24
- {
25
- "first": "Roxana",
26
- "middle": [],
27
- "last": "Girju",
28
- "suffix": "",
29
- "affiliation": {
30
- "laboratory": "",
31
- "institution": "University of Southern Methodist University Dallas",
32
- "location": {
33
- "postCode": "75275-0122",
34
- "settlement": "Texas"
35
- }
36
- },
37
- "email": "[email protected]"
38
- },
39
- {
40
- "first": "Vasile",
41
- "middle": [],
42
- "last": "Rus",
43
- "suffix": "",
44
- "affiliation": {
45
- "laboratory": "",
46
- "institution": "University of Southern Methodist University Dallas",
47
- "location": {
48
- "postCode": "75275-0122",
49
- "settlement": "Texas"
50
- }
51
- },
52
- "email": "[email protected]"
53
- }
54
- ],
55
- "year": "",
56
- "venue": null,
57
- "identifiers": {},
58
- "abstract": "In many knowledge intensive applications, it is necessary to have extensive domain-specific knowledge in addition to general-purpose knowledge bases. This paper presents a methodology for discovering domain-specific concepts and relationships in an attempt to extend WordNet. The method was tested on five seed concepts selected from the financial domain: interest rate, stock market, inflation, economic growth, and employment.",
59
- "pdf_parse": {
60
- "paper_id": "A00-1037",
61
- "_pdf_hash": "",
62
- "abstract": [
63
- {
64
- "text": "In many knowledge intensive applications, it is necessary to have extensive domain-specific knowledge in addition to general-purpose knowledge bases. This paper presents a methodology for discovering domain-specific concepts and relationships in an attempt to extend WordNet. The method was tested on five seed concepts selected from the financial domain: interest rate, stock market, inflation, economic growth, and employment.",
65
- "cite_spans": [],
66
- "ref_spans": [],
67
- "eq_spans": [],
68
- "section": "Abstract",
69
- "sec_num": null
70
- }
71
- ],
72
- "body_text": [
73
- {
74
- "text": "1 Desiderata for Automated Knowledge Acquisition The need for knowledge The knowledge is infinite and no matter how large a knowledge base is, it is not possible to store all the concepts and procedures for all domains. Even if that were possible, the knowledge is generative and there are no guarantees that a system will have the latest information all the time. And yet, if we are to build common-sense knowledge processing systems in the future, it is necessary to have general-purpose and domain-specific knowledge that is up to date. Our inability to build large knowledge bases without much effort has impeded many ANLP developments.",
75
- "cite_spans": [],
76
- "ref_spans": [],
77
- "eq_spans": [],
78
- "section": "",
79
- "sec_num": null
80
- },
81
- {
82
- "text": "The most successful current Information Extraction systems rely on hand coded linguistic rules representing lexico-syntactic patterns capable of matching natural language expressions of events. Since the rules are hand-coded it is difficult to port systems across domains. Question answering, inference, summarization, and other applications can benefit from large linguistic knowledge bases.",
83
- "cite_spans": [],
84
- "ref_spans": [],
85
- "eq_spans": [],
86
- "section": "",
87
- "sec_num": null
88
- },
89
- {
90
- "text": "The basic idea A possible solution to the problem of rapid development of flexible knowledge bases is to design an automatic knowledge acquisition system that extracts knowledge from texts for the purpose of merging it with a core ontological knowledge base. The attempt to create a knowledge base manually is time consuming and error prone, even for small application domains, and we believe that automatic knowledge acquisition and classification is the only viable solution to large-scale, knowledge intensive applications.",
91
- "cite_spans": [],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "",
95
- "sec_num": null
96
- },
97
- {
98
- "text": "This paper presents an interactive method that acquires new concepts and connections associated with user-selected seed concepts, and adds them to the WordNet linguistic knowledge structure (Fellbaum 1998) . The sources of the new knowledge are texts acquired from the Internet or other corpora. At the present time, our system works in a semi-automatic mode, in the sense that it acquires concepts and relations automatically, but their validation is done by the user.",
99
- "cite_spans": [
100
- {
101
- "start": 190,
102
- "end": 205,
103
- "text": "(Fellbaum 1998)",
104
- "ref_id": null
105
- }
106
- ],
107
- "ref_spans": [],
108
- "eq_spans": [],
109
- "section": "",
110
- "sec_num": null
111
- },
112
- {
113
- "text": "We believe that domain knowledge should not be acquired in a vacuum; it should expand an existent ontology with a skeletal structure built on consistent and acceptable principles. The method presented in this paper is applicable to any Machine Readable Dictionary. However, we chose WordNet because it is freely available and widely used.",
114
- "cite_spans": [],
115
- "ref_spans": [],
116
- "eq_spans": [],
117
- "section": "",
118
- "sec_num": null
119
- },
120
- {
121
- "text": "This work was inspired in part by Marti Hearst's paper (Hearst 1998) where she discovers manually lexico-syntactic patterns for the HYPERNYMY relation in WordNet.",
122
- "cite_spans": [
123
- {
124
- "start": 55,
125
- "end": 68,
126
- "text": "(Hearst 1998)",
127
- "ref_id": "BIBREF4"
128
- }
129
- ],
130
- "ref_spans": [],
131
- "eq_spans": [],
132
- "section": "Related work",
133
- "sec_num": null
134
- },
135
- {
136
- "text": "Much of the work in pattern extraction from texts was done for improving the performance of Information Extraction systems. Research in this area was done by (Kim and Moldovan 1995) (Riloff 1996) , (Soderland 1997) and others.",
137
- "cite_spans": [
138
- {
139
- "start": 158,
140
- "end": 181,
141
- "text": "(Kim and Moldovan 1995)",
142
- "ref_id": null
143
- },
144
- {
145
- "start": 182,
146
- "end": 195,
147
- "text": "(Riloff 1996)",
148
- "ref_id": null
149
- },
150
- {
151
- "start": 198,
152
- "end": 214,
153
- "text": "(Soderland 1997)",
154
- "ref_id": null
155
- }
156
- ],
157
- "ref_spans": [],
158
- "eq_spans": [],
159
- "section": "Related work",
160
- "sec_num": null
161
- },
162
- {
163
- "text": "The MindNet (Richardson 1998) project at Microsoft is an attempt to transform the Longman Dictionary of Contemporary English (LDOCE) into a form of knowledge base for text processing.",
164
- "cite_spans": [],
165
- "ref_spans": [],
166
- "eq_spans": [],
167
- "section": "Related work",
168
- "sec_num": null
169
- },
170
- {
171
- "text": "Woods studied knowledge representation and classification for long time (Woods 1991) , and more recently is trying to automate the construction of taxonomies by extracting concepts directly from texts (Woods 1997) .",
172
- "cite_spans": [
173
- {
174
- "start": 72,
175
- "end": 84,
176
- "text": "(Woods 1991)",
177
- "ref_id": null
178
- },
179
- {
180
- "start": 201,
181
- "end": 213,
182
- "text": "(Woods 1997)",
183
- "ref_id": "BIBREF11"
184
- }
185
- ],
186
- "ref_spans": [],
187
- "eq_spans": [],
188
- "section": "Related work",
189
- "sec_num": null
190
- },
191
- {
192
- "text": "The Knowledge Acquisition from Text (KAT) system is presented next. It consists of four parts: (1) discovery of new concepts, (2) discovery of new lexical patterns, (3) discovery of new relationships reflected by the lexical patterns, and (4) the classification and integration of the knowledge discovered with a WordNet -like knowledge base.",
193
- "cite_spans": [],
194
- "ref_spans": [],
195
- "eq_spans": [],
196
- "section": "Related work",
197
- "sec_num": null
198
- },
199
- {
200
- "text": "2.1 Discover new concepts Select seed concepts. New domain knowledge can be acquired around some seed concepts that a user considers important. In this paper we focus on the financial domain, and use: interest rate, stock market, inflation, economic growth, and employment as seed concepts. The knowledge we seek to acquire relates to one or more of these concepts, and consists of new concepts not defined in WordNet and new relations that link these concepts with other concepts, some of which are in WordNet.",
201
- "cite_spans": [],
202
- "ref_spans": [],
203
- "eq_spans": [],
204
- "section": "KAT System",
205
- "sec_num": "2"
206
- },
207
- {
208
- "text": "For example, from the sentence: When the US economy enters a boom, mortgage interest rates rise, the system discovers: (1) the new concept mortgage interest rate not defined in WordNet but related to the seed concept interest rate, and (2) the state of the US economy and the value of mortgage interest rate are in a DIRECT RELATIONSHIP.",
209
- "cite_spans": [],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "KAT System",
213
- "sec_num": "2"
214
- },
215
- {
216
- "text": "In WordNet, a concept is represented as a synset that contains words sharing the same meaning. In our experiments, we extend the seed words to their corresponding synset. For example, stock market is synonym with stock exchange and securities market, and we aim to learn concepts related to all these terms, not only to stock market.",
217
- "cite_spans": [],
218
- "ref_spans": [],
219
- "eq_spans": [],
220
- "section": "KAT System",
221
- "sec_num": "2"
222
- },
223
- {
224
- "text": "Extract sentences. Queries are formed with each seed concept to extract documents from the Internet and other possible sources. The documents retrieved are further processed such that only the sentences that contain the seed concepts are retained. This way, an arbitrarily large corpus .4 is formed of sentences containing the seed concepts. We limit the size of this corpus to 1000 sentences per seed concept.",
225
- "cite_spans": [],
226
- "ref_spans": [],
227
- "eq_spans": [],
228
- "section": "KAT System",
229
- "sec_num": "2"
230
- },
231
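The corpus-assembly step above is easy to prototype. A minimal sketch, assuming documents have already been fetched as plain text; the seed list, the NLTK sentence splitter, and the 1000-sentence cap mirror the description, but every name here is illustrative rather than the authors' code.
```python
import re
from nltk.tokenize import sent_tokenize  # stand-in splitter; needs the 'punkt' data

SEED_SYNSET = ["stock market", "stock exchange", "securities market"]

def build_corpus(documents, seeds=SEED_SYNSET, cap=1000):
    """Keep only sentences mentioning a seed term, up to `cap` sentences."""
    patterns = [re.compile(r"\b" + re.escape(s) + r"s?\b", re.I) for s in seeds]
    corpus = []
    for doc in documents:
        for sent in sent_tokenize(doc):
            if any(p.search(sent) for p in patterns):
                corpus.append(sent)
                if len(corpus) >= cap:
                    return corpus
    return corpus
```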
- {
232
- "text": "Parse sentences. Each sentence in this corpus is first part-of-speech (POS) tagged then parsed. We use Brill's POS tagger and our own parser. The output of the POS tagger for the example above is:",
233
- "cite_spans": [],
234
- "ref_spans": [],
235
- "eq_spans": [],
236
- "section": "KAT System",
237
- "sec_num": "2"
238
- },
239
- {
240
- "text": "When/WRB the/DW U.~./NNP economy/NN enters/VBZ a/DT boom/NN ,/, mortgage/NN inter-est_rates/NNS rise/vBP ./.",
241
- "cite_spans": [],
242
- "ref_spans": [],
243
- "eq_spans": [],
244
- "section": "KAT System",
245
- "sec_num": "2"
246
- },
247
- {
248
- "text": "The syntactic parser output is:",
249
- "cite_spans": [],
250
- "ref_spans": [],
251
- "eq_spans": [],
252
- "section": "KAT System",
253
- "sec_num": "2"
254
- },
255
- {
256
- "text": "TOP (S (SBAR (WHADVP (WRB When)) (S (NP (DT the) (NNP U.S.) (NN economy)) (VP (VBZ enters) (NP (DT a) (NN boom) (, ,))))) (NP (NN mortgage) (NNS interest_rates)) (VP (VI3P rise)))",
257
- "cite_spans": [],
258
- "ref_spans": [],
259
- "eq_spans": [],
260
- "section": "KAT System",
261
- "sec_num": "2"
262
- },
263
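For readers who want to reproduce the tagging step, here is a minimal sketch using NLTK's default tagger as a stand-in for Brill's tagger (the paper's actual tools are Brill's tagger and the authors' own parser, neither of which is shown here).
```python
import nltk  # assumes nltk plus the 'averaged_perceptron_tagger' model is installed

sentence = "When the U.S. economy enters a boom , mortgage interest_rates rise ."
tokens = sentence.split()  # the example sentence is already tokenized
print(nltk.pos_tag(tokens))
# Expected shape (individual tags may differ slightly from Brill's tagger):
# [('When', 'WRB'), ('the', 'DT'), ('U.S.', 'NNP'), ('economy', 'NN'), ...]
```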
- {
264
- "text": "Extract new concepts. In this paper only noun concepts are considered. Since, most likely, oneword nouns are already defined in WordNet, the focus here is on compound nouns and nouns with modifiers that have meaning but are not in WordNet.",
265
- "cite_spans": [],
266
- "ref_spans": [],
267
- "eq_spans": [],
268
- "section": "KAT System",
269
- "sec_num": "2"
270
- },
271
- {
272
- "text": "The new concepts directly related to the seeds are extracted from the noun phrases (NPs) that contain the seeds. In the example above, we see that the seed belongs to the NP: mortgage interest rate.",
273
- "cite_spans": [],
274
- "ref_spans": [],
275
- "eq_spans": [],
276
- "section": "KAT System",
277
- "sec_num": "2"
278
- },
279
- {
280
- "text": "This way, a list of NPs containing the seeds is assembled automatically from the parsed texts. Every such NP is considered a potential new concept. This is only the \"raw material\" from which actual concepts are discovered.",
281
- "cite_spans": [],
282
- "ref_spans": [],
283
- "eq_spans": [],
284
- "section": "KAT System",
285
- "sec_num": "2"
286
- },
287
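Given bracketed parses like the one above, collecting candidate NPs that contain a seed is a small tree traversal. A sketch with nltk.Tree; the naive substring test for the seed is an assumption, not the paper's matching rule.
```python
from nltk.tree import Tree

PARSE = ("(S (SBAR (WHADVP (WRB When)) (S (NP (DT the) (NNP U.S.) (NN economy))"
         " (VP (VBZ enters) (NP (DT a) (NN boom))))) (NP (NN mortgage)"
         " (NNS interest_rates)) (VP (VBP rise)))")

def candidate_nps(parse_str, seed="interest_rate"):
    """Collect noun phrases whose words contain the seed term."""
    tree = Tree.fromstring(parse_str)
    out = []
    for np in tree.subtrees(lambda t: t.label() == "NP"):
        words = [w.lower() for w in np.leaves()]
        if any(seed in w for w in words):
            out.append("_".join(words))
    return out

print(candidate_nps(PARSE))  # ['mortgage_interest_rates']
```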
- {
288
- "text": "In some noun phrases the seed is the head noun,",
289
- "cite_spans": [],
290
- "ref_spans": [],
291
- "eq_spans": [],
292
- "section": "KAT System",
293
- "sec_num": "2"
294
- },
295
- {
296
- "text": "i.e. [word, word,..see~ [mortgage_interest_rate] , since it is defined in the on-line dictionary OneLook Dictionaries (http://www.onelook.com). Procedure 1.3. User validation. Since currently we lack a formal definition of a concept, it is not possible to completely automate the discovery of concepts. The human inspects the list of noun phrases and decides whether to accept or decline each concept.",
297
- "cite_spans": [
298
- {
299
- "start": 5,
300
- "end": 23,
301
- "text": "[word, word,..see~",
302
- "ref_id": null
303
- },
304
- {
305
- "start": 24,
306
- "end": 48,
307
- "text": "[mortgage_interest_rate]",
308
- "ref_id": null
309
- }
310
- ],
311
- "ref_spans": [],
312
- "eq_spans": [],
313
- "section": "KAT System",
314
- "sec_num": "2"
315
- },
316
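The dictionary check that validates a candidate such as [mortgage_interest_rate] can be approximated with a WordNet lookup; the OneLook query would be an extra HTTP call, omitted here, and the function name is hypothetical.
```python
from nltk.corpus import wordnet as wn  # assumes the WordNet corpus is installed

def is_new_concept(phrase):
    """A candidate like 'mortgage_interest_rate' counts as 'new' if WordNet
    has no synset for it (the paper additionally consults OneLook)."""
    return len(wn.synsets(phrase)) == 0

print(is_new_concept("interest_rate"))           # False: already in WordNet
print(is_new_concept("mortgage_interest_rate"))  # True: a discovered concept
```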
- {
317
- "text": "Texts represent a rich source of information from which in addition to concepts we can also discover relations between concepts. We are interested in discovering semantic relationships that link the concepts extracted above with other concepts, some of which may be in WordNet. The approach is to search for lexico-syntactic patterns comprising the concepts of interest. The semantic relations from WordNet are the first we search for, as it is only natural to add more of these relations to enhance the WordNet knowledge base. However, since the focus is on the acquisition of domain-specific knowledge, there are semantic relations between concepts other than the WordNet relations that are important. These new relations can be discovered automatically from the clauses and sentences in which the seeds occur.",
318
- "cite_spans": [],
319
- "ref_spans": [],
320
- "eq_spans": [],
321
- "section": "Discover lexlco-syntactic patterns",
322
- "sec_num": "2.2"
323
- },
324
- {
325
- "text": "Pick a semantic relation R. These can be Word-Net semantic relations or any other relations defined by the user. So far, we have experimented with the WordNet HYPERNYMY (or so-called IS-A) relation, and three other relations. By inspecting a few sentences containing interest rate one can notice that INFLUENCE is a frequently used relation. The two other relations are CAUSE and EQUIVALENT.",
326
- "cite_spans": [],
327
- "ref_spans": [],
328
- "eq_spans": [],
329
- "section": "Discover lexlco-syntactic patterns",
330
- "sec_num": "2.2"
331
- },
332
- {
333
- "text": "Pick a pair of concepts Ci, C# among which R holds. These may be any noun concepts. In the context of finance domain, some examples of concepts linked by the INFLUENCE relation are: interest rate INFLUENCES earnings, or credit worthiness INFLUENCES interest rate.",
334
- "cite_spans": [],
335
- "ref_spans": [],
336
- "eq_spans": [],
337
- "section": "Discover lexlco-syntactic patterns",
338
- "sec_num": "2.2"
339
- },
340
- {
341
- "text": "Extract lexico-syntactic patterns Ci :P Cj. Search any corpus B, different from ,4 for all instances where Ci and Cj occur in the same sentence. Extract the lexico-syntactic patterns that link the two concepts. For example~ from the sentence : The graph indicates the impact on earnings from several different interest rate scenarios, the generally applicable pattern extracted is: impact on NP2 from NP1 This pattern corresponds unambiguously to the relation R we started with, namely INFLUENCE. Thus we conclude: INFLUENCE(NPI, NP2).",
342
- "cite_spans": [],
343
- "ref_spans": [],
344
- "eq_spans": [],
345
- "section": "Discover lexlco-syntactic patterns",
346
- "sec_num": "2.2"
347
- },
348
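A sketch of how the "impact on NP2 from NP1" pattern could be matched; the regular expression and its crude NP boundaries are illustrative stand-ins for the system's parser-based matching.
```python
import re

# One hand-discovered pattern for INFLUENCE: "... impact on NP2 from NP1 ..."
IMPACT = re.compile(r"impact on (?P<np2>[\w .]+?) from (?P<np1>[\w .]+?)[.,;]")

def influence_from_impact(sentence):
    m = IMPACT.search(sentence)
    if m:
        return ("INFLUENCE", m.group("np1").strip(), m.group("np2").strip())
    return None

s = "The graph indicates the impact on earnings from several different interest rate scenarios."
print(influence_from_impact(s))
# ('INFLUENCE', 'several different interest rate scenarios', 'earnings')
```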
- {
349
- "text": "Another example is: As the credit worthiness decreases, the interest rate increases. From this sentence we extract another lexical pattern that expresses the INFLUENCE relation: [as NP1 vbl, NP2 vb$] & [vbl and vb2 are antonyms] This pattern is rather complex since it contains not only the lexical part but also the verb condition that needs to be satisfied.",
350
- "cite_spans": [
351
- {
352
- "start": 178,
353
- "end": 190,
354
- "text": "[as NP1 vbl,",
355
- "ref_id": null
356
- },
357
- {
358
- "start": 191,
359
- "end": 228,
360
- "text": "NP2 vb$] & [vbl and vb2 are antonyms]",
361
- "ref_id": null
362
- }
363
- ],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Discover lexlco-syntactic patterns",
367
- "sec_num": "2.2"
368
- },
369
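The antonym condition attached to the "[as NP1 vb1, NP2 vb2]" pattern can be checked against WordNet's direct antonym links. A minimal sketch, restricted to verb senses; the paper does not specify its exact test.
```python
from nltk.corpus import wordnet as wn

def are_antonyms(v1, v2):
    """True if some verb sense of v1 lists v2 as a direct antonym."""
    for syn in wn.synsets(v1, pos=wn.VERB):
        for lemma in syn.lemmas():
            if v2 in [a.name() for a in lemma.antonyms()]:
                return True
    return False

# Verb condition for "As the credit worthiness decreases, the interest rate increases":
print(are_antonyms("decrease", "increase"))  # True in WordNet 3.x
```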
- {
370
- "text": "This procedure repeats for all relations R.",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Discover lexlco-syntactic patterns",
375
- "sec_num": "2.2"
376
- },
377
- {
378
- "text": "2.3 Discover new relationships between concepts Let us denote with Cs the seed-related concepts found with Procedures 1.1 through 1.3. We search now corpus ,4 for the occurrence of patterns ~ discovered above such that one of their two concepts is a concept Cs.",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Discover lexlco-syntactic patterns",
383
- "sec_num": "2.2"
384
- },
385
- {
386
- "text": "Search corpus ,4 for a pattern ~. Using a lexicosyntactic pattern P, one at a time, search corpus ,4 for its occurrence. If found, search further whether or not one of the NPs is a seed-related concept Cs.",
387
- "cite_spans": [],
388
- "ref_spans": [],
389
- "eq_spans": [],
390
- "section": "Discover lexlco-syntactic patterns",
391
- "sec_num": "2.2"
392
- },
393
- {
394
- "text": "Identify new concepts Cn. Part of the pattern 7 ~ are two noun phrases, one of which is Cs. The head noun from the other noun phrase is a concept Cn we are looking for. This may be a WordNet concept, and if it is not it will be added to the list of concepts discovered.",
395
- "cite_spans": [],
396
- "ref_spans": [],
397
- "eq_spans": [],
398
- "section": "Discover lexlco-syntactic patterns",
399
- "sec_num": "2.2"
400
- },
401
- {
402
- "text": "Form relation R(Cs, Cn). Since each pattern 7 ~ is a linguistic expression of its corresponding semantic relation R, we conclude R(Cs,Cn) (this is interpreted \"C8 is relation R Cn)'). These steps are repeated for all patterns.",
403
- "cite_spans": [],
404
- "ref_spans": [],
405
- "eq_spans": [],
406
- "section": "Discover lexlco-syntactic patterns",
407
- "sec_num": "2.2"
408
- },
409
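Putting the pieces of this step together: scan corpus A with each pattern and keep only matches that touch a seed-related concept Cs. The seed set and the single regex below are illustrative.
```python
import re

SEED_RELATED = {"interest rate", "mortgage interest rate"}  # the Cs set (illustrative)
IMPACT = re.compile(r"impact on ([\w ]+?) from ([\w ]+?)[.,;]")

def scan(corpus_a):
    """Apply each pattern to corpus A; keep matches touching a Cs concept."""
    found = []
    for sent in corpus_a:
        m = IMPACT.search(sent)
        if m:
            np2, np1 = m.group(1).strip(), m.group(2).strip()
            if np1 in SEED_RELATED or np2 in SEED_RELATED:
                found.append(("INFLUENCE", np1, np2))
    return found

print(scan(["There is a clear impact on earnings from interest rate, analysts say."]))
# [('INFLUENCE', 'interest rate', 'earnings')]
```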
- {
410
- "text": "User intervention to accept or reject relationships is necessary mainly due to our system inability of handling coreference resolution and other complex linguistic phenomena.",
411
- "cite_spans": [],
412
- "ref_spans": [],
413
- "eq_spans": [],
414
- "section": "Discover lexlco-syntactic patterns",
415
- "sec_num": "2.2"
416
- },
417
- {
418
- "text": "integration Next, a taxonomy needs to be created that is consistent with WordNet. In addition to creating a taxonomy, this step is also useful for validating the concepts acquired above. The classification is based on the subsumption principle (Schmolze and Lipkis 1983) , (Woods 1991) .",
419
- "cite_spans": [
420
- {
421
- "start": 244,
422
- "end": 270,
423
- "text": "(Schmolze and Lipkis 1983)",
424
- "ref_id": "BIBREF8"
425
- },
426
- {
427
- "start": 273,
428
- "end": 285,
429
- "text": "(Woods 1991)",
430
- "ref_id": null
431
- }
432
- ],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "Knowledge classification and",
436
- "sec_num": "2.4"
437
- },
438
- {
439
- "text": "This algorithm provides the overall steps for the classification of concepts within the context of Word-Net. Figure 1 shows the inputs of the Classification Algorithm and suggests that the classification is an iterative process. In addition to WordNet, the inputs consist of the corpus ,4, the sets of concepts Cs and Cn, and the relationships 7~. Let's denote with C = Cs U Cn the union of the seed related concepts with the new concepts. All these concepts need to be classified. Step 1. From the set of relationships 7\"~ discovered in Part 3, pick all the HYPERNYMY relations. From the way these relations were developed, there are two possibilities:",
440
- "cite_spans": [],
441
- "ref_spans": [
442
- {
443
- "start": 109,
444
- "end": 117,
445
- "text": "Figure 1",
446
- "ref_id": "FIGREF0"
447
- }
448
- ],
449
- "eq_spans": [],
450
- "section": "Knowledge classification and",
451
- "sec_num": "2.4"
452
- },
453
- {
454
- "text": "(1) A HYPERNYMY relation links a WordNet concept Cw with another concept from the set C denoted with CAw , or (2) A HYPERNYMY relation links a concept Cs with a concept Cn.",
455
- "cite_spans": [
456
- {
457
- "start": 101,
458
- "end": 109,
459
- "text": "CAw , or",
460
- "ref_id": null
461
- }
462
- ],
463
- "ref_spans": [],
464
- "eq_spans": [],
465
- "section": "Knowledge classification and",
466
- "sec_num": "2.4"
467
- },
468
- {
469
- "text": "Concepts C~w are immediately linked to Word-Net and added to the knowledge base. The concepts from case (2) are also added to the knowledge base but they form at this point only some isolated islands since are not yet linked to the rest of the knowledge base.",
470
- "cite_spans": [],
471
- "ref_spans": [],
472
- "eq_spans": [],
473
- "section": "Knowledge classification and",
474
- "sec_num": "2.4"
475
- },
476
- {
477
- "text": "Step 2. Search corpus `4 for all the patterns associated with the HYPERNYMY relation that may link Step 3. Classify all concepts in set Ce using Procedures 4.1 through 4.5 below.",
478
- "cite_spans": [],
479
- "ref_spans": [],
480
- "eq_spans": [],
481
- "section": "Knowledge classification and",
482
- "sec_num": "2.4"
483
- },
484
- {
485
- "text": "Step 4. Repeat Step 3 for all the concepts in set Cc several times till no more changes occur. This reclassification is necessary since the insertion of a concept into the knowledge base may perturb the ordering of other surrounding concepts in the hierarchy.",
486
- "cite_spans": [],
487
- "ref_spans": [],
488
- "eq_spans": [],
489
- "section": "Knowledge classification and",
490
- "sec_num": "2.4"
491
- },
492
- {
493
- "text": "Step 5. Add the rest of relationships 7~ other than the HYPERNYMY to the new knowledge base. The HYPERNYMY relations have already been used in the Classification Algorithm, but the other relations, i.e. INFLUENCE, CAUSE and EQUIVALENT need to be added to the knowledge base.",
494
- "cite_spans": [],
495
- "ref_spans": [],
496
- "eq_spans": [],
497
- "section": "Knowledge classification and",
498
- "sec_num": "2.4"
499
- },
500
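The iteration of Steps 3 and 4 is a fixpoint loop. A skeleton, with classify_concept as a hypothetical helper standing in for Procedures 4.1 through 4.5.
```python
def classification_loop(concepts, knowledge_base, classify_concept):
    """Steps 3-4: reclassify every concept until the taxonomy stabilizes,
    since each insertion can perturb nearby concepts in the hierarchy.
    `classify_concept` returns True whenever it modified the knowledge base."""
    changed = True
    while changed:
        changed = False
        for c in concepts:
            if classify_concept(c, knowledge_base):
                changed = True
    return knowledge_base
```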
- {
501
- "text": "Procedure 4.1. Classify a concept of the form [word, head] with respect to concept [head] .",
502
- "cite_spans": [
503
- {
504
- "start": 46,
505
- "end": 58,
506
- "text": "[word, head]",
507
- "ref_id": null
508
- },
509
- {
510
- "start": 83,
511
- "end": 89,
512
- "text": "[head]",
513
- "ref_id": null
514
- }
515
- ],
516
- "ref_spans": [],
517
- "eq_spans": [],
518
- "section": "Concept classification procedures",
519
- "sec_num": null
520
- },
521
- {
522
- "text": "It is assumed here that the [head] concept exists in WordNet simply because in many instances the \"head\" is the \"seed\" concept, and because frequently the head is a single word common noun usually defined in WordNet. In this procedure we consider only those head nouns that do not have any hyponyms since the other case when the head has other concepts under it is more complex and is treated by Procedure 4.4. Here \"word\" is a noun or an adjective. For a relative classification of two such concepts, the ontological relations between headz and head2 and between word1 and words, if exist, are extended to the two concepts. We distinguish here three possibilities:",
523
- "cite_spans": [],
524
- "ref_spans": [],
525
- "eq_spans": [],
526
- "section": "Concept classification procedures",
527
- "sec_num": null
528
- },
529
- {
530
- "text": "1. heady subsumes heads and word1 subsumes word2. In this case [wordz, headl] In the previous work on knowledge classification it was assumed that the concepts were accompanied by rolesets and values (Schmolze and Lipkis 1983) , (Woods 1991) , and others. Knowledge classifiers are part of almost any knowledge representation system.",
531
- "cite_spans": [
532
- {
533
- "start": 63,
534
- "end": 77,
535
- "text": "[wordz, headl]",
536
- "ref_id": null
537
- },
538
- {
539
- "start": 200,
540
- "end": 226,
541
- "text": "(Schmolze and Lipkis 1983)",
542
- "ref_id": "BIBREF8"
543
- },
544
- {
545
- "start": 229,
546
- "end": 241,
547
- "text": "(Woods 1991)",
548
- "ref_id": null
549
- }
550
- ],
551
- "ref_spans": [],
552
- "eq_spans": [],
553
- "section": "Concept classification procedures",
554
- "sec_num": null
555
- },
556
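Case 1 above reduces to two WordNet subsumption tests, one on the heads and one on the modifiers. A sketch; checking all sense pairs is an assumption, since the paper does not say how senses are selected.
```python
from nltk.corpus import wordnet as wn

def subsumes(a, b):
    """True if some noun sense of a equals, or is a hypernym of,
    some noun sense of b in WordNet."""
    for sa in wn.synsets(a, pos=wn.NOUN):
        for sb in wn.synsets(b, pos=wn.NOUN):
            if sa == sb or sa in sb.closure(lambda s: s.hypernyms()):
                return True
    return False

def compound_subsumes(word1, head1, word2, head2):
    """Case 1: [word1, head1] subsumes [word2, head2] when both the heads
    and the modifiers stand in the subsumption relation."""
    return subsumes(head1, head2) and subsumes(word1, word2)

print(subsumes("rate", "interest_rate"))  # True: 'interest rate' IS-A 'rate'
```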
- {
557
- "text": "However, the problem we face here is more difficult. While in build-by-hand knowledge representation systems, the relations and values defining concepts are readily available, here we have to extract them from text. Fortunately, one can take advantage of the glossary definitions that are associated with concepts in WordNet and other dictionaries. One approach is to identify a set of semantic relations into which the verbs used in the gloss definitions are mapped into for the purpose of working with a manageable set of relations that may describe the concepts restrictions. In WordNet these basic relations are already identified and it is easy to map every verb into such a semantic relation.",
558
- "cite_spans": [],
559
- "ref_spans": [],
560
- "eq_spans": [],
561
- "section": "Concept classification procedures",
562
- "sec_num": null
563
- },
564
- {
565
- "text": "As far as the newly discovered concepts are concerned, their defining relations need to be retrieved from texts. Human assistance is required, at least for now, to pinpoint the most characteristic relations that define a concept.",
566
- "cite_spans": [],
567
- "ref_spans": [],
568
- "eq_spans": [],
569
- "section": "Concept classification procedures",
570
- "sec_num": null
571
- },
572
- {
573
- "text": "Below is a two step algorithm that we envision for the relative classification of two concepts A and B.",
574
- "cite_spans": [],
575
- "ref_spans": [],
576
- "eq_spans": [],
577
- "section": "Concept classification procedures",
578
- "sec_num": null
579
- },
580
- {
581
- "text": "Let's us denote with ARaCa and BRbCb the relationships that define concepts A and B respectively. These are similar to rolesets and values. Figure 4 it is shown the classification of concept monetary policy that has been discovered. By default this concept is placed under policy. However in WordNet there is a hierarchy fiscal policy -IS-Aeconomic policy -IS-A -policy. The question is where exactly to place monetary policy in this hierarchy.",
582
- "cite_spans": [],
583
- "ref_spans": [
584
- {
585
- "start": 140,
586
- "end": 148,
587
- "text": "Figure 4",
588
- "ref_id": null
589
- }
590
- ],
591
- "eq_spans": [],
592
- "section": "Concept classification procedures",
593
- "sec_num": null
594
- },
595
- {
596
- "text": "The gloss of economic policy indicates that it is MADE BY Government, and that it CONTROLS economic growth-(here we simplified the explanation and used economy instead of economic growth). The gloss of fiscal policy leads to relations MADE BY Government, CONTROLS budget, and CONTROLS taxation. The concept money supply was found by Procedure 1.2 in several dictionaries, and its dictionary definition leads to relations MADE BY Federal Government, and CONTROLS money supply. In Word-Net Government subsumes Federal Government, and economy HAS PART money. All necessary conditions are satisfied for economic policy to subsume monetary policy. However, fiscal policy does not subsume monetary policy since monetary policy does not control budget or taxation, or any of their hyponyms.",
597
- "cite_spans": [],
598
- "ref_spans": [],
599
- "eq_spans": [],
600
- "section": "Concept classification procedures",
601
- "sec_num": null
602
- },
603
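The monetary policy example can be phrased as a generic test over defining relationships: the parent subsumes the child when each of the parent's relationships is matched by an equal or more specific one in the child. A self-contained sketch, with toy data standing in for gloss extraction and the WordNet lookups.
```python
# Defining relationships extracted from glosses, as in the example above
# (relation names and values are illustrative).
ECONOMIC_POLICY = {("MADE_BY", "government"), ("CONTROLS", "economy")}
MONETARY_POLICY = {("MADE_BY", "federal_government"), ("CONTROLS", "money_supply")}

def defines_subsumption(parent_rels, child_rels, specializes):
    """Parent subsumes child if every defining relationship of the parent
    is matched by an equal or more specific relationship of the child."""
    return all(
        any(r == pr and specializes(v, pv) for (r, v) in child_rels)
        for (pr, pv) in parent_rels
    )

# `specializes` would be backed by WordNet (federal_government IS-A government,
# money_supply is related to economy); a toy table keeps the sketch runnable.
TOY = {("federal_government", "government"), ("money_supply", "economy")}
specializes = lambda v, pv: v == pv or (v, pv) in TOY

print(defines_subsumption(ECONOMIC_POLICY, MONETARY_POLICY, specializes))  # True
```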
- {
604
- "text": "Procedure 4.5 Merge a structure of concepts with the rest of the knowledge base.",
605
- "cite_spans": [],
606
- "ref_spans": [],
607
- "eq_spans": [],
608
- "section": "Concept classification procedures",
609
- "sec_num": null
610
- },
611
- {
612
- "text": "It is possible that structures consisting of several inter-connected concepts are formed in isolation of the main knowledge base as a result of some procedures. The task here is to merge such structures with the main knowledge base such that the new knowledge base will be consistent with both the structure and the main knowledge base. This is done by bridging whenever possible the structure concepts and the main knowledge base concepts. It is possible that as a result of this merging procedure, some HYPERNYMY relations either from the structure or the main knowledge base will be destroyed to keep the consistency. An example is shown in Figure 5 .",
613
- "cite_spans": [],
614
- "ref_spans": [
615
- {
616
- "start": 644,
617
- "end": 652,
618
- "text": "Figure 5",
619
- "ref_id": "FIGREF1"
620
- }
621
- ],
622
- "eq_spans": [],
623
- "section": "Concept classification procedures",
624
- "sec_num": null
625
- },
626
- {
627
- "text": "Example : The following HYPERNYMY relationships were discovered in Part 3: HYPERNYMY(financial market,capital market) HYPERNYMY(fInancial market,money market) HYPERNYMY(capital market,stock market) The structure obtained from these relationships along with a part of WordNet hierarchy is shown in Figure 5 . An attempt is made to merge the new structure with WordNet. To these relations it corresponds a structure as shown in Figure 5 . An attempt is made to merge this structure with Word-Net. Searching WordNet for all concepts in the structure we find money market and stock market in WordNet where as capital market and financial market are not. Figure 5 shows how the structure merges with WordNet and moreover how concepts that were unrelated in WordNet (i.e. stock market and money market) become connected through the new structure. It is also interesting to notice that the IS-A link in WordNet from money market to market is interrupted by the insertion of financial market in-between them.",
628
- "cite_spans": [],
629
- "ref_spans": [
630
- {
631
- "start": 297,
632
- "end": 305,
633
- "text": "Figure 5",
634
- "ref_id": "FIGREF1"
635
- },
636
- {
637
- "start": 426,
638
- "end": 434,
639
- "text": "Figure 5",
640
- "ref_id": "FIGREF1"
641
- },
642
- {
643
- "start": 650,
644
- "end": 658,
645
- "text": "Figure 5",
646
- "ref_id": "FIGREF1"
647
- }
648
- ],
649
- "eq_spans": [],
650
- "section": "Concept classification procedures",
651
- "sec_num": null
652
- },
653
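The merge in Figure 5 amounts to splicing new nodes into IS-A chains. A toy sketch of the money market case; the initial graph and the insert_between helper are illustrative, not the system's data structures.
```python
# A tiny IS-A graph: child -> parent. Mirrors the Figure 5 merge, where
# inserting 'financial market' between 'market' and 'money market'
# interrupts the original WordNet link.
isa = {"money market": "market", "stock market": "exchange"}

def insert_between(child, new_parent, old_parent):
    """Splice new_parent into the chain child -> old_parent."""
    assert isa.get(child) == old_parent
    isa[child] = new_parent
    isa[new_parent] = old_parent

insert_between("money market", "financial market", "market")
isa["capital market"] = "financial market"
isa["stock market"] = "capital market"  # re-attach per the discovered relations
print(isa)
```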
- {
654
- "text": "The KAT Algorithm has been implemented, and when given some seed concepts, it produces new concepts, patterns and relationships between concepts in an interactive mode. Table 1 shows the number of concepts extracted from a 5000 sentence corpus, in which each sentence contains at least one of the five seed concepts. The NPs were automatically searched in Word-Net and other on-line dictionaries. There were 3745 distinct noun phrases of interest extracted; the rest contained only the seeds or repetitions. Most of the ",
655
- "cite_spans": [],
656
- "ref_spans": [
657
- {
658
- "start": 169,
659
- "end": 176,
660
- "text": "Table 1",
661
- "ref_id": "TABREF7"
662
- }
663
- ],
664
- "eq_spans": [],
665
- "section": "Implementation and Results",
666
- "sec_num": "3"
667
- },
668
- {
669
- "text": ":~ INFLUENCE(NPI,NP2)",
670
- "cite_spans": [],
671
- "ref_spans": [],
672
- "eq_spans": [],
673
- "section": "Implementation and Results",
674
- "sec_num": "3"
675
- },
676
- {
677
- "text": "Phillips, a British economist, stated in 1958 that high inflation causes low unemployment rates. The Bank of Israel governor said that the ti;ht economic policy would have an immediate impact on inflation this year. As the economy picks up steam, so does inflation.",
678
- "cite_spans": [],
679
- "ref_spans": [],
680
- "eq_spans": [],
681
- "section": "Implementation and Results",
682
- "sec_num": "3"
683
- },
684
- {
685
- "text": "Higher interest rates are normally associated with weaker bond markets.",
686
- "cite_spans": [],
687
- "ref_spans": [],
688
- "eq_spans": [],
689
- "section": "Implementation and Results",
690
- "sec_num": "3"
691
- },
692
- {
693
- "text": "On the other hand, if interest rates go down, bonds go up, and your bond becomes more valuable.",
694
- "cite_spans": [],
695
- "ref_spans": [],
696
- "eq_spans": [],
697
- "section": "Implementation and Results",
698
- "sec_num": "3"
699
- },
700
- {
701
- "text": "The effects of inflation on debtors and creditors varies as the actual inflation is compared to the expected one. There exists an inverse relationship between unemployment rates and inflation, best illustrated by the Phillips Curve.",
702
- "cite_spans": [],
703
- "ref_spans": [],
704
- "eq_spans": [],
705
- "section": "Implementation and Results",
706
- "sec_num": "3"
707
- },
708
- {
709
- "text": "Irish employment is also largely a function of the past high birth rate. We believe that the Treasury bonds (and thus interest rates) are in a downward cycle. processing in Part 1 is taken by the parser. The human intervention to accept or decline concepts takes about 4 min./seed. The next step was to search for lexico-syntactic patterns. We considered one WordNet semantic relation, HYPERNYMY and three other relations that we found relevant for the domain, namely INFLU-ENCE, CAUSE and EQUIVALENT. For each relation, a pair of related words was selected and searched for on the Internet. The first 500 sentences/relation were retained. A human selected and validated semiautomatically the patterns for each sentence. A sample of the results is shown in Table 2 . A total of 22 patterns were obtained and their selection and validation took approximately 35 minutes/relation. Next, the patterns are searched for on the 5000 sentence corpus (Part 3). The procedure provided a total of 43 new concepts and 166 relationships in which at least one of the seeds occurred. From these relationships, by inspection, we have accepted 63 and rejected 102, procedure which took about 7 minutes. Table 3 lists some of the 63 relationships discovered. Applications An application in need of domain-specific knowledge is Question Answering. The concepts and the relationships acquired can be useful in answering difficult questions that normally cannot be easily answered just by using the information from WordNet. Consider the processing of the following questions after the new domain knowledge has been acquired: QI: What factors have an impact on the interest rate? Q2: What happens with the employment when the economic growth rises? Q3: How does deflation influence prices? Figure 6 shows a portion of the new domain knowledge that is relevant to these questions. The first question can be easily answered by extracting the relationships that point to the concept interest rate. The factors that influence the interest rate are Fed, inflation, economic growth, and employment.",
710
- "cite_spans": [],
711
- "ref_spans": [
712
- {
713
- "start": 757,
714
- "end": 764,
715
- "text": "Table 2",
716
- "ref_id": "TABREF6"
717
- },
718
- {
719
- "start": 1187,
720
- "end": 1194,
721
- "text": "Table 3",
722
- "ref_id": "TABREF8"
723
- },
724
- {
725
- "start": 1770,
726
- "end": 1778,
727
- "text": "Figure 6",
728
- "ref_id": "FIGREF2"
729
- }
730
- ],
731
- "eq_spans": [],
732
- "section": "Implementation and Results",
733
- "sec_num": "3"
734
- },
735
- {
736
- "text": "The last two questions ask for more detailed information about the complex relationship among these concepts. Following the path from the deflation concept up to prices, the system learns that deflation influences direct proportionally real interest rate, and real interest rate has an inverse proportional impact on prices. Both these relationships came from the sentence: Thus, the deflation and the real interest rate are positively correlated, and so a higher real interest rate leads to falling prices.",
737
- "cite_spans": [],
738
- "ref_spans": [],
739
- "eq_spans": [],
740
- "section": "Implementation and Results",
741
- "sec_num": "3"
742
- },
743
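Answering Q1 over the acquired knowledge is a simple graph query: collect the sources of INFLUENCE edges that point at interest rate. A sketch over a hand-picked, illustrative subset of the discovered relationships.
```python
RELATIONS = [  # a few of the discovered relationships (illustrative subset)
    ("INFLUENCE", "Fed", "interest rate"),
    ("INFLUENCE", "inflation", "interest rate"),
    ("INFLUENCE", "economic growth", "interest rate"),
    ("INFLUENCE", "employment", "interest rate"),
    ("INFLUENCE_DIRECT_PROPORTIONALY", "deflation", "real interest rate"),
    ("INFLUENCE_INVERSE_PROPORTIONALY", "real interest rate", "prices"),
]

def factors_influencing(concept):
    """Q1-style query: collect sources of INFLUENCE edges into `concept`."""
    return [src for rel, src, dst in RELATIONS
            if rel.startswith("INFLUENCE") and dst == concept]

print(factors_influencing("interest rate"))
# ['Fed', 'inflation', 'economic growth', 'employment']
```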
- {
744
- "text": "This method may be adapted to acquire information when the question concepts are not in the knowledge base. Procedures may be invoked to discover these concepts and the relations in which they may be used.",
745
- "cite_spans": [],
746
- "ref_spans": [],
747
- "eq_spans": [],
748
- "section": "Implementation and Results",
749
- "sec_num": "3"
750
- },
751
- {
752
- "text": "The knowledge acquisition technology described above is applicable to any domain, by simply selecting appropriate seed concepts. We started with five concepts interest rate, stock market, inflation, economic growth, and employment and from a corpus of 5000 sentences we acquired a total of 362 concepts of which 319 contain the seeds and 43 relate to these via selected relations. There were 22 distinct le:dco-syntactic patterns discovered used in 63 instances. Most importantly, the new concepts can be integrated with an existing ontology.",
753
- "cite_spans": [],
754
- "ref_spans": [],
755
- "eq_spans": [],
756
- "section": "Conclusions",
757
- "sec_num": "5"
758
- },
759
- {
760
- "text": "The method works in an interactive mode where the user accepts or declines concepts, patterns and relationships. The manual operation took on average 40 minutes per seed for the 5000 sentence corpus. KAT is useful considering that most of the knowledge base construction today is done manually.",
761
- "cite_spans": [],
762
- "ref_spans": [],
763
- "eq_spans": [],
764
- "section": "Conclusions",
765
- "sec_num": "5"
766
- },
767
- {
768
- "text": "Complex linguistic phenomena such as coreference resolution, word sense disambiguation, and others have to be dealt with in order to increase the automation of the knowledge acquisition system. Without a good handling of these problems the results are not always accurate and human intervention is necessary.",
769
- "cite_spans": [],
770
- "ref_spans": [],
771
- "eq_spans": [],
772
- "section": "Conclusions",
773
- "sec_num": "5"
774
- }
775
- ],
776
- "back_matter": [],
777
- "bib_entries": {
778
- "BIBREF0": {
779
- "ref_id": "b0",
780
- "title": "LIBOR) HYPERNYMY(leading stock market",
781
- "authors": [
782
- {
783
- "first": "",
784
- "middle": [],
785
- "last": "Hypeanymy",
786
- "suffix": ""
787
- }
788
- ],
789
- "year": null,
790
- "venue": "",
791
- "volume": "",
792
- "issue": "",
793
- "pages": "",
794
- "other_ids": {},
795
- "num": null,
796
- "urls": [],
797
- "raw_text": "HYPEaNYMY(interest rate, LIBOR) HYPERNYMY(leading stock market, New York Stock Exchange)",
798
- "links": null
799
- },
800
- "BIBREF1": {
801
- "ref_id": "b1",
802
- "title": "HYPERNYMY(market risks, interest rate risk) HYPERNYMY(Capital markets, stock markets) CAUSE(inflation, unemployment) CAUSE(labour shortage, wage inflation) CAUSE(excessive demand, inflation INFLUENCE_DIRECT_PROPORTIONALYI economy, inflation) INFLUENCE_DIRECT_PROPORT1ONALY settlements",
803
- "authors": [],
804
- "year": null,
805
- "venue": "",
806
- "volume": "",
807
- "issue": "",
808
- "pages": "",
809
- "other_ids": {},
810
- "num": null,
811
- "urls": [],
812
- "raw_text": "HYPERNYMY(market risks, interest rate risk) HYPERNYMY(Capital markets, stock markets) CAUSE(inflation, unemployment) CAUSE(labour shortage, wage inflation) CAUSE(excessive demand, inflation INFLUENCE_DIRECT_PROPORTIONALYI economy, inflation) INFLUENCE_DIRECT_PROPORT1ONALY settlements, interest rate) INFLUENCE..DIRECT..",
813
- "links": null
814
- },
815
- "BIBREF2": {
816
- "ref_id": "b2",
817
- "title": "interest rates, dollars) INFLUENCE_DIRECT_PROPORTIONALY~ oil prices, inflation) INFLUENCE_DIRECT_PROPORTIONALY' inflation, nominal interest rates) INFLUENCE..DIRECT_PROPORTIONALY~ deflation, real interest rates) INFLUENCE-DIRECT-PROPORTIONALY currencles,lnflation) INFLUENCE_INVERSE_PROPORTIONALY unemployment rates, inflation) INFLUENCE_INVERSE-PKOPOKTIONALY monetary policies, inflation) INFLUENCE_INVERSE_PROPORTIONALY economy, interest rates) INFLUENCE_INVERSE..PROPORTIONALY inflation, unemployment rates) INFLUENCE.JNVERSE-PROPORTIONALY credit worthiness, interest rate) INFLUENCE_INVERSE-PROPORTIONALYlinterest rates, bonds) INFLUENCE(Internal Revenue Service",
818
- "authors": [
819
- {
820
- "first": "U",
821
- "middle": [
822
- "S"
823
- ],
824
- "last": "Proportionaly~",
825
- "suffix": ""
826
- }
- ],
- "year": null,
- "venue": "",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "PROPORTIONALY~ U.S. interest rates, dollars) INFLUENCE_DIRECT_PROPORTIONALY~ oil prices, inflation) INFLUENCE_DIRECT_PROPORTIONALY' inflation, nominal interest rates) INFLUENCE..DIRECT_PROPORTIONALY~ deflation, real interest rates) INFLUENCE-DIRECT-PROPORTIONALY currencles,lnflation) INFLUENCE_INVERSE_PROPORTIONALY unemployment rates, inflation) INFLUENCE_INVERSE-PKOPOKTIONALY monetary policies, inflation) INFLUENCE_INVERSE_PROPORTIONALY economy, interest rates) INFLUENCE_INVERSE..PROPORTIONALY inflation, unemployment rates) INFLUENCE.JNVERSE-PROPORTIONALY credit worthiness, interest rate) INFLUENCE_INVERSE-PROPORTIONALYlinterest rates, bonds) INFLUENCE(Internal Revenue Service, interest rates) INFLUENCE(economic growth, share prices)",
- "links": null
- },
- "BIBREF3": {
- "ref_id": "b3",
- "title": "EQUIVALENT(big mistakes, high inflation rates of 1970s) EQUIVALENT(fixed interest rate, coupon) References Christiane Fellbaum. WordNet -An Electronic Lezical Database",
- "authors": [],
- "year": 1998,
- "venue": "",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "EQUIVALENT(big mistakes, high inflation rates of 1970s) EQUIVALENT(fixed interest rate, coupon) References Christiane Fellbaum. WordNet -An Electronic Lezical Database, MIT Press, Cambridge, MA, 1998.",
- "links": null
- },
- "BIBREF4": {
- "ref_id": "b4",
- "title": "Automated Discovery of WordNet Relations",
- "authors": [
- {
- "first": "Marti",
- "middle": [],
- "last": "Hearst",
- "suffix": ""
- }
- ],
- "year": 1998,
- "venue": "WordNet: An Electronic Lezical Database and Some of its Applications",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Marti Hearst. Automated Discovery of WordNet Rela- tions. In WordNet: An Electronic Lezical Database and Some of its Applications, editor Fellbaum, C., MIT Press, Cambridge, MA, 1998.",
- "links": null
- },
- "BIBREF5": {
- "ref_id": "b5",
- "title": "Acquisition of Linguistic Patterns for knowledge-based information extraction",
- "authors": [
- {
- "first": "J",
- "middle": [],
- "last": "Kim",
- "suffix": ""
- },
- {
- "first": "D",
- "middle": [],
- "last": "Moldovan",
- "suffix": ""
- }
- ],
- "year": null,
- "venue": "IEEE Transactions on Knowledge and Data Engineering",
- "volume": "7",
- "issue": "5",
- "pages": "713--724",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "J. Kim and D. Moldovan. Acquisition of Linguistic Patterns for knowledge-based information extraction. IEEE Transactions on Knowledge and Data Engineer- ing 7(5): pages 713-724.",
- "links": null
- },
- "BIBREF6": {
- "ref_id": "b6",
- "title": "A Description Classifier for the Predicate Calculus",
- "authors": [
- {
- "first": "R",
- "middle": [],
- "last": "Macgregor ; Stephen",
- "suffix": ""
- },
- {
- "first": "D",
- "middle": [],
- "last": "Richardson",
- "suffix": ""
- },
- {
- "first": "William",
- "middle": [
- "B"
- ],
- "last": "Dolan",
- "suffix": ""
- },
- {
- "first": "Lucy",
- "middle": [],
- "last": "Vanderwende",
- "suffix": ""
- }
- ],
- "year": 1994,
- "venue": "MindNet: acquiring and structuring semantic information from text. Proceedings of ACL-Coling",
- "volume": "",
- "issue": "",
- "pages": "1098--1102",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "R. MacGregor. A Description Classifier for the Predicate Calculus. Proceedings of the 12th National Conference on Artificial Intelligence (AAAI94), pp. 213-220, 1994. Stephen D. Richardson, William B. Dolan, Lucy Vander- wende. MindNet: acquiring and structuring seman- tic information from text. Proceedings of ACL-Coling 1998, pages 1098-1102.",
- "links": null
- },
- "BIBREF7": {
- "ref_id": "b7",
- "title": "Automatically'Generating Extraction Patterns from Untagged Text",
- "authors": [
- {
- "first": "Ellen",
- "middle": [],
- "last": "Riloff",
- "suffix": ""
- }
- ],
- "year": null,
- "venue": "Proceedings of the Thirteenth National Conference on Artificial Intelligence",
- "volume": "",
- "issue": "",
- "pages": "1044--1049",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Ellen Riloff. Automatically'Generating Extraction Pat- terns from Untagged Text. In Proceedings of the Thir- teenth National Conference on Artificial Intelligence, 1044-1049. The AAAI Press/MIT Press.",
- "links": null
- },
- "BIBREF8": {
- "ref_id": "b8",
- "title": "Classification in the KL-ONE knowledge representation system",
- "authors": [
- {
- "first": "J",
- "middle": [
- "G"
- ],
- "last": "Schmolze",
- "suffix": ""
- },
- {
- "first": "T",
- "middle": [],
- "last": "Lipkis",
- "suffix": ""
- }
- ],
- "year": 1983,
- "venue": "Proceedings of 8th Int'l Joint Conference on Artificial Intelligence (IJCAI83)",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "J.G. Schmolze and T. Lipkis. Classification in the KL- ONE knowledge representation system. Proceedings of 8th Int'l Joint Conference on Artificial Intelligence (IJCAI83), 1983.",
- "links": null
- },
- "BIBREF9": {
- "ref_id": "b9",
- "title": "Learning to extract text-based information from the world wide web",
- "authors": [
- {
- "first": "S",
- "middle": [],
- "last": "Soderland",
- "suffix": ""
- }
- ],
- "year": null,
- "venue": "the Proceedings of the Third International Conference on Knowledge Discover# and Data Mining (KDD-97)",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "S. Soderland. Learning to extract text-based informa- tion from the world wide web. In the Proceedings of the Third International Conference on Knowledge Dis- cover# and Data Mining (KDD-97).",
- "links": null
- },
- "BIBREF10": {
- "ref_id": "b10",
- "title": "Understanding Subsumption and Taxonomy: A Framework for Progress",
- "authors": [],
- "year": 1991,
- "venue": "the Principles of Semantic Networks: Explorations in the Representation of Knowledge",
- "volume": "",
- "issue": "",
- "pages": "45--94",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Text REtrieval Conference. http://trec.nist.gov 1999 W.A. Woods. Understanding Subsumption and Taxon- omy: A Framework for Progress. In the Principles of Semantic Networks: Explorations in the Represen- tation of Knowledge, Morgan Kaufmann, San Mateo, Calif. 1991, pages 45-94.",
- "links": null
- },
- "BIBREF11": {
- "ref_id": "b11",
- "title": "A Better way to Organize Knowledge",
- "authors": [
- {
- "first": "W",
- "middle": [
- "A"
- ],
- "last": "Woods",
- "suffix": ""
- }
- ],
- "year": 1997,
- "venue": "Technical Report of Sun Microsystems Inc",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "W.A. Woods. A Better way to Organize Knowledge. Technical Report of Sun Microsystems Inc., 1997.",
- "links": null
- }
- },
- "ref_entries": {
- "FIGREF0": {
- "num": null,
- "type_str": "figure",
- "uris": null,
- "text": "Wo,aN=l C\u00b0~Tr~ A Co.=i= ~. C\u00b0, V.=~tio.~=a~[ I The knowledge classification diagram"
- },
- "FIGREF1": {
- "num": null,
- "type_str": "figure",
- "uris": null,
- "text": "Merging a structure of concepts with WordNet"
- },
- "FIGREF2": {
- "num": null,
- "type_str": "figure",
- "uris": null,
- "text": "A sample of concepts and relations acquired from the 5000 sentence corpus. Legend: continue lines represent influence inverse proportionally, dashed lines represent influence direct proportionally, and dotted lines represent influence (the direction of the relationship was not specified in the text)."
- },
- "TABREF2": {
- "html": null,
- "num": null,
- "content": "<table><tr><td>thus linked by a relation nYPERNYMY(interest_rate,</td></tr><tr><td>mortgage_interest_rate).</td></tr></table>",
- "type_str": "table",
- "text": "The classification is based on the simple idea that a compound concept [word, head] is ontologically subsumed by concept [head]. For example, mortgage_interest_rate is a kind of interest_rate,"
- },
- "TABREF3": {
- "html": null,
- "num": null,
- "content": "<table><tr><td colspan=\"2\">4. When neither [wordl head] nor [words head] are</td></tr><tr><td colspan=\"2\">in the knowledge base, then place [wordl word~</td></tr><tr><td colspan=\"2\">head] under the [head]. The example in Figure</td></tr><tr><td colspan=\"2\">3 corresponds to case 3.</td></tr><tr><td colspan=\"2\">components ;y/</td></tr><tr><td>radio components</td><td>automobile components /</td></tr><tr><td colspan=\"2\">automobile radio components</td></tr><tr><td colspan=\"2\">Figure 3: Classification of a compound concept with respect to its ~ concepts</td><td>subsumes [word2, heads]. The subsumption may not al-ways be a direct connection; sometimes it may</td></tr><tr><td colspan=\"2\">Since we do not deal here with the sentence seman-tics, it is not possible to completely determine the meaning of [word1 word2 head], as it may be either [((word1 word2) head)] or [(word1 (words head))] of-ten depending on the sentence context. In the example of Figure 3 there is only one mean-ing, i.e. [(automobile radio) components]. However, in the case of ~erformance skiing equipment] there are two valid interpretations, namely [(performance skiing) equipment] and ~erformance (skiing equip-ment)].</td><td>consist of a chain of subsumption relations since subsumption is (usually) a transitive relation (Woods 1991). An example is shown in Fig-ure 2a; in WordNet, A particular case of this is when head1 is iden-tical with head2. 2. Another case is when there is no direct sub-sumption relation in WordNet between word1 and words, and/or head1 and heads, but there are a common subsuming concepts, for each pair. When such concepts are found, pick</td></tr><tr><td colspan=\"2\">Procedure 4.4 Classify a concept [word1, head] with</td><td>the most specific common subsumer (MSCS) concepts of word1 and words, and of head1</td></tr><tr><td colspan=\"2\">The task here is to identify the most specific sub-sumer (MSS) from all the concepts under the head that subsumes [wordx, head]. By default, [wordl head] is placed under [head], however, since it may be more specific than other hyponyms of [head], a more complex classification analysis needs to be im-plemented.</td><td>and head2, respectively. Then form a concept [MSCS(wordz, words), MSCS(headl, head2)] and place [word1 headz] and [words heads] un-der it. This is exemplified in Figure 2b. In WordNet, country 3. In all other cases, no subsumption relation is es-</td></tr><tr><td/><td/><td>tablished between the two concepts. For exam-</td></tr><tr><td/><td/><td>ple, we cannot say whether Asian_country dis-</td></tr><tr><td/><td/><td>count_rate is more or less abstract then Japan</td></tr><tr><td/><td/><td>interest_rate.</td></tr><tr><td/><td colspan=\"2\">Procedure 4.3. Classify concept [word1 words head].</td></tr><tr><td/><td colspan=\"2\">Several poss!bilities exist: 1. When there is already a concept [words head]</td></tr><tr><td/><td/><td>in the knowledge base under the [head], then</td></tr><tr><td/><td/><td>place [wordl words head] under concept [words</td></tr><tr><td/><td/><td>head].</td></tr><tr><td/><td/><td>2. When there is already a concept [wordz head]</td></tr><tr><td/><td/><td>in the knowledge base under the [head], then</td></tr><tr><td/><td/><td>place [wordl word2 head] under concept [wordl</td></tr><tr><td/><td/><td>head].</td></tr><tr><td/><td colspan=\"2\">3. When both cases 1 and 2 are true then place</td></tr><tr><td/><td/><td>[wordz word2 head] under both concepts.</td></tr></table>",
- "type_str": "table",
- "text": "Asian_country subsumes Japan and interest_rate subsumes discount_rate. Subsumes Japan and Germany, and interest_rate subsumes discount_rate and prime_interest_rate."
- },
- "TABREF5": {
- "html": null,
- "num": null,
- "content": "<table><tr><td>[I Relations I</td><td>Lexico-syntactic Patterns</td><td>Examples</td></tr><tr><td>H</td><td/><td>WordNet Relations</td></tr><tr><td colspan=\"2\">HYPERNYMY I NP1 [&lt;be&gt;] a kind of NP2</td><td>Thus, New Relations</td></tr><tr><td>CAUSE</td><td>NPI [&lt;be&gt;] cause NP2</td><td/></tr><tr><td/><td>=~ CAUSE(NPI,NP2)</td><td/></tr><tr><td>INFLUENCE</td><td>NP1 impact on NP2</td><td/></tr><tr><td/><td>INFLU~NCZ(NP1,NP2)</td><td/></tr><tr><td/><td>As NP1 vb, so &lt;do&gt; NP2</td><td/></tr><tr><td/><td>=&gt; INFLUENCE(NPI,NP2)</td><td/></tr><tr><td/><td>NP1 &lt;be&gt; associated with NP2</td><td/></tr><tr><td/><td>=&gt; INFLUENCE(NP1,NP2)</td><td/></tr><tr><td/><td>INFLUENCE(NP2,NPI)</td><td/></tr><tr><td/><td>As/if/when NP1 vbl, NP2 vb2. -{-</td><td/></tr><tr><td/><td>vbl, vb2 ----antonyms / go in</td><td/></tr><tr><td/><td>opposite directions</td><td/></tr><tr><td/><td>::~ INFLUENCE(NPI,NP2)</td><td/></tr><tr><td/><td>the effect(s) of NP1 on/upon NP2</td><td/></tr><tr><td/><td>::&gt; INFLUENCE(NPI,NP2)</td><td/></tr><tr><td/><td>inverse relationship between</td><td/></tr><tr><td/><td>NPI and NP2</td><td/></tr><tr><td/><td>=&gt; INFLUENCE(NP1,NP2)</td><td/></tr><tr><td/><td>=~ INFLUENCE(NP2,NP1)</td><td/></tr><tr><td/><td>NP2 &lt;be&gt; function of NP1</td><td/></tr><tr><td/><td>=# INFLUENCZ(NP1,NP2)</td><td/></tr><tr><td/><td>NP1 (and thus NP2)</td><td/></tr></table>",
- "type_str": "table",
- "text": "LIBOR is a kind of interest rate, as it is charged I ::~ HYPERNYMY(NPI,NP2) on deposits between banks in the Eurodolar market."
- },
- "TABREF6": {
- "html": null,
- "num": null,
- "content": "<table><tr><td/><td>la</td><td/><td>I bl</td><td colspan=\"2\">c Id</td><td>I e II</td></tr><tr><td>concepts (NPs)</td><td colspan=\"2\">773</td><td>382</td><td>833</td><td>921</td><td>.</td></tr><tr><td colspan=\"5\">Total concepts extracted with Procedurel</td><td/><td/></tr><tr><td>Concepts found</td><td/><td/><td/><td/><td/><td/></tr><tr><td>in WordNet</td><td/><td>2</td><td>0</td><td>1</td><td>0</td><td>2</td></tr><tr><td>Concepts</td><td>Concepts</td><td/><td/><td/><td/><td/></tr><tr><td>found in</td><td>with seed</td><td>6</td><td>0</td><td>3</td><td>0</td><td>0</td></tr><tr><td>on-line</td><td>head</td><td/><td/><td/><td/><td/></tr><tr><td>dictionaries,</td><td>Concepts</td><td/><td/><td/><td/><td/></tr><tr><td>but not in</td><td>with seed</td><td>7</td><td>0</td><td>1</td><td>1</td><td>1</td></tr><tr><td>WordNet</td><td>not head</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">I C\u00b0ncepts accepted</td><td/><td/><td>I</td><td/><td/></tr><tr><td>by human</td><td/><td>78</td><td>62</td><td>58</td><td>60</td><td>37</td></tr></table>",
- "type_str": "table",
- "text": "Examples of lexico-syntactic patterns and semantic relations derived from the 5000 sentence corpus"
- },
- "TABREF7": {
- "html": null,
- "num": null,
- "content": "<table><tr><td>from the corpus related to (a) interest rate, (b) stock market, (c)</td></tr><tr><td>inflation, (d) economic 9rowth, a~ld (e) employment.</td></tr></table>",
- "type_str": "table",
- "text": "Results showing the number of new concepts learned"
- },
- "TABREF8": {
- "html": null,
- "num": null,
- "content": "<table><tr><td>sentence corpus</td></tr></table>",
- "type_str": "table",
- "text": "A part of the relationships derived from the 5000"
- }
- }
- }
- }
Full_text_JSON/prefixA/json/A00/A00-1038.json DELETED
@@ -1,540 +0,0 @@
- {
- "paper_id": "A00-1038",
- "header": {
- "generated_with": "S2ORC 1.0.0",
- "date_generated": "2023-01-19T01:12:52.768508Z"
- },
- "title": "Large-scale Controlled Vocabulary Indexing for Named Entities",
- "authors": [
- {
- "first": "Mark",
- "middle": [],
- "last": "Wasson",
- "suffix": "",
- "affiliation": {
- "laboratory": "",
- "institution": "LEXIS-NEXIS",
- "location": {
- "postCode": "9443, 45342",
- "settlement": "Springboro Pike Miamisburg",
- "region": "Ohio",
- "country": "USA"
- }
- },
- "email": "[email protected]"
- }
- ],
- "year": "",
- "venue": null,
- "identifiers": {},
- "abstract": "A large-scale controlled vocabulary indexing system is described. The system currently covers almost 70,000 named entity topics, and applies to documents from thousands of news publications. Topic definitions are built through substantially automated knowledge engineering.",
- "pdf_parse": {
- "paper_id": "A00-1038",
- "_pdf_hash": "",
- "abstract": [
- {
- "text": "A large-scale controlled vocabulary indexing system is described. The system currently covers almost 70,000 named entity topics, and applies to documents from thousands of news publications. Topic definitions are built through substantially automated knowledge engineering.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Abstract",
- "sec_num": null
- }
- ],
- "body_text": [
- {
- "text": "The goal of the Entity Indexing R&D program at LEXIS-NEXIS is to add controlled vocabulary indexing for named entities to searchable fields in appropriate news documents across thousands of news publications, where documents include both incoming news articles as well as news articles already in the LEXIS-NEXIS archives. A controlled vocabulary term (CVT) is a consistently specified topic indicator that users can incorporate into their queries in order to retrieve documents about the corresponding topic. When a CVT is added to an appropriate field in a document, it can be included in a Boolean query using",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": "1"
- },
- {
- "text": "The initial Entity Indexing release focused on companies as topics. For company indexing, the primary CFT is a standard form of the company name. When we add a company CVT to a document, we often also want to add secondary CFTs to the document that specify attributes of that company. Attributes may include the ticker symbol, SIC product codes, industry codes and company headquarters information. Secondary CVTs allow customers to easily search on groups of companies that have one or more attributes in common, such as searching for documents about banks in our set of companies that are headquartered in Utah.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "It is generally easy to get high recall with Boolean queries when searching for documents about named entities. Typically the query will only need a short form of the entity's name. For example, the query American will retrieve virtually every document that mentions American Airlines. Of course, this query results in poor precision due to the ambiguity of American. The problem we wanted to address with controlled vocabulary indexing is to help online customers limit their search results to only those documents that contain a major reference to the topic, that is, to documents that are substantially about the topic.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "Because of the volume of news data we have, it is necessary that we fully automate the document categorization and indexing step in our data preparation process. LEXIS-NEXIS adds 100,000 news articles daily to its collection of over 2 billion documents.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "For marketing and product positioning reasons, we want to provide indexing for tens of thousands of companies, where companies are targeted based on their presence on the New York, American and NASDAQ exchanges or on revenue-based criteria. Although such selection criteria help us explain the product feature to customers, it does not ensure that the targeted companies actually appear all that often in the news. In fact, for many targeted companies there is little training and test data available. Our company indexing system should address the following business product requirements:",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "\u2022 Assign primary and corresponding secondary CVTs to appropriate documents",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "\u2022 Add CVTs only to those documents that contain major references to the topic(s)",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "\u2022 Process documents fast; target 30,000 characters per CPU second",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "\u2022 Apply tens of thousands of topics to the data in a single pass * Minimize the cost of developing and maintaining topic definitions Also, we target 90% recall and 95% precision when using the CVTs to retrieve major reference documents about the corresponding companies.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "field-name(controlled vocabulary term)",
- "sec_num": null
- },
- {
- "text": "The Carnegie Group's Text Categorization Shell (TCS) (Hayes, 1992) uses shallow knowledge engineering techniques to categorize documents with respect to large sets of predefined topics. Each topic requires the development of a rule set that includes terms, contextual information, weighting, if-then rules and other pattern matching operations. This initially involved a manual, iterative approach to rule development, although Hayes (1992) discusses their intent to explore ways to automate this. TCS accuracy is quite good. One application deployed at Reuters achieved 94% recall and 84% precision. Other reported tests achieved recall and precision rates of 90% or better.",
- "cite_spans": [
- {
- "start": 53,
- "end": 66,
- "text": "(Hayes, 1992)",
- "ref_id": "BIBREF3"
- },
- {
- "start": 428,
- "end": 440,
- "text": "Hayes (1992)",
- "ref_id": "BIBREF3"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Related Work",
- "sec_num": "2"
- },
- {
- "text": "SRA's NameTag (Krupka, 1995) A problem for pattern recognition approaches has to do with our requirement to assign CVTs. Pattern recognition approaches extract patterns such as company names as they appear in the text. Limited coreference resolution may link variant forms of names with one another to support choosing the best variant as a \"semi-controlled\" vocabulary term, but this does not allow for the assignment of true primary and secondary CVTs. SRA has attempted to address this through its Name Resolver function, which reconciles extracted names with an authority file, but the authority file also limits scope of coverage for CVTs to those that are defined and maintained in the authority file. The system must also go beyond straight recognition in order to make a distinction between documents with major references to the targeted entities and documents with lesser or passing references. SRA's NameTag addresses this with the calculation of relevance scores for each set of linked variant forms.",
- "cite_spans": [
- {
- "start": 14,
- "end": 28,
- "text": "(Krupka, 1995)",
- "ref_id": "BIBREF4"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Related Work",
- "sec_num": "2"
- },
- {
- "text": "Preliminary research suggests that recognizing named entities in data and queries may lead to a significant improvement in retrieval quality (Thompson & Dozier, 1999) . Such an approach may complement Entity Indexing, but it does not yet meet the controlled vocabulary indexing and accuracy requirements for Entity Indexing.",
- "cite_spans": [
- {
- "start": 141,
- "end": 166,
- "text": "(Thompson & Dozier, 1999)",
- "ref_id": "BIBREF6"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Related Work",
- "sec_num": "2"
- },
- {
- "text": "Our own Term-based Topic Identification (TFI) system (Leigh, 1991) combines knowledge engineering with limited learning in support of document categorization and indexing by CVTs. We have used TI'I since 1990 to support a number of topically-related news or legal document collections. Categories are defined through topic definitions. A definition includes terms to look up, term weights, term frequency thresholds, document selection scoring thresholds, one or more CVTs, and source-specific document structure information. Although creating TI'I topic definitions is primarily an iterative, manual task, limited regressionbased supervised learning tools are available to help identify functionally redundant terms, and to suggest term weights, frequency thresholds and scoring thresholds. When these tools have been used in building topic definitions, recall and precision have topped 90% in almost all tests.",
- "cite_spans": [
- {
- "start": 53,
- "end": 66,
- "text": "(Leigh, 1991)",
- "ref_id": "BIBREF5"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Related Work",
- "sec_num": "2"
- },
- {
- "text": "\"ITI was originally proposed as a tool to support controlled vocabulary indexing, and most early tests focused on narrowly defined legal and news topics such as insurable interest and earthquakes.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Approach",
- "sec_num": "3"
- },
- {
- "text": "TH was also tested on a number of companies, people, organizations and places as topics. TTI was first put into production to categorize documents by broadly-defined topics such as Europe political and business news and federal tax law.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Approach",
- "sec_num": "3"
- },
- {
- "text": "When we began investigating the possibility of creating Entity Indexing, TTI was a natural starting point. It had demonstrated high accuracy and flexibility across a variety of topics and data types. Three problems were also apparent. First, TTI would not scale to support several thousand topics. Second, it took a long time to build a topic definition, about one staff day each. Third, topics were defined on a publication-specific basis. With then-700 publications in our news archives in combination with our scale goals and the time needed to build topic definitions, the definition building costs were too high. We needed to scale the technology, and we needed to substantially automate the topic definition-building process.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Approach",
- "sec_num": "3"
- },
- {
- "text": "For Entity Indexing, we addressed scale concerns through software tuning, substantially improved memory management, a more efficient hash function in the lookup algorithm, and moving domainspecific functionality from topic definitions into the software. The rest of this paper focuses on the cost of building the definitions.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Approach",
- "sec_num": "3"
- },
- {
- "text": "In order to reduce definition building costs, we originally believed that we would focus on increasing our reliance on TTrs training tools. Training data would have to include documents from a variety of publications if we were to be able to limit definitions to one per topic regardless of the publications covered.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Analyzing Companies in the News",
- "sec_num": "3.1"
- },
- {
- "text": "Unfortunately the data did not cooperate. Using a list of all companies on the major U.S. stock exchanges, we randomly selected 89 companies for investigation. Boolean searches were used to retrieve documents that mentioned those companies. We found that several of these companies were rarely mentioned in the news. One company was not mentioned at all in our news archives, a second one was mentioned only once, and twelve were mentioned only in passing. Several appeared as major references in only a few documents. In a second investigation involving 40,000 companies from various sources, fully half appeared in zero or only one news document in one two-year window. We questioned whether we even wanted to create topic definitions for such rarely occurring companies. Again, marketing and product positioning reasons dictated that we do so: it is easier to tell customers that the product feature covers all companics that meet one of a few criteria than it is to give customers a list of the individual companies covered. It is also reasonable to assume that public and larger companies may appear in the news at some future point.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Analyzing Companies in the News",
- "sec_num": "3.1"
- },
- {
- "text": "While analyzing news articles about these companies, we noted how company names and their variants were used. For most companies discussed in documents that contained major references to the company, some form of the full company name typically appears in the leading text. Shorter variants typically are used for subsequent mentions as well as in the headline. Corresponding ticker symbols often appear in the headline or leading text, but only after a variant of the company name appears. In some publications, ticker symbols are used as shorter variants throughout the document. Acronyms are somewhat rare; when they are used, they behave like other shorter variants of the name. We assign weights to term variants based on term length and the presence or absence of company designators. Longer variants with designators are regarded as less ambiguous than shorter variants without designators, and thus have higher weights. A table of particularly problematic one-word variants, such as American, National and General, is used to make a weighting distinction between these and other one-word variants. One-word variants are also marked so they do not match lower case strings during lookup. A label is assigned to each variant to indicate its function and relative strength in the document categorization process.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Company Name Usage",
- "sec_num": "3.2"
- },
- {
- "text": "Our company controlled vocabulary indexing process requires definition builders to provide a primary CVT and zero or more secondary CVTs for each targeted company. The CVTs are the primary input to the automatic topic definition generation process. If the definition builder provides the company name and ticker symbol to be used as CVTs for some company, as in",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Generating Topic Defmitions",
- "sec_num": "3.3"
- },
- {
- "text": "the following definition can be generated automatically: That two acronyms were generated points out a potential problem with using robust variant generation as a means to automatically build topic definitions. Overgeneration will produce some name variants that have nothing to do with the company. However, although overgeneration of variants routinely occurs, testing showed that such overgeneration has little adverse effect.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "#CO = Samson Computing Supply lnc. #TS = SMCS (NrSE)",
- "sec_num": null
- },
- {
- "text": "#NAME1 =",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "#CO = Samson Computing Supply lnc. #TS = SMCS (NrSE)",
- "sec_num": null
- },
- {
- "text": "This approach to automatically generating topic definitions is successful for most companies, including those that appear rarely in our data, because most company names and their variants have consistent structure and use patterns. There are exceptions. Some companies are so well-lmown that they often appear in news articles without the corresponding full company name. Large companies and companies with high visibility (e.g., Microsoft, AT&T, NBC and Kmart) are among these. Other companies simply have unusual names. Our authority file is an editable text file where definition builders not only store and maintain the primary and secondary CVTs for each company, but it also allows builders to specify exception information that can be used to override any or all of the results of the automatic definition generation process. In addition, builders can use two additional labels to identify especially strong name variants (e.g., IBM for International Business Machines) and related terms whose presence in a document provide disambiguating context (e.g., Delta, airport and flights for American Airlines, often referred to only as American). For our initial release of 15,000 companies, 17% of the definitions had some manual intervention beyond providing primary and secondary CVTs. Entity definitions built entirely manually usually took less than thirty minutes apiece. Overall, on average less than five minutes were spent per topic on definition building. This includes the time used to identify the targeted companies and add their primary and secondary CVTs to the authority file. Populating the authority file is required regardless of the technical approach used.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "#CO = Samson Computing Supply lnc. #TS = SMCS (NrSE)",
- "sec_num": null
- },
- {
- "text": "All topic definitions contain a set of labeled terms to look up. The document categorization process combines these into a large lookup table. A lookup step applies the table to a document and records term frequency information. If a match occurs in the headline or leading text, extra \"frequency\" is recorded in order to place extra emphasis on lookup matches in those parts of the document. If the same term is in several definitions (e.g., American is a short name variant in hundreds of definitions), frequency information is recorded for each definition.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Applying Definitions to Documents",
- "sec_num": "3.4"
- },
- {
- "text": "Once the end of the document is reached, frequency and term label-based weights are used to calculate a score for each topic. If the score exceeds some threshold, corresponding CVTs are added to the document. Typically a few matches of high-weight terms or a variety of lower-weighted terms are necessary to produce scores above the threshold. A document may be about more than one targeted topic.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Applying Definitions to Documents",
- "sec_num": "3.4"
- },
- {
- "text": "The tools used to build and maintain topic definitions were implemented in C/C++ on UNIX-based workstations. The document categorization process was implemented in PL1 and a proprietary lexical scanner, and operates in a mainframe MVS environment.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "System Implementation",
- "sec_num": "3.5"
- },
- {
- "text": "In the fmal pre-release test, Entity Indexing was applied to more than 13,500 documents from 250 publications. Each document in the test was reviewed by a data analyst. Several of these were also reviewed by a researcher to verify the analysts' consistency with the formal evaluation criteria. Recall was 92.0% and precision was 96.5% when targeting documents with major references. Additional spot tests were done after the process was applied in production to archived documents and to incoming documents. These tests routinely showed recall and precision to be in the 90% to 96% range on over 100,000 documents examined.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Evaluation",
- "sec_num": "4"
- },
- {
- "text": "Some recall errors were due to company names with unusual structure. Many such problems can be addressed through manual intervention in the topic definitions. Some publication styles also led to recall errors. One publisher introduces a variety of unanticipated abbreviations, such as Intnl for International. Trade publications tend to use only short forms of company names even for lesser known companies. Those companies may be wellknown only within an industry and thus to the audience of the trade publication. These types of problems can be addressed through manual intervention in the topic definitions, although for the abbreviations problem this is little more than patching.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Evaluation",
- "sec_num": "4"
- },
- {
- "text": "Capitalized and all upper case text in headlines and section headings was a routine source of precision errors. These often led to unwanted term matching, particularly affecting acronyms and one-word company name variants. Different companies with similar names also led to precision problems. This was particularly tree for subsidiaries of the same company whose names differed only by geographically-distinct company designators.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Evaluation",
- "sec_num": "4"
- },
- {
- "text": "Entity Indexing applies in production to tens of thousands of documents daily from thousands of news publications. Further tests have shown that we can reach comparably high levels of accuracy for company topics when Entity Indexing is applied to financial, patents and public records sources. Accuracy rates dropped about 10% in tests applied to case law parties.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Discussion",
- "sec_num": "5"
- },
- {
- "text": "Since the initial completion of the company indexing work, Entity Indexing has been extended to cover more companies and other named entities, including people, organizations and places. Topic definitions have been built for almost 70,000 entities, including over 30,000 companies. Entity Indexing applies these definitions during a single pass of the data, processing more than 86,000 characters (approximately 16 news documents) per CPU second.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Discussion",
- "sec_num": "5"
- },
- {
- "text": "The approach used for the other named entity types is similar to that for companies. However, because most of the place names we targeted lacked useful internal structure, manual intervention was a part of creating all 800 definitions for places. Accuracy rates for people, organization and geographic indexing are comparable to those for company indexing.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Discussion",
- "sec_num": "5"
- },
- {
- "text": "Knowledge engineering can be a bottleneck in building large-scale applications, which is why machine learning-based approaches are often preferred, but there has been little work in quantifying the difference between the two approaches in linguistic tasks (Brill & Ngai, 1999) . In our case, adopting a machine learning-based approach was a problem not just because we lacked annotated training data, but for many of the topics we were required to target we had little or no available data at all. However, because of the regularity we observed in company name variants and their use across a variety of news sources, we determined that the knowledge engineering task would be quite repetitive and thus could be automated for most companies.",
- "cite_spans": [
- {
- "start": 256,
- "end": 276,
- "text": "(Brill & Ngai, 1999)",
- "ref_id": "BIBREF2"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Discussion",
- "sec_num": "5"
- },
- {
- "text": "Our Entity Indexing system meets all of our business requirements for accuracy, scale and performance. We have also substantially automated the definition building process. Even if a number of documents were available for each targeted entity, it is unlikely that we would see the time needed to populate the authority file and to create and annotate appropriate training data in a machine learning-based approach fall much below the under five minutes we average per definition with our chosen approach.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Discussion",
- "sec_num": "5"
- }
- ],
- "back_matter": [
- {
- "text": "I would like to thank colleagues Mary Jane Battle, Ellen Hamilton, Tom Kresin, Sharon Leigh, Mark Shewhart, Chris White, Christi Wilson and others for their roles in the research, development and ongoing production support of Entity Indexing.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Acknowledgments",
- "sec_num": null
- }
- ],
- "bib_entries": {
- "BIBREF1": {
- "ref_id": "b1",
- "title": "An Intelligent Multilingual Information Browsing and Retrieval System Using Information Extraction",
- "authors": [],
- "year": null,
- "venue": "Proceedings of the Fifth Conference on Applied Natural Language Processing",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "An Intelligent Multilingual Information Browsing and Retrieval System Using Information Extrac- tion. Proceedings of the Fifth Conference on Ap- plied Natural Language Processing.",
- "links": null
- },
- "BIBREF2": {
- "ref_id": "b2",
- "title": "Man vs. Machine: A Case Study in Base Noun Phrase Learning",
- "authors": [
- {
- "first": "E",
- "middle": [],
- "last": "Brill",
- "suffix": ""
- },
- {
- "first": "G",
- "middle": [],
- "last": "Ngai",
- "suffix": ""
- }
- ],
- "year": 1999,
- "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Brill, E., & Ngai, G. (1999). Man vs. Machine: A Case Study in Base Noun Phrase Learning. Pro- ceedings of the 37th Annual Meeting of the As- sociation for Computational Linguistics.",
- "links": null
- },
- "BIBREF3": {
- "ref_id": "b3",
- "title": "Intelligent High-volume Text Processing Using Shallow, Domain-specific Techniques",
- "authors": [
- {
- "first": "P",
- "middle": [],
- "last": "Hayes",
- "suffix": ""
- }
- ],
- "year": 1992,
- "venue": "Text-based Intelligent Systems. Lawrence Erlbaum",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Hayes, P. (1992). Intelligent High-volume Text Processing Using Shallow, Domain-specific Techniques. In P. Jacobs (ed.), Text-based Intel- ligent Systems. Lawrence Erlbaum.",
- "links": null
- },
- "BIBREF4": {
- "ref_id": "b4",
- "title": "SRA. Description of the SRA System as Used for MUC-6",
- "authors": [
- {
- "first": "G",
- "middle": [],
- "last": "Krupka",
- "suffix": ""
- }
- ],
- "year": 1995,
- "venue": "Proceedings of the Sixth Message Understanding Conference (MUC-6)",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Krupka, G. (1995). SRA. Description of the SRA System as Used for MUC-6. Proceedings of the Sixth Message Understanding Conference (MUC-6).",
- "links": null
- },
- "BIBREF5": {
- "ref_id": "b5",
- "title": "The Use of Natural Language Processing in the Development of Topic Specific Databases",
- "authors": [
- {
- "first": "S",
- "middle": [],
- "last": "Leigh",
- "suffix": ""
- }
- ],
- "year": 1991,
- "venue": "Proceedings of the Twelfth National Online Meeting",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Leigh, S. (1991). The Use of Natural Language Processing in the Development of Topic Specific Databases. Proceedings of the Twelfth National Online Meeting.",
- "links": null
- },
- "BIBREF6": {
- "ref_id": "b6",
- "title": "Name Recognition and Retrieval Performance",
- "authors": [
- {
- "first": "P",
- "middle": [],
- "last": "Thompson",
- "suffix": ""
- },
- {
- "first": "C",
- "middle": [],
- "last": "Dozier",
- "suffix": ""
- }
- ],
- "year": 1999,
- "venue": "",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Thompson, P., & Dozier, C. (1999). Name Recog- nition and Retrieval Performance. In T.",
- "links": null
- },
- "BIBREF7": {
- "ref_id": "b7",
- "title": "Natural Language Information Retrieval",
- "authors": [
- {
- "first": "",
- "middle": [],
- "last": "Strzalkowski",
- "suffix": ""
- }
- ],
- "year": null,
- "venue": "",
- "volume": "",
- "issue": "",
- "pages": "",
- "other_ids": {},
- "num": null,
- "urls": [],
- "raw_text": "Strzalkowski (ed.), Natural Language Informa- tion Retrieval. Kluwer Academic,",
- "links": null
- }
- },
- "ref_entries": {}
- }
- }
Full_text_JSON/prefixA/json/A00/A00-1039.json DELETED
@@ -1,1166 +0,0 @@
- {
- "paper_id": "A00-1039",
- "header": {
- "generated_with": "S2ORC 1.0.0",
- "date_generated": "2023-01-19T01:12:12.645894Z"
- },
- "title": "Unsupervised Discovery of Scenario-Level Patterns for Information Extraction",
- "authors": [
- {
- "first": "Roman",
- "middle": [],
- "last": "Yangarber",
- "suffix": "",
- "affiliation": {},
- "email": ""
- },
- {
- "first": "Ralph",
- "middle": [],
- "last": "Grishman",
- "suffix": "",
- "affiliation": {},
- "email": ""
- }
- ],
- "year": "",
- "venue": null,
- "identifiers": {},
- "abstract": "Information Extraction (IE) systems are commonly based on pattern matching. Adapting an IE system to a new scenario entails the construction of a new pattern base-a timeconsuming and expensive process. We have implemented a system for finding patterns automatically from un-annotated text. Starting with a small initial set of seed patterns proposed by the user, the system applies an incremental discovery procedure to identify new patterns. We present experiments with evaluations which show that the resulting patterns exhibit high precision and recall.",
- "pdf_parse": {
- "paper_id": "A00-1039",
- "_pdf_hash": "",
- "abstract": [
- {
- "text": "Information Extraction (IE) systems are commonly based on pattern matching. Adapting an IE system to a new scenario entails the construction of a new pattern base-a timeconsuming and expensive process. We have implemented a system for finding patterns automatically from un-annotated text. Starting with a small initial set of seed patterns proposed by the user, the system applies an incremental discovery procedure to identify new patterns. We present experiments with evaluations which show that the resulting patterns exhibit high precision and recall.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Abstract",
- "sec_num": null
- }
- ],
- "body_text": [
- {
- "text": "The task of Information Extraction (I-E) is the selective extraction of meaning from free natural language text. I \"Meaning\" is understood here in terms of a fixed set of semantic objects--entities, relationships among entities, and events in which entities participate. The semantic objects belong to a small number of types, all having fixed regular structure, within a fixed and closely circumscribed subject domain. The extracted objects are then stored in a relational database. In this paper, we use the nomenclature accepted in current IE literature; the term subject domain denotes a class of textual documents to be processed, e.g., \"business news,\" and scenario denotes the specific topic of interest within the domain, i.e., the set of facts to be extracted. One example of a scenario is \"management succession,\" the topic of MUC-6 (the Sixth Message Understanding Conference); in this scenario the system seeks to identify events in which corporate managers left 1For general references on IE, cf., e.g., (Pazienza, 1997; muc, 1995; muc, 1993) . their posts or assumed new ones. We will consider this scenario in detail in a later section describing experiments. IE systems today are commonly based on pattern matching. The patterns are regular expressions, stored in a \"pattern base\" containing a general-purpose component and a substantial domain-and scenario-specific component.",
- "cite_spans": [
- {
- "start": 1017,
- "end": 1033,
- "text": "(Pazienza, 1997;",
- "ref_id": null
- },
- {
- "start": 1034,
- "end": 1044,
- "text": "muc, 1995;",
- "ref_id": null
- },
- {
- "start": 1045,
- "end": 1055,
- "text": "muc, 1993)",
- "ref_id": null
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "Portability and performance are two major problem areas which are recognized as impeding widespread use of IE. This paper presents a novel approach, which addresses both of these problems by automatically discovering good patterns for a new scenario. The viability of our approach is tested and evaluated with an actual IE system.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "In the next section we describe the problem in more detail in the context of our IE system; sections 2 and 3 describe our algorithm for pattern discovery; section 4 describes our experimental results, followed by comparison with prior work and discussion, in section 5.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "1 The IE System",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "Our IE system, among others, contains a a backend core engine, at the heart of which is a regular-e~xpression pattern matcher. The engine draws on attendant knowledge bases (KBs) of varying degrees of domain-specificity. The KB components are commonly factored out to make the systems portable to new scenarios. There are four customizable knowledge bases in our IE system: the Lexicon contains general dictionaries and scenario-specific terms; the concept base groups terms into classes; the predicate base describes the logical structure of events to be extracted, and the pattern base contains patterns that catch the events in text.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "Each KB has a. substantial domain-specific component, which must be modified when mov-ing to new domains and scenarios. The system allows the user (i.e. scenario developer) to start with example sentences in text which contain events of interest, the candidates, and generalize them into patterns. However, the user is ultimately responsible for finding all the candidates, which amounts to manually processing example sentences in a very large training corpus. Should s/he fail to provide an example of a particular class of syntactic/semantic construction, the system has no hope of recovering the corresponding events. Our experience has shown that (1) the process of discovering candidates is highly expensive, and (2) gaps in patterns directly translate into gaps in coverage. How can the system help automate the process of discovering new good candidates? The system should find examples of all common linguistic constructs relevant to a scenario. While there has been prior research on identifying the primary lexical patterns of a sub-language or corpus (Grishman et al., 1986; Riloff, 1996) , the task here is more complex, since we are typically not provided in advance with a sub-corpus of relevant passages; these passages must themselves be found as part of the discovery process.",
- "cite_spans": [
- {
- "start": 1063,
- "end": 1086,
- "text": "(Grishman et al., 1986;",
- "ref_id": "BIBREF2"
- },
- {
- "start": 1087,
- "end": 1100,
- "text": "Riloff, 1996)",
- "ref_id": "BIBREF14"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "The difficulty is that one of the best indications of the relevance of the passages is precisely the presence of these constructs. Because of this circularity, we propose to acquire the constructs and passages in tandem.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Introduction",
- "sec_num": null
- },
- {
- "text": "We outline our procedure for automatic acquisition of patterns; details are elaborated in later sections. The procedure is unsupervised in that it does not require the training corpus to be manually annotated with events of interest, nor a pro-classified corpus with relevance judgements, nor any feedback or intervention from the user 2. The idea is to combine IR-style document selection with an iterative relaxation process; this is similar to techniques used elsewhere in NLP, and is inspired in large part, if remotely, by the work of (Kay and RSscheisen, 1993) on automatic alignment of sentences and words in a bilingual corpus. There, the reasoning was: sentences that are translations of each 2however, it may be supervised after each iteration, where the user can answer yes/no questions to improve the quality of the results other are good indicators that words they contain are translation pairs; conversely, words that are translation pairs indicate that the sentences which contain them correspond to one another.",
- "cite_spans": [
- {
- "start": 540,
- "end": 566,
- "text": "(Kay and RSscheisen, 1993)",
- "ref_id": "BIBREF6"
- }
- ],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "In our context, we observe that documents that are relevant to the scenario will necessarily contain good patterns; conversely, good patterns are strong indicators of relevant documents. The outline of our approach is as follows. (2) an initial set of trusted scenario patterns, as chosen ad hoc by the user--the seed; as will be seen, the seed can be quite small--two or three patterns seem to suffice. (3) an initial (possibly empty) set of concept classes",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "The pattern set induces a binary partition (a split) on the corpus: on any document, either zero or more than zero patterns will match. Thus the universe of documents, U, is partitioned into the relevant sub-corpus, R, vs. the non-relevant sub-corpus, R = U -R, with respect to the given pattern set. Actually, the documents are assigned weights which are 1 for documents matched by the trusted seed, and 0 otherwise. 3 2. Search for new candidate patterns: (a) Automatically convert each sentence in the corpus,into a set of candidate patterns, 4",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "(b) Generalize each pattern by replacing each lexical item which is a member of a concept class by the class name.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "(c) Working from the relevant documents, select those patterns whose distribution is strongly correlated with other relevant documents (i.e., much more 3R represents the trusted truth through the discovery iterations, since it was induced by the manually-selected seed.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "4Here, for each clause in the sentence we extract a tuple of its major roles: the head of the subject, the verb group, the object, object complement, as described below. This tuple is considered to be a pattern for the present purposes of discovery; it is a skeleton for the rich, syntactically transformed patterns our system uses in the extraction phase. densely distributed among the relevant documents than among the nonrelevant ones). The idea is to consider those candidate patterns, p, which meet the density, criterion:",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "IHnRI IRI -->> IHnUI IUI",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "where H = H(p) is the set of documents where p hits.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
- {
- "text": "(d) Based on co-occurrence with the chosen patterns, extend the concept classes.",
- "cite_spans": [],
- "ref_spans": [],
- "eq_spans": [],
- "section": "Solution",
- "sec_num": "2"
- },
211
- {
212
- "text": "classes to the user for review, retaining those relevant to the scenario.",
213
- "cite_spans": [],
214
- "ref_spans": [],
215
- "eq_spans": [],
216
- "section": "Optional: Present the new candidates and",
217
- "sec_num": "3."
218
- },
219
- {
220
- "text": "The new pattern set induces a new partition on the corpus. With this pattern set, return to step 1. Repeat the procedure until no more patterns can be added.",
221
- "cite_spans": [],
222
- "ref_spans": [],
223
- "eq_spans": [],
224
- "section": "4.",
225
- "sec_num": null
226
- },
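
The density criterion in step 2(c) above lends itself to a direct transcription. The following is a minimal Python sketch, assuming documents and pattern hits are represented as sets of document identifiers; the explicit margin factor standing in for the ">>" (much-greater-than) condition is our own assumption, not a value given in the text:

    # Minimal sketch of the density criterion, assuming sets of document ids.
    # The margin factor is a stand-in for ">>" and is an assumption.
    def meets_density_criterion(hits, relevant, universe, margin=2.0):
        """True if |H & R| / |R| is much greater than |H & U| / |U|."""
        density_in_relevant = len(hits & relevant) / len(relevant)
        density_in_universe = len(hits & universe) / len(universe)
        return density_in_relevant > margin * density_in_universe

    universe = set(range(100))
    relevant = set(range(10))       # documents matched by the trusted seed
    hits = {0, 1, 2, 3, 42}         # documents where the candidate pattern matches
    print(meets_density_criterion(hits, relevant, universe))   # True
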
227
- {
228
- "text": "3 Methodology 3.1 Pre-proeessing: Normalization Before applying the discovery procedure, we subject the corpus to several stages of preprocessing. First, we apply a name recognition module, and replace each name with a token describing its class, e.g. C-Person, C-Company, etc. We collapse together all numeric expressions, currency values, dates, etc., using a single token to designate each of these classes.",
229
- "cite_spans": [],
230
- "ref_spans": [],
231
- "eq_spans": [],
232
- "section": "4.",
233
- "sec_num": null
234
- },
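
A toy illustration of the normalization step just described, in Python: recognized names, currency values and numeric expressions are replaced by class tokens before parsing. The regular expressions and the tiny name inventory are invented for the example; the text describes a separate name recognition module doing this work:

    import re

    # Invented rules for illustration only; the system uses a name recognizer.
    NORMALIZATION_RULES = [
        (re.compile(r"\b(IBM|Apple Computer)\b"), "C-Company"),
        (re.compile(r"\b(John Smith|Fred Jones)\b"), "C-Person"),
        (re.compile(r"\$\d+(\.\d+)?\s*(million|billion)?"), "C-Currency"),
        (re.compile(r"\b\d{4}\b"), "C-Number"),
    ]

    def normalize(sentence):
        for pattern, token in NORMALIZATION_RULES:
            sentence = pattern.sub(token, sentence)
        return sentence

    print(normalize("IBM appointed John Smith president in 1997."))
    # -> C-Company appointed C-Person president in C-Number.
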
235
- {
236
- "text": "We then apply a parser to perform syntactic normalization to transform each clause into a common predicate-argument structure. We use the general-purpose dependency parser of English, based on the FDG formalism (Tapanainen and J~rvinen, 1997) and developed by the Research Unit for Multilingual Language Technology at the University of Helsinki, and Conexor Oy. The parser (modified to understand the name labels attached in the previous step) is used for reducing such variants as passive and relative clauses to a tuple, consisting of several elements.",
237
- "cite_spans": [
238
- {
239
- "start": 211,
240
- "end": 242,
241
- "text": "(Tapanainen and J~rvinen, 1997)",
242
- "ref_id": "BIBREF15"
243
- }
244
- ],
245
- "ref_spans": [],
246
- "eq_spans": [],
247
- "section": "Syntactic Analysis",
248
- "sec_num": "3.2"
249
- },
250
- {
251
- "text": "1. For each claus, the first element is the subject, a \"semantic\" subject of a non-finite sentence or agent of the passive. 5",
252
- "cite_spans": [],
253
- "ref_spans": [],
254
- "eq_spans": [],
255
- "section": "Syntactic Analysis",
256
- "sec_num": "3.2"
257
- },
258
- {
259
- "text": "2. The second element is the verb.",
260
- "cite_spans": [],
261
- "ref_spans": [],
262
- "eq_spans": [],
263
- "section": "Syntactic Analysis",
264
- "sec_num": "3.2"
265
- },
266
- {
267
- "text": "3. The third element is the object, certain object-like adverbs, subject of the passive or subject complement 6 4. The fourth element is a phrase which refers to the object or the subject. A typical example of such an argument is an object complement, such as Company named John Smith president. Another instance is the so-called copredicatire (Nichols, 1978) , in the parsing system (J~irvinen and . A copredicative refers to a subject or an object, though this distinction is typically difficult to resolve automatically/ Clausal tuples also contain a locative modifier, and a temporal modifier. We used a corpus of 5,963 articles from the Wall Street Journal, randomly chosen. The parsed articles yielded a total of 250,000 clausal tuples, of which 135,000 were distinct.",
268
- "cite_spans": [
269
- {
270
- "start": 344,
271
- "end": 359,
272
- "text": "(Nichols, 1978)",
273
- "ref_id": "BIBREF10"
274
- }
275
- ],
276
- "ref_spans": [],
277
- "eq_spans": [],
278
- "section": "Syntactic Analysis",
279
- "sec_num": "3.2"
280
- },
281
- {
282
- "text": "Because tuples may not repeat with sufficient frequency to obtain reliable statistics, each tuple is reduced to a set of pairs: e.g., a verbobject pair, a subject-object pair, etc. Each pair is used as a generalized pattern during the candidate selection stage. Once we have identified pairs which are relevant to the scenario, we use them to construct or augment concept classes, by grouping together the missing roles, (for example, a class of verbs which occur with a relevant subject-object pair: \"company (hire/fire/expel...} person\"). This is similar to work by several other groups which aims to induce semantic classes through syntactic co-occurrence analysis (Riloff and Jones, 1999; Pereira et al., 1993; Dagan et al., 1993; Hirschman et al., 1975) , although in .our case the contexts are limited to selected patterns, relevant to the scenario. SE.g., \"John sleeps\", \"John is appointed by Company\", \"I saw a dog which sleeps\", \"She asked John to buy a car\". 6E.g., \"John is appointed by Company\", \"John is the president of Company\", \"I saw a dog which sleeps\", The dog which I saw sleeps. 7For example, \"She gave us our coffee black\", \"Company appointed John Smith as president\".",
283
- "cite_spans": [
284
- {
285
- "start": 668,
286
- "end": 692,
287
- "text": "(Riloff and Jones, 1999;",
288
- "ref_id": "BIBREF13"
289
- },
290
- {
291
- "start": 693,
292
- "end": 714,
293
- "text": "Pereira et al., 1993;",
294
- "ref_id": "BIBREF12"
295
- },
296
- {
297
- "start": 715,
298
- "end": 734,
299
- "text": "Dagan et al., 1993;",
300
- "ref_id": "BIBREF0"
301
- },
302
- {
303
- "start": 735,
304
- "end": 758,
305
- "text": "Hirschman et al., 1975)",
306
- "ref_id": "BIBREF4"
307
- }
308
- ],
309
- "ref_spans": [],
310
- "eq_spans": [],
311
- "section": "Generalization and Concept Classes",
312
- "sec_num": "3.3"
313
- },
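
The reduction of tuples to pairs described above can be sketched as follows; the role names are assumptions based on the tuple elements listed in Section 3.2, and the dictionary representation of a clausal tuple is an illustration rather than the system's internal format:

    from itertools import combinations

    def tuple_to_pairs(clause_tuple):
        """Return every pair of filled roles, e.g. (subject, verb), (subject, object)."""
        filled = [(role, value) for role, value in clause_tuple.items() if value]
        return list(combinations(filled, 2))

    clause = {"subject": "C-Company", "verb": "appoint", "object": "C-Person"}
    for pair in tuple_to_pairs(clause):
        print(pair)
    # (('subject', 'C-Company'), ('verb', 'appoint')), and so on
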
314
- {
315
- "text": "Here we present the results from experiments we conducted on the MUC-6 scenario, \"management succession\". The discovery procedure was seeded with a small pattern set, namely:",
316
- "cite_spans": [],
317
- "ref_spans": [],
318
- "eq_spans": [],
319
- "section": "Pattern Discovery",
320
- "sec_num": "3.4"
321
- },
322
- {
323
- "text": "Subject Verb Direct Object C-Company C-Appoint C-Person C-Person C-Resign",
324
- "cite_spans": [],
325
- "ref_spans": [],
326
- "eq_spans": [],
327
- "section": "Pattern Discovery",
328
- "sec_num": "3.4"
329
- },
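
A minimal sketch of how the seed in the table above might be represented and matched against normalized clause tuples; None marks an unconstrained slot, and this triple representation is our illustration, not the system's internal one:

    # Invented representation: a seed pattern is a (subject, verb, object)
    # triple; None leaves a slot unconstrained.
    SEED = [
        ("C-Company", "C-Appoint", "C-Person"),
        ("C-Person", "C-Resign", None),
    ]

    def matches_seed(clause_tuple):
        return any(
            all(slot is None or slot == value
                for slot, value in zip(pattern, clause_tuple))
            for pattern in SEED
        )

    print(matches_seed(("C-Person", "C-Resign", "post")))    # True
    print(matches_seed(("C-Company", "C-Resign", "plan")))   # False
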
330
- {
331
- "text": "Documents are assigned relevance scores on a scale between 0 and 1. The seed patterns are accepted as ground truth; thus the documents they match have relevance 1. On subsequent iterations, the newly accepted patterns are not trusted as absolutely. On iteration number i q-1, each pattern p is assigned a precision measure, based on the relevance of the documents it matches: Pc(P) --Igl is the conditional probability of relevance. We further impose two support criteria: we distrust such frequent patterns where [HA U{ > a[U[ as uninformative, and rare patterns for which [H A R[ </3 as noise. \u00b0 At the end of each iteration, the system selects the pattern with the highest score, L(p), and adds it to the seed set. The documents which the winning pattern hits are added to the relevant set. The pattern search is then restarted.",
332
- "cite_spans": [],
333
- "ref_spans": [],
334
- "eq_spans": [],
335
- "section": "Pattern Discovery",
336
- "sec_num": "3.4"
337
- },
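
One way to transcribe the scoring and support criteria just described, assuming document relevance is kept as a mapping from document id to a score in [0, 1] (so that |H ∩ R| is measured as cumulative relevance, as in Section 3.5). The function and variable names are our own; it returns None for patterns rejected by the support filters:

    import math

    ALPHA, BETA = 0.1, 2    # values given in footnote 9

    def score(hits, relevance, universe_size):
        """L(p) = Prec(p) * log |H & R|, with relevance weights as counts."""
        if len(hits) > ALPHA * universe_size:    # too frequent: uninformative
            return None
        weight = sum(relevance.get(d, 0.0) for d in hits)
        if weight < BETA:                        # too rare: noise
            return None
        return (weight / len(hits)) * math.log(weight)

    relevance = {1: 1.0, 2: 1.0, 3: 0.8}
    print(score({1, 2, 3, 4}, relevance, universe_size=5963))   # ~0.72
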
338
- {
339
- "text": "Relevance The above is a simplification of the actual procedure, in several important respects.",
340
- "cite_spans": [],
341
- "ref_spans": [],
342
- "eq_spans": [],
343
- "section": "Re-computatlon of Document",
344
- "sec_num": "3.5"
345
- },
346
- {
347
- "text": "Only generalized patterns are considered for candidacy, with one or more slots filled with wild-cards. In computing the score of the generalized pattern, we do not take into consideration all possible values of the wild-card role. We instead constrain the wild-card to those values which themselves in turn produce patterns with high scores. These values then become members of a new class, which is output in tandem with the winning pattern 1\u00b0 Ssimilarly to (Riloff, 1996) \u00b0U denotes the universe of documents. We used c~ = 0.i and ~-----2.",
348
- "cite_spans": [
349
- {
350
- "start": 459,
351
- "end": 473,
352
- "text": "(Riloff, 1996)",
353
- "ref_id": "BIBREF14"
354
- }
355
- ],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "Re-computatlon of Document",
359
- "sec_num": "3.5"
360
- },
361
- {
362
- "text": "1\u00b0The classes are currently unused by subsequent iterations; this important issue is considered in future work.",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Re-computatlon of Document",
367
- "sec_num": "3.5"
368
- },
369
- {
370
- "text": "Preci+l(p) = 1 {H(p){ ~ Reli(d) (2) dEH(p)",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Re-computatlon of Document",
375
- "sec_num": "3.5"
376
- },
377
- {
378
- "text": "where Reli(d) is the relevance of the document from the previous iteration, and H(p) is the set of documents where p matched. More generally, if K is a classifier consisting of a set of patterns, we can define H(K) as the set of documents where all of patterns p E K match, and the \"cumulative\" precision 11 of K as",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Re-computatlon of Document",
383
- "sec_num": "3.5"
384
- },
385
- {
386
- "text": "Preci+l(K) = 1 ~ Reli(d) (3) IH(K)[ riCH(K)",
387
- "cite_spans": [],
388
- "ref_spans": [],
389
- "eq_spans": [],
390
- "section": "Re-computatlon of Document",
391
- "sec_num": "3.5"
392
- },
393
- {
394
- "text": "Once the new winning pattern is accepted, the relevance scores of the documents are readjusted as follows. For each document d which is matched by some (non-empty) subset of the currently accepted patterns, we can view that subset of patterns as a classifier K d = {py}. These patterns determine the new relevance score of the document",
395
- "cite_spans": [],
396
- "ref_spans": [],
397
- "eq_spans": [],
398
- "section": "Re-computatlon of Document",
399
- "sec_num": "3.5"
400
- },
401
- {
402
- "text": "Reli+l(d) = max (Rel~(d),Prec~+l(Kd)) (4)",
403
- "cite_spans": [],
404
- "ref_spans": [],
405
- "eq_spans": [],
406
- "section": "Re-computatlon of Document",
407
- "sec_num": "3.5"
408
- },
409
- {
410
- "text": "This ensures that the relevance score grows monotonically, and only when there is sufficient positive evidence, as the patterns in effect vote \"conjunctively\" on the documents. The results which follow use this measure.",
411
- "cite_spans": [],
412
- "ref_spans": [],
413
- "eq_spans": [],
414
- "section": "Re-computatlon of Document",
415
- "sec_num": "3.5"
416
- },
417
- {
418
- "text": "Thus in the formulas above, R is not simply the count of the relevant documents, but is rather their cumulative relevance. The two formulas, (3) and (4), capture the mutual dependency of patterns and documents; this recomputation and growing of precision and relevance scores is at the heart of the procedure.",
419
- "cite_spans": [],
420
- "ref_spans": [],
421
- "eq_spans": [],
422
- "section": "Re-computatlon of Document",
423
- "sec_num": "3.5"
424
- },
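
A compact sketch of the re-computation captured by formulas (2)-(4): the precision of the classifier K_d formed by the accepted patterns matching a document determines the document's new relevance, which can only grow. The data structures (hits_of, doc_patterns) are simplified assumptions of our own:

    def pattern_precision(hits, rel):                    # formula (2)
        return sum(rel.get(d, 0.0) for d in hits) / len(hits)

    def update_relevance(doc_patterns, accepted, hits_of, rel):
        new_rel = dict(rel)
        for doc, patterns in doc_patterns.items():
            classifier = patterns & accepted             # K_d
            if not classifier:
                continue
            docs_hit_by_all = set.intersection(*(hits_of[p] for p in classifier))
            prec = pattern_precision(docs_hit_by_all, rel)   # formula (3)
            new_rel[doc] = max(rel.get(doc, 0.0), prec)      # formula (4)
        return new_rel

    hits_of = {"p1": {1, 2}, "p2": {2, 3}}
    doc_patterns = {1: {"p1"}, 2: {"p1", "p2"}, 3: {"p2"}}
    rel = {1: 1.0, 2: 1.0, 3: 0.0}
    print(update_relevance(doc_patterns, {"p1", "p2"}, hits_of, rel))
    # -> {1: 1.0, 2: 1.0, 3: 0.5}
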
425
- {
426
- "text": "11Of course, this measure is defined only when H(K) # 0.",
427
- "cite_spans": [],
428
- "ref_spans": [],
429
- "eq_spans": [],
430
- "section": "Re-computatlon of Document",
431
- "sec_num": "3.5"
432
- },
433
- {
434
- "text": "An objective measure of goodness of a pattern o. 9 is not trivial to establish since the patterns cannot be used for extraction directly, without be-o. s ing properly incorporated into the knowledge base. Thus, the discovery procedure does not o. v lend itself easily to MUC-style evaluations, since 0.6 a pattern lacks information about which events it induces and which slots its arguments should 0.5 fill.",
435
- "cite_spans": [],
436
- "ref_spans": [],
437
- "eq_spans": [],
438
- "section": "1",
439
- "sec_num": null
440
- },
441
- {
442
- "text": "However, it is possible to apply some objec-o. a tive measures of performance. One way we evaluated the system is by noting that in addition o. to growing the pattern set, the procedure also grows the relevance of documents. The latter o. 2 can be objectively evaluated.",
443
- "cite_spans": [],
444
- "ref_spans": [],
445
- "eq_spans": [],
446
- "section": "1",
447
- "sec_num": null
448
- },
449
- {
450
- "text": "We used a test corpus of 100 MUC-6 formaltraining documents (which were included in the o main development corpus of about 6000 documents) plus another 150 documents picked at random from the main corpus and judged by hand. These judgements constituted the ground truth and were used only for evaluation, (not in the discovery procedure). Figure 1 shows the recall/precision measures with respect to the test corpus of 250 documents, over a span of 60 generations, starting with the seed set in table 3.4. The Seed patterns matched 184 of the 5963 documents, yielding an initial recall of .11 and precision of .93; by the last generation it searched through 982 documents with non-zero relevance, and ended with .80 precision and .78 recall. This facet of the discovery procedure is closely related to the MUC '%ext-filtering\" sub-task, where the systems are judged at the level of documents rather than event slots. It is interesting to compare the results with other MUC-6 participants, shown anonymously in figure 2. Considering recall and precision separately, the discovery procedure attains values comparable to those achieved by some of the participants, all of which were either heavily-supervised or manually coded systems. It is important to bear in mind that the discovery procedure had no benefit of training material, or any information beyond the seed pattern set. Figure 2 shows two evaluations of our discovery procedure, tested against the original MUC-6 corpus of 100 documents, and against our test corpus, which consists of an additional 150 documents judged manually. The two plots in the figure show a slight difference in results, indicating that in some sense, the MUC corpus was more \"random\", or that our expanded corpus was somewhat skewed in favor of more common patterns that the system is able to find more easily.",
451
- "cite_spans": [],
452
- "ref_spans": [
453
- {
454
- "start": 339,
455
- "end": 347,
456
- "text": "Figure 1",
457
- "ref_id": null
458
- },
459
- {
460
- "start": 1379,
461
- "end": 1387,
462
- "text": "Figure 2",
463
- "ref_id": null
464
- }
465
- ],
466
- "eq_spans": [],
467
- "section": "0.1",
468
- "sec_num": null
469
- },
470
- {
471
- "text": "The graphs shown in Figures 1 and 2 are based on an \"objective\" measure we adopted during the experiments. This is the same measure of relevance used internally by the discovery procedure on each iteration (relative to the \"truth\" of relevance scores of the previous iteration), and is not quite the standard measure used for text filtering in IR. According to this measure, the system gets a score for each document based on the relevance which it assigned to the document. Thus if the system .assigned relevance of X percent to a relevant document, it only received X Figure 3 : Results on the MUC corpus percent on the recall score for classifying that document correctly. Similarly, if the system assigned relevance Y to an irrelevant document, it was penalized only for the mis-classified Y percent on the precision score. To make our results more comparable to those of other MUC competitors, we chose a cut-off point and force the system to make a binary relevance decision on each document. The cut-off of 0.5 seemed optimal from empirical observations. Figure 3 shows a noticeable improvement in scores, when using our continuous, \"objective\" measure, vs. the cut-off measure, with the entire graph essentially translated to the right for a gain of almost 10 percentage points of recall.",
472
- "cite_spans": [],
473
- "ref_spans": [
474
- {
475
- "start": 20,
476
- "end": 35,
477
- "text": "Figures 1 and 2",
478
- "ref_id": null
479
- },
480
- {
481
- "start": 570,
482
- "end": 578,
483
- "text": "Figure 3",
484
- "ref_id": null
485
- },
486
- {
487
- "start": 1062,
488
- "end": 1070,
489
- "text": "Figure 3",
490
- "ref_id": null
491
- }
492
- ],
493
- "eq_spans": [],
494
- "section": "Choice of Evaluation Metric",
495
- "sec_num": "4.3"
496
- },
497
505
- {
506
- "text": "Another effective, if simple, measure of performanceis how many of the patterns the procedure found, and comparing them with those used by an extraction engine which was manually constructed for the same task. Our MUC-6 system used approximately 75 clause level patterns, with 30 distinct verbal heads. In one conservative experiment, we observed that the discovery procedure found 17 of these verbs, or 57%. However, it also found at least 8 verbs the manual system lacked, which seemed relevant to the scenario: company-bring-person- [as\u00f7officer] 12",
507
- "cite_spans": [
508
- {
509
- "start": 536,
510
- "end": 548,
511
- "text": "[as\u00f7officer]",
512
- "ref_id": null
513
- }
514
- ],
515
- "ref_spans": [],
516
- "eq_spans": [],
517
- "section": "Evaluating Patterns",
518
- "sec_num": "4.4"
519
- },
520
- {
521
- "text": "person-rejoin-company- [as + o25cer] person- { ret , conti, e, remai, ,stay}-[as + o25cer] person-pursue-interest At the risk of igniting a philosophical debate over what is or is not relevant to a scenario, we note that the first four of these verbs are evidently essential to the scenario in the strictest definition, since they imply changes of post. The next three are \"staying\" verbs, and are actually also needed, since higher-level inferences required in tracking events for long-range merging over documents, require knowledge of persons occupying posts, rather than only assuming or leaving them. The most curious one is \"person-pursue-interesf'; surprisingly, it too is useful, even in the strictest MUC sense, cf., (muc, 1995) . Systems are judged on filling a slot called \"other-organization\", indicating from or to which company the person came or went. This pattern is consistently used in text to indi- cate that the person left to pursue other, undisclosed interests, the knowledge of which would relieve the system from seeking other information in order to fill this slot. This is to say that here strict evaluation is elusive.",
522
- "cite_spans": [
523
- {
524
- "start": 23,
525
- "end": 36,
526
- "text": "[as + o25cer]",
527
- "ref_id": null
528
- },
529
- {
530
- "start": 45,
531
- "end": 90,
532
- "text": "{ ret , conti, e, remai, ,stay}-[as + o25cer]",
533
- "ref_id": null
534
- },
535
- {
536
- "start": 726,
537
- "end": 737,
538
- "text": "(muc, 1995)",
539
- "ref_id": null
540
- }
541
- ],
542
- "ref_spans": [],
543
- "eq_spans": [],
544
- "section": "person-come-[to+eompanv]-[as+oZScer]",
545
- "sec_num": null
546
- },
547
- {
548
- "text": "Some of the prior research has emphasized interactive tools to convert examples to extraction patterns, cf. (Yangarber and Grishman, 1997) , while others have focused on methods for automatically converting a corpus annotated with extraction examples into such patterns (Lehnert et al., 1992; Fisher et al., 1995; Miller et al., 1998) . These methods, however, do not reduce the burden of finding the examples to annotate. With either approach, the portability bottleneck is shifted from the problem of building patterns to that of finding good candidates. The prior work most closely related to this study is (Riloff, 1996) , which, along with (Riloff, 1993) , seeks automatic methods for filling slots in event templates. However, the prior work differs from that presented here in several crucial respects; firstly, the prior work does not attempt to find entire events, after the fashion of MUC's highest-level scenario-template task. Rather the patterns produced by those systems identify NPs that fill individual slots, without specifying how these slots may be combined at a later stage into complete event templates. The present work focuses on directly discovering event-level, multi-slot relational patterns. Secondly, the prior work either relies on a set of documents with relevance judgements to find slot fillers where they are relevant to events, (Riloff, 1996) , or utilizes an un-classified corpus containing a very high proportion of relevant documents to find all instances of a semantic class, (Riloff and Jones, 1999) . By contrast, our procedure requires no relevance judgements, and works on the assumption that the corpus is balanced and the proportion of relevant documents is small. Classifying documents by hand, although admittedly easier than tagging event instances in text for automatic training, is still a formidable task. When we prepared the test corpus, it took 5 hours to mark 150 short documents.",
549
- "cite_spans": [
550
- {
551
- "start": 108,
552
- "end": 138,
553
- "text": "(Yangarber and Grishman, 1997)",
554
- "ref_id": "BIBREF16"
555
- },
556
- {
557
- "start": 270,
558
- "end": 292,
559
- "text": "(Lehnert et al., 1992;",
560
- "ref_id": "BIBREF7"
561
- },
562
- {
563
- "start": 293,
564
- "end": 313,
565
- "text": "Fisher et al., 1995;",
566
- "ref_id": "BIBREF1"
567
- },
568
- {
569
- "start": 314,
570
- "end": 334,
571
- "text": "Miller et al., 1998)",
572
- "ref_id": "BIBREF8"
573
- },
574
- {
575
- "start": 610,
576
- "end": 624,
577
- "text": "(Riloff, 1996)",
578
- "ref_id": "BIBREF14"
579
- },
580
- {
581
- "start": 645,
582
- "end": 659,
583
- "text": "(Riloff, 1993)",
584
- "ref_id": "BIBREF13"
585
- },
586
- {
587
- "start": 1362,
588
- "end": 1376,
589
- "text": "(Riloff, 1996)",
590
- "ref_id": "BIBREF14"
591
- },
592
- {
593
- "start": 1514,
594
- "end": 1538,
595
- "text": "(Riloff and Jones, 1999)",
596
- "ref_id": "BIBREF13"
597
- }
598
- ],
599
- "ref_spans": [],
600
- "eq_spans": [],
601
- "section": "Discussion and Current Work",
602
- "sec_num": "5"
603
- },
604
- {
605
- "text": "The presented results indicate that our method of corpus analysis can be used to rapidly identify a large number of relevant patterns without pre-classifying a large training corpus. We are at the early stages of understanding how to optimally tune these techniques, and there are number of areas that need refinement. We are working on capturing the rich information about concept classes which is currently returned as part of our pattern discovery procedure, to build up a concept dictionary in tandem with the pattern base. We are also considering the proper selection of weights and thresholds for controlling the rankings of patterns and documents, criteria for terminating the iteration process, and for dynamic adjustments of these weights. We feel that the generalization technique in pattern discovery offers a great opportunity for combating sparseness of data, though this requires further research. Lastly, we are studying these algorithms under several unrelated scenarios to determine to what extent scenario-specific phenomena affect their performance.",
606
- "cite_spans": [],
607
- "ref_spans": [],
608
- "eq_spans": [],
609
- "section": "Discussion and Current Work",
610
- "sec_num": "5"
611
- }
612
- ],
613
- "back_matter": [],
614
- "bib_entries": {
615
- "BIBREF0": {
616
- "ref_id": "b0",
617
- "title": "Contextual word similarity and estimation from sparse data",
618
- "authors": [
619
- {
- "first": "Ido",
- "middle": [],
- "last": "Dagan",
- "suffix": ""
- },
- {
- "first": "Shaul",
- "middle": [],
- "last": "Marcus",
- "suffix": ""
- },
- {
- "first": "Shaul",
- "middle": [],
- "last": "Markovitch",
- "suffix": ""
- }
637
- ],
638
- "year": 1993,
639
- "venue": "Proceedings of the 31st Annual Meeting of the Assn. for Computational Linguistics",
640
- "volume": "",
641
- "issue": "",
642
- "pages": "31--37",
643
- "other_ids": {},
644
- "num": null,
645
- "urls": [],
646
- "raw_text": "Ido Dagan, Shaul Marcus, and Shaul Markovitch. 1993. Contextual word simi- larity and estimation from sparse data. In Proceedings of the 31st Annual Meeting of the Assn. for Computational Linguistics, pages 31-37, Columbus, OH, June.",
647
- "links": null
648
- },
649
- "BIBREF1": {
650
- "ref_id": "b1",
651
- "title": "Description of the UMass system as used for MUC-6",
652
- "authors": [
653
- {
654
- "first": "David",
655
- "middle": [],
656
- "last": "Fisher",
657
- "suffix": ""
658
- },
659
- {
660
- "first": "Stephen",
661
- "middle": [],
662
- "last": "Soderland",
663
- "suffix": ""
664
- },
665
- {
666
- "first": "Joseph",
667
- "middle": [],
668
- "last": "Mc-Carthy",
669
- "suffix": ""
670
- }
671
- ],
672
- "year": 1995,
673
- "venue": "Proc. Si;zth Message Understanding Conf. (MUC-6)",
674
- "volume": "",
675
- "issue": "",
676
- "pages": "",
677
- "other_ids": {},
678
- "num": null,
679
- "urls": [],
680
- "raw_text": "David Fisher, Stephen Soderland, Joseph Mc- Carthy, Fang-fang Feng, and Wendy Lehnert. 1995. Description of the UMass system as used for MUC-6. In Proc. Si;zth Message Un- derstanding Conf. (MUC-6), Columbia, MD, November. Morgan Kaufmann.",
681
- "links": null
682
- },
683
- "BIBREF2": {
684
- "ref_id": "b2",
685
- "title": "Discovery procedures for sublanguage selectional patterns: Initial experiments",
686
- "authors": [
687
- {
688
- "first": "R",
689
- "middle": [],
690
- "last": "Grishman",
691
- "suffix": ""
692
- },
693
- {
694
- "first": "L",
695
- "middle": [],
696
- "last": "Hirschman",
697
- "suffix": ""
698
- },
699
- {
700
- "first": "N",
701
- "middle": [
702
- "T"
703
- ],
704
- "last": "Nhan",
705
- "suffix": ""
706
- }
707
- ],
708
- "year": 1986,
709
- "venue": "",
710
- "volume": "",
711
- "issue": "",
712
- "pages": "",
713
- "other_ids": {},
714
- "num": null,
715
- "urls": [],
716
- "raw_text": "R. Grishman, L. Hirschman, and N.T. Nhan. 1986. Discovery procedures for sublanguage selectional patterns: Initial experiments.",
717
- "links": null
718
- },
719
- "BIBREF4": {
720
- "ref_id": "b4",
721
- "title": "Grammatically-based automatic word class formation",
722
- "authors": [
723
- {
724
- "first": "Lynette",
725
- "middle": [],
726
- "last": "Hirschman",
727
- "suffix": ""
728
- },
729
- {
730
- "first": "Ralph",
731
- "middle": [],
732
- "last": "Grishman",
733
- "suffix": ""
734
- },
735
- {
736
- "first": "Naomi",
737
- "middle": [],
738
- "last": "Sager",
739
- "suffix": ""
740
- }
741
- ],
742
- "year": 1975,
743
- "venue": "Information Processing and Management",
744
- "volume": "11",
745
- "issue": "1/2",
746
- "pages": "39--57",
747
- "other_ids": {},
748
- "num": null,
749
- "urls": [],
750
- "raw_text": "Lynette Hirschman, Ralph Grishman, and Naomi Sager. 1975. Grammatically-based automatic word class formation. Information Processing and Management, 11(1/2):39-57.",
751
- "links": null
752
- },
753
- "BIBREF5": {
754
- "ref_id": "b5",
755
- "title": "A dependency parser for English. Technical Report TR-1, Department of General Linguistics",
756
- "authors": [
757
- {
- "first": "Timo",
- "middle": [],
- "last": "Järvinen",
- "suffix": ""
- },
- {
- "first": "Pasi",
- "middle": [],
- "last": "Tapanainen",
- "suffix": ""
- }
775
- ],
776
- "year": 1997,
777
- "venue": "",
778
- "volume": "",
779
- "issue": "",
780
- "pages": "",
781
- "other_ids": {},
782
- "num": null,
783
- "urls": [],
784
- "raw_text": "Timo J/irvinen and Pasi Tapanainen. 1997. A dependency parser for English. Technical Re- port TR-1, Department of General Linguis- tics, University of Helsinki, Finland, Febru- ary.",
785
- "links": null
786
- },
787
- "BIBREF6": {
788
- "ref_id": "b6",
789
- "title": "Text-translation alignment",
790
- "authors": [
791
- {
792
- "first": "Martin",
793
- "middle": [],
794
- "last": "Kay",
795
- "suffix": ""
796
- },
797
- {
798
- "first": "Martin",
799
- "middle": [],
800
- "last": "Rsscheisen",
801
- "suffix": ""
802
- }
803
- ],
804
- "year": 1993,
805
- "venue": "Computational Linguistics",
806
- "volume": "",
807
- "issue": "1",
808
- "pages": "",
809
- "other_ids": {},
810
- "num": null,
811
- "urls": [],
812
- "raw_text": "Martin Kay and Martin RSscheisen. 1993. Text-translation alignment. Computational Linguistics, 19(1).",
813
- "links": null
814
- },
815
- "BIBREF7": {
816
- "ref_id": "b7",
817
- "title": "University of massachusetts: MUC-4 test results and analysis",
818
- "authors": [
819
- {
820
- "first": "W",
821
- "middle": [],
822
- "last": "Lehnert",
823
- "suffix": ""
824
- },
825
- {
826
- "first": "C",
827
- "middle": [],
828
- "last": "Cardie",
829
- "suffix": ""
830
- },
831
- {
832
- "first": "D",
833
- "middle": [],
834
- "last": "Fisher",
835
- "suffix": ""
836
- },
837
- {
838
- "first": "J",
839
- "middle": [],
840
- "last": "Mccarthy",
841
- "suffix": ""
842
- },
843
- {
844
- "first": "E",
845
- "middle": [],
846
- "last": "Riloff",
847
- "suffix": ""
848
- },
849
- {
- "first": "S",
- "middle": [],
- "last": "Soderland",
- "suffix": ""
- }
869
- ],
870
- "year": 1992,
871
- "venue": "Proc. Fourth Message Understanding Conf",
872
- "volume": "",
873
- "issue": "",
874
- "pages": "",
875
- "other_ids": {},
876
- "num": null,
877
- "urls": [],
878
- "raw_text": "W. Lehnert, C. Cardie, D. Fisher, J. McCarthy, E. Riloff, and S. Soderland. 1992. Univer- sity of massachusetts: MUC-4 test results and analysis. In Proc. Fourth Message Un- derstanding Conf., McLean, VA, June. Mor- gan Kaufmann.",
879
- "links": null
880
- },
881
- "BIBREF8": {
882
- "ref_id": "b8",
883
- "title": "Algorithms that learn to extract information; BBN: Description of the SIFT system as used for MUC-7",
884
- "authors": [
885
- {
886
- "first": "Scott",
887
- "middle": [],
888
- "last": "Miller",
889
- "suffix": ""
890
- },
891
- {
892
- "first": "Michael",
893
- "middle": [],
894
- "last": "Crystal",
895
- "suffix": ""
896
- },
897
- {
898
- "first": "Heidi",
899
- "middle": [],
900
- "last": "Fox",
901
- "suffix": ""
902
- },
903
- {
904
- "first": "Lance",
905
- "middle": [],
906
- "last": "Ramshaw",
907
- "suffix": ""
908
- },
909
- {
910
- "first": "Richard",
911
- "middle": [],
912
- "last": "Schwartz",
913
- "suffix": ""
914
- },
915
- {
916
- "first": "Rebecca",
917
- "middle": [],
918
- "last": "Stone",
919
- "suffix": ""
920
- }
921
- ],
922
- "year": 1993,
923
- "venue": "Proceedings of the Fifth Message Understanding Conference (MUC-5)",
924
- "volume": "",
925
- "issue": "",
926
- "pages": "",
927
- "other_ids": {},
928
- "num": null,
929
- "urls": [],
930
- "raw_text": "Scott Miller, Michael Crystal, Heidi Fox, Lance Ramshaw, Richard Schwartz, Rebecca Stone, Ralph Weischedel, and the Annota- tion Group. 1998. Algorithms that learn to extract information; BBN: Description of the SIFT system as used for MUC-7. In Proc. of the Seventh Message Understanding Confer- ence, Fairfax, VA. 1993. Proceedings of the Fifth Message Un- derstanding Conference (MUC-5), Baltimore, MD, August. Morgan Kaufmann.",
931
- "links": null
932
- },
933
- "BIBREF9": {
934
- "ref_id": "b9",
935
- "title": "Proceedings of the Sixth Message Understanding Conference (MUC-6)",
936
- "authors": [],
937
- "year": 1995,
938
- "venue": "",
939
- "volume": "",
940
- "issue": "",
941
- "pages": "",
942
- "other_ids": {},
943
- "num": null,
944
- "urls": [],
945
- "raw_text": "1995. Proceedings of the Sixth Message Un- derstanding Conference (MUC-6), Columbia, M_D, November. Morgan Kaufmann.",
946
- "links": null
947
- },
948
- "BIBREF10": {
949
- "ref_id": "b10",
950
- "title": "Secondary predicates. Proceedings of the 4th Annual Meeting of Berkeley Linguistics Society",
951
- "authors": [
952
- {
953
- "first": "Johanna",
954
- "middle": [],
955
- "last": "Nichols",
956
- "suffix": ""
957
- }
958
- ],
959
- "year": 1978,
960
- "venue": "",
961
- "volume": "",
962
- "issue": "",
963
- "pages": "114--127",
964
- "other_ids": {},
965
- "num": null,
966
- "urls": [],
967
- "raw_text": "Johanna Nichols. 1978. Secondary predicates. Proceedings of the 4th Annual Meeting of Berkeley Linguistics Society, pages 114-127.",
968
- "links": null
969
- },
970
- "BIBREF11": {
971
- "ref_id": "b11",
972
- "title": "Information Extraction",
973
- "authors": [],
974
- "year": 1997,
975
- "venue": "Lecture Notes in Artificial Intelligence",
976
- "volume": "",
977
- "issue": "",
978
- "pages": "",
979
- "other_ids": {},
980
- "num": null,
981
- "urls": [],
982
- "raw_text": "Maria Teresa Pazienza, editor. 1997. Infor- mation Extraction. Springer-Verlag, Lecture Notes in Artificial Intelligence, Rome.",
983
- "links": null
984
- },
985
- "BIBREF12": {
986
- "ref_id": "b12",
987
- "title": "Distributional clustering of English words",
988
- "authors": [
989
- {
990
- "first": "Fernando",
991
- "middle": [],
992
- "last": "Pereira",
993
- "suffix": ""
994
- },
995
- {
996
- "first": "Naftali",
997
- "middle": [],
998
- "last": "Tishby",
999
- "suffix": ""
1000
- },
1001
- {
1002
- "first": "Lillian",
1003
- "middle": [],
1004
- "last": "Lee",
1005
- "suffix": ""
1006
- }
1007
- ],
1008
- "year": 1993,
1009
- "venue": "Proceedings of the 31st Annual Meeting of the Assn. for Computational Linguistics",
1010
- "volume": "",
1011
- "issue": "",
1012
- "pages": "183--190",
1013
- "other_ids": {},
1014
- "num": null,
1015
- "urls": [],
1016
- "raw_text": "Fernando Pereira, Naftali Tishby, and Lillian Lee. 1993. Distributional clustering of En- glish words. In Proceedings of the 31st An- nual Meeting of the Assn. for Computational Linguistics, pages 183-190, Columbus, OH, June.",
1017
- "links": null
1018
- },
1019
- "BIBREF13": {
1020
- "ref_id": "b13",
1021
- "title": "Learning dictionaries for information extraction by multi-level bootstrapping",
1022
- "authors": [
1023
- {
1024
- "first": "Ellen",
1025
- "middle": [],
1026
- "last": "Riloff",
1027
- "suffix": ""
1028
- },
1029
- {
1030
- "first": "Rosie",
1031
- "middle": [],
1032
- "last": "Jones",
1033
- "suffix": ""
1034
- }
1035
- ],
1036
- "year": 1993,
1037
- "venue": "Proceedings of Sixteenth National Conference on Artificial Intelligence (AAAI-99)",
1038
- "volume": "",
1039
- "issue": "",
1040
- "pages": "811--816",
1041
- "other_ids": {},
1042
- "num": null,
1043
- "urls": [],
1044
- "raw_text": "Ellen Riloff and Rosie Jones. 1999. Learn- ing dictionaries for information extraction by multi-level bootstrapping. In Proceedings of Sixteenth National Conference on Artificial Intelligence (AAAI-99), Orlando, Florida, Ellen Riloff. 1993. Automatically construct- ing a dictionary for information extraction tasks. In Proceedings of Eleventh National Conference on Artificial Intelligence (AAAI- 93), pages 811-816. The AAAI Press/MIT Press.",
1045
- "links": null
1046
- },
1047
- "BIBREF14": {
1048
- "ref_id": "b14",
1049
- "title": "Automatically generating extraction patterns from untagged text",
1050
- "authors": [
1051
- {
1052
- "first": "Ellen",
1053
- "middle": [],
1054
- "last": "Riloff",
1055
- "suffix": ""
1056
- }
1057
- ],
1058
- "year": 1996,
1059
- "venue": "Proceedings of Thirteenth National Conference on Artificial Intelligence (AAAL96)",
1060
- "volume": "",
1061
- "issue": "",
1062
- "pages": "1044--1049",
1063
- "other_ids": {},
1064
- "num": null,
1065
- "urls": [],
1066
- "raw_text": "Ellen Riloff. 1996. Automatically generating extraction patterns from untagged text. In Proceedings of Thirteenth National Confer- ence on Artificial Intelligence (AAAL96), pages 1044-1049. The AAAI Press/MIT Press.",
1067
- "links": null
1068
- },
1069
- "BIBREF15": {
1070
- "ref_id": "b15",
1071
- "title": "A non-projective dependency parser",
1072
- "authors": [
1073
- {
1074
- "first": "Pasi",
1075
- "middle": [],
1076
- "last": "Tapanainen",
1077
- "suffix": ""
1078
- },
1079
- {
1080
- "first": "Timo",
1081
- "middle": [],
1082
- "last": "J~rvinen",
1083
- "suffix": ""
1084
- }
1085
- ],
1086
- "year": 1997,
1087
- "venue": "Proceedings of the 5th Conference on Applied Natural Language Processing",
1088
- "volume": "",
1089
- "issue": "",
1090
- "pages": "64--71",
1091
- "other_ids": {},
1092
- "num": null,
1093
- "urls": [],
1094
- "raw_text": "Pasi Tapanainen and Timo J~rvinen. 1997. A non-projective dependency parser. In Pro- ceedings of the 5th Conference on Applied Natural Language Processing, pages 64-71, Washington, D.C., April. ACL.",
1095
- "links": null
1096
- },
1097
- "BIBREF16": {
1098
- "ref_id": "b16",
1099
- "title": "Customization of information extraction systems",
1100
- "authors": [
1101
- {
1102
- "first": "Roman",
1103
- "middle": [],
1104
- "last": "Yangarber",
1105
- "suffix": ""
1106
- },
1107
- {
1108
- "first": "Ralph",
1109
- "middle": [],
1110
- "last": "Grishman",
1111
- "suffix": ""
1112
- }
1113
- ],
1114
- "year": 1997,
1115
- "venue": "International Workshop on Lexically Driven Information Extraction",
1116
- "volume": "",
1117
- "issue": "",
1118
- "pages": "1--11",
1119
- "other_ids": {},
1120
- "num": null,
1121
- "urls": [],
1122
- "raw_text": "Roman Yangarber and Ralph Grishman. 1997. Customization of information extraction sys- tems. In Paola Velardi, editor, International Workshop on Lexically Driven Information Extraction, pages 1-11, Frascati, Italy, July. Universit?~ di Roma.",
1123
- "links": null
1124
- }
1125
- },
1126
- "ref_entries": {
1127
- "FIGREF0": {
1128
- "text": "a large corpus of un-annotated and un-classified documents in the domain;",
1129
- "num": null,
1130
- "type_str": "figure",
1131
- "uris": null
1132
- },
1133
- "FIGREF1": {
1134
- "text": "Here C-Company and C-Person denote semantic classes containing named entities of the corresponding semantic types. C-Appoirlt denotes a class of verbs, containing four verbs { appoint, elect, promote, name}; C-Resign = { resign, depart, quit, step-down }. During a single iteration, we compute the score s, L(p), for each candidate pattern p: L(p) = Pc(P)\" log {H A R] (1) where R denotes the relevant subset, and H --H(p) the documents matching p, as above, and [gnR[",
1135
- "num": null,
1136
- "type_str": "figure",
1137
- "uris": null
1138
- },
1139
- "FIGREF2": {
1140
- "text": "....... ~ .......... ~ ......... ~ ........... ~ .......... ~ .......... ~ ......... ...... iiiiiiiiiiiiiilEi ........... ........ /... .......... ~ .......... '.\" ........ \": .......... ~ ........... r .......... ~ .......... ! .........",
1141
- "num": null,
1142
- "type_str": "figure",
1143
- "uris": null
1144
- },
1145
- "FIGREF3": {
1146
- "text": "Figure h Recall/Precision curves for Management Succession",
1147
- "num": null,
1148
- "type_str": "figure",
1149
- "uris": null
1150
- },
1151
- "FIGREF4": {
1152
- "text": "7 ................................................................... ~\" ............... ... ~ ........ , ........ ~ ....... ~ ........ ~ ....... ~ ........ ! ........ : ........ 4 .......",
1153
- "num": null,
1154
- "type_str": "figure",
1155
- "uris": null
1156
- },
1157
- "TABREF0": {
1158
- "text": "....... ........ ............... ---! ........ ....... ...... i ........ i ........ [ ....... ? ........ f ............ T ....... ...... J ........ i ........ i. ....... i ........ ~ ....... .; ........... ..: ........ i....: 0 6 ........................ .' 7 ....... ' ........ ~ ....... ~ ........ ~ ....................... ..... '. ........ , ........ ~\" ....... , ........ ~ ....... ~ ........ * ....... \"=. ........ , ....... ...... e ........ i ........ ! ........ ~ ........ ~... ' ........ ! ........ i .......",
1159
- "html": null,
1160
- "content": "<table><tr><td/><td/><td>II :</td><td>I :</td><td>I .</td><td>I ,</td><td/><td>I .</td><td/><td>I :</td><td>I \u2022</td><td>I</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>i</td><td>.</td><td>i</td><td>~</td><td>iB</td><td>i</td></tr><tr><td colspan=\"2\">0.9</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>0.9</td></tr><tr><td colspan=\"2\">0.8</td><td>i</td><td>i</td><td>i</td><td>::</td><td>i</td><td>i</td><td>i</td><td>i</td><td>i</td><td>~</td></tr><tr><td>0</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">0.7</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>(I)</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td>i</td><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td>i i</td><td/><td/><td/><td/><td/><td/></tr><tr><td>0</td><td>. 5</td><td>..i</td><td/><td>z</td><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">0.4</td><td>i</td><td/><td>I</td><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>0</td><td colspan=\"8\">0.10.20.30.40.50.60.70.80.9</td><td/><td>1</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">Recall</td><td/><td/><td/><td/></tr><tr><td/><td/><td colspan=\"7\">Figure 2: Precision vs. Recall</td><td/><td/></tr></table>",
1161
- "num": null,
1162
- "type_str": "table"
1163
- }
1164
- }
1165
- }
1166
- }
Full_text_JSON/prefixA/json/A00/A00-1040.json DELETED
@@ -1,882 +0,0 @@
1
- {
2
- "paper_id": "A00-1040",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:23.469583Z"
6
- },
7
- "title": "Using Corpus-derived Name Lists for Named Entity Recognition",
8
- "authors": [
9
- {
10
- "first": "Mark",
11
- "middle": [],
12
- "last": "Stevenson",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Sheffield Regent Court",
17
- "location": {
18
- "addrLine": "211 Portobello Street",
19
- "postCode": "S1 4DP",
20
- "settlement": "Sheffield",
21
- "country": "United Kingdom"
22
- }
23
- },
24
- "email": ""
25
- },
26
- {
27
- "first": "Robert",
28
- "middle": [],
29
- "last": "Gaizauskas",
30
- "suffix": "",
31
- "affiliation": {
32
- "laboratory": "",
33
- "institution": "University of Sheffield Regent Court",
34
- "location": {
35
- "addrLine": "211 Portobello Street",
36
- "postCode": "S1 4DP",
37
- "settlement": "Sheffield",
38
- "country": "United Kingdom"
39
- }
40
- },
41
- "email": ""
42
- }
43
- ],
44
- "year": "",
45
- "venue": null,
46
- "identifiers": {},
47
- "abstract": "This paper describes experiments to establish the performance of a named entity recognition system which builds categorized lists of names from manually annotated training data. Names in text are then identified using only these lists. This approach does not perform as well as state-of-the-art named entity recognition systems. However, we then show that by using simple filtering techniques for improving the automatically acquired lists, substantial performance benefits can be achieved, with resulting Fmeasure scores of 87% on a standard test set. These results provide a baseline against which the contribution of more sophisticated supervised learning techniques for NE recognition should be measured.",
48
- "pdf_parse": {
49
- "paper_id": "A00-1040",
50
- "_pdf_hash": "",
51
- "abstract": [
52
- {
53
- "text": "This paper describes experiments to establish the performance of a named entity recognition system which builds categorized lists of names from manually annotated training data. Names in text are then identified using only these lists. This approach does not perform as well as state-of-the-art named entity recognition systems. However, we then show that by using simple filtering techniques for improving the automatically acquired lists, substantial performance benefits can be achieved, with resulting Fmeasure scores of 87% on a standard test set. These results provide a baseline against which the contribution of more sophisticated supervised learning techniques for NE recognition should be measured.",
54
- "cite_spans": [],
55
- "ref_spans": [],
56
- "eq_spans": [],
57
- "section": "Abstract",
58
- "sec_num": null
59
- }
60
- ],
61
- "body_text": [
62
- {
63
- "text": "Named entity (NE) recognition is the process of identifying and categorising names in text. Systems which have attempted the NE task have, in general, made use of lists of common names to provide clues. Name lists provide an extremely efficient way of recognising names, as the only processing required is to match the name pattern in the list against the text and no expensive advanced processing such as full text parsing is required. However, name lists are a naive method for recognising names. McDonald (1996) defines internal and external evidence in the NE task. The first is found within the name string itself, while the second is gathered from its context. For example, in the sentence \"President Washington chopped the tree\" the word \"President\" is clear external evidence that \"Washington\" denotes a person. In this case internal evidence from the name cannot conclusively tell us whether \"Washington\" is a person or a location (\"Washington, DC\"). A NE system based solely on lists of names makes use of only internal evidence and examples such as this demonstrate the limitations of this knowledge source.",
64
- "cite_spans": [
65
- {
66
- "start": 499,
67
- "end": 514,
68
- "text": "McDonald (1996)",
69
- "ref_id": "BIBREF3"
70
- }
71
- ],
72
- "ref_spans": [],
73
- "eq_spans": [],
74
- "section": "Introduction",
75
- "sec_num": "1"
76
- },
77
- {
78
- "text": "Despite these limitations, many NE systems use extensive lists of names. Krupke and Hausman (1998) made extensive use of name lists in their system. They found that reducing their size by more than 90% had little effect on performance, conversely adding just 42 entries led to improved results. This implies that the quality of list entries is a more important factor in their effectiveness than the total number of entries. Mikheev et al. (1999) experimented with different types of lists in an NE system entered for MUC7 (MUC, 1998) . They concluded that small lists of carefully selected names are as effective as more complete lists, a result consistent with Krupke and Hausman. However, both studies altered name lists within a larger NE system and it is difficult to tell whether the consistency of performance is due to the changes in lists or extra, external, evidence being used to balance against the loss of internal evidence.",
79
- "cite_spans": [
80
- {
81
- "start": 73,
82
- "end": 98,
83
- "text": "Krupke and Hausman (1998)",
84
- "ref_id": "BIBREF2"
85
- },
86
- {
87
- "start": 425,
88
- "end": 446,
89
- "text": "Mikheev et al. (1999)",
90
- "ref_id": "BIBREF4"
91
- },
92
- {
93
- "start": 523,
94
- "end": 534,
95
- "text": "(MUC, 1998)",
96
- "ref_id": null
97
- }
98
- ],
99
- "ref_spans": [],
100
- "eq_spans": [],
101
- "section": "Introduction",
102
- "sec_num": "1"
103
- },
104
- {
105
- "text": "In this paper a NE system which uses only the internal evidence contained in lists of names is presented. Section 3 explains how such lists can be automatically generated from annotated text. Sections 4 and 5 describe experiments in which these corpusgenerated lists are applied and their performance compared against hand-crafted lists. In the next section the NE task is described in further detail.",
106
- "cite_spans": [],
107
- "ref_spans": [],
108
- "eq_spans": [],
109
- "section": "Introduction",
110
- "sec_num": "1"
111
- },
112
- {
113
- "text": "The NE task itself was first introduced as part of the MUC6 (MUC, 1995) evaluation exercise and was continued in MUC7 (MUC, 1998) . This formulation of the NE task defines seven types of NE: PERSON, ORGANIZATION, LOCATION, DATE, TIME, MONEY and PERCENT. Figure 1 shows a short text marked up in SGML with NEs in the MUC style.",
114
- "cite_spans": [
115
- {
116
- "start": 118,
117
- "end": 129,
118
- "text": "(MUC, 1998)",
119
- "ref_id": null
120
- }
121
- ],
122
- "ref_spans": [
123
- {
124
- "start": 254,
125
- "end": 262,
126
- "text": "Figure 1",
127
- "ref_id": null
128
- }
129
- ],
130
- "eq_spans": [],
131
- "section": "NE Recognition of Broadcast News",
132
- "sec_num": "2.1"
133
- },
134
- {
135
- "text": "The task was duplicated for the DARPA/NIST HUB4 evaluation exercise (Chinchor et al., 1998) but this time the corpus to be processed consisted of single case transcribed speech, rather than mixed case newswire text. Participants were asked to carry out NE recognition on North American broadcast news stories recorded from radio and television and processed by automatic speech recognition (ASR) software. The participants were provided with a training corpus consisting of around 32,000 words of transcribed broadcast news stories from 1997 annotated with NEs. Participants used these text to \"It's a chance to think about first-level questions,\" said Ms. <enamex type=\"PERS0N\">Cohn<enamex>, a partner in the <enamex type=\"0RGANIZATION\">McGlashan Sarrail<enamex> firm in <enamex type=\"L0CATION\">San Mateo<enamex>, <enamex type=\"L0CATION\">Calif.<enamex> Figure 1 : Text with MUC-style NE's marked develop their systems and were then provided with new, unannotated texts, consisting of transcribed broadcast news from 1998 which they were given a short time to annotate using their systems and return. Participants are not given access to the evaluation data while developing their systems.",
136
- "cite_spans": [
137
- {
138
- "start": 68,
139
- "end": 91,
140
- "text": "(Chinchor et al., 1998)",
141
- "ref_id": "BIBREF1"
142
- }
143
- ],
144
- "ref_spans": [
145
- {
146
- "start": 854,
147
- "end": 862,
148
- "text": "Figure 1",
149
- "ref_id": null
150
- }
151
- ],
152
- "eq_spans": [],
153
- "section": "NE Recognition of Broadcast News",
154
- "sec_num": "2.1"
155
- },
156
- {
157
- "text": "After the evaluation, BBN, one of the participants, released a corpus of 1 million words which they had manually annotated to provide their system with more training data. Through the remainder of this paper we refer to the HUB4 training data provided by DARPA/NIST as the SNORT_TRAIN corpus and the union of this with the BBN data as the LONG_TRAIN corpus. The data used for the 1998 HUB4 evaluation was kept blind, we did not examine the text themselves, and shall be referred to as the TEST corpus.",
158
- "cite_spans": [],
159
- "ref_spans": [],
160
- "eq_spans": [],
161
- "section": "NE Recognition of Broadcast News",
162
- "sec_num": "2.1"
163
- },
164
- {
165
- "text": "The systems were evaluated in terms of the complementary precision (P) and recall (R) metrics. Briefly, precision is the proportion of names proposed by a system which are true names while recall is the proportion of the true names which are actually identified. These metrics are often combined using a weighted harmonic called the F-measure (F) calculated according to formula 1 where fl is a weighting constant often set to 1. A full explanation of these metrics is provided by van Rijsbergen (1979) .",
166
- "cite_spans": [
167
- {
168
- "start": 496,
169
- "end": 502,
170
- "text": "(1979)",
171
- "ref_id": null
172
- }
173
- ],
174
- "ref_spans": [],
175
- "eq_spans": [],
176
- "section": "NE Recognition of Broadcast News",
177
- "sec_num": "2.1"
178
- },
179
- {
180
- "text": "F= (f~+l) xPxR (fl \u00d7 P) + R (1)",
181
- "cite_spans": [],
182
- "ref_spans": [],
183
- "eq_spans": [],
184
- "section": "NE Recognition of Broadcast News",
185
- "sec_num": "2.1"
186
- },
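[Editor's note] Formula 1 is simple to state in code. A minimal sketch in Python, assuming precision and recall are given as fractions in [0, 1]; the function name and example values are illustrative only, not from the paper:

```python
def f_measure(precision: float, recall: float, beta: float = 1.0) -> float:
    """Weighted harmonic mean of precision and recall (formula 1)."""
    if precision == 0.0 and recall == 0.0:
        return 0.0
    b2 = beta * beta
    return ((b2 + 1.0) * precision * recall) / (b2 * precision + recall)

# With beta = 1 this reduces to the familiar 2PR / (P + R); for the MUC7
# figures quoted below (P = 95%, R = 92%) it gives roughly 0.93, matching
# the reported 93.39% up to rounding of P and R.
print(f_measure(0.95, 0.92))
```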
187
- {
188
- "text": "The best performing system in the MUC7 exercise was produced by the Language Technology Group of Edinburgh University (Mikheev et al., 1999) . This achieved an F-measure of 93.39% (broken down as a precision of 95% and 92% recall). In HUB4 BBN (Miller et al., 1999) produced the best scoring system which achieved an F-measure of 90.56% (precision 91%, recall 90%) on the manually transcribed test data.",
189
- "cite_spans": [
190
- {
191
- "start": 118,
192
- "end": 140,
193
- "text": "(Mikheev et al., 1999)",
194
- "ref_id": "BIBREF4"
195
- },
196
- {
197
- "start": 244,
198
- "end": 265,
199
- "text": "(Miller et al., 1999)",
200
- "ref_id": "BIBREF5"
201
- }
202
- ],
203
- "ref_spans": [],
204
- "eq_spans": [],
205
- "section": "NE Recognition of Broadcast News",
206
- "sec_num": "2.1"
207
- },
208
- {
209
- "text": "The NE system used in this paper is based on Sheffield's LaSIE system (Wakao et al., 1996) , versions of which have participated in MUC and HUB4 evaluation exercises (Renals et al., 1999) . The system identifies names using a process consisting of four main modules:",
210
- "cite_spans": [
211
- {
212
- "start": 70,
213
- "end": 90,
214
- "text": "(Wakao et al., 1996)",
215
- "ref_id": "BIBREF10"
216
- },
217
- {
218
- "start": 166,
219
- "end": 187,
220
- "text": "(Renals et al., 1999)",
221
- "ref_id": "BIBREF8"
222
- }
223
- ],
224
- "ref_spans": [],
225
- "eq_spans": [],
226
- "section": "A Full NE system",
227
- "sec_num": "2.2"
228
- },
229
- {
230
- "text": "List Lookup This module consults several lists of likely names and name cues, marking each oc-currence in the input text. The name lists include lists of organisations, locations and person first names and the name cue lists of titles (eg. \"Mister\", \"Lord\"), which are likely to precede person names, and company designators (eg. \"Limited\" or \"Incorporated\"), which are likely to follow company names.",
231
- "cite_spans": [],
232
- "ref_spans": [],
233
- "eq_spans": [],
234
- "section": "A Full NE system",
235
- "sec_num": "2.2"
236
- },
237
- {
238
- "text": "Part of speech tagger The text is the part of speech tagged using the Brill tagger (Brill, 1992) . This tags some tokens as \"proper name\" but does not attempt to assign them to a NE class (eg. PERSON, LOCATION).",
239
- "cite_spans": [
240
- {
241
- "start": 83,
242
- "end": 96,
243
- "text": "(Brill, 1992)",
244
- "ref_id": "BIBREF0"
245
- }
246
- ],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "A Full NE system",
250
- "sec_num": "2.2"
251
- },
252
- {
253
- "text": "Name parsing Next the text is parsed using a collection of specialised NE grammars. The grammar rules identify sequences of part of speech tags as added by the List Lookup and Part of speech tagger modules. For example, there is a rule which says that a phrase consisting of a person first name followed by a word part of speech tagged as a proper noun is a person name. Namematching The names identified so far in the text are compared against all unidentified sequences of proper nouns produced by the part of speech tagger. Such sequences form candidate NEs and a set of heuristics is used to determine whether any such candidate names match any of those already identified. For example one such heuristics says that if a person is identified with a title (eg. \"President Clinton\") then any occurrences without the title are also likely to be person names '(so \"Clinton\" on it own would also be tagged as a person name).",
254
- "cite_spans": [],
255
- "ref_spans": [],
256
- "eq_spans": [],
257
- "section": "A Full NE system",
258
- "sec_num": "2.2"
259
- },
260
- {
261
- "text": "For the experiments described in this paper a restricted version of the system which used only the List Lookup module was constructed. The list lookup mechanism marks all words contained in any of the name lists and each is proposed as a NE. Any string occurring in more than one list is assigned the category form the first list in which it was found, although this did not occur in any of the sets of lists used in the experiments described here.",
262
- "cite_spans": [],
263
- "ref_spans": [],
264
- "eq_spans": [],
265
- "section": "A Full NE system",
266
- "sec_num": "2.2"
267
- },
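[Editor's note] A sketch of this restricted list-lookup mechanism in Python. The first-list-wins category rule is the one described above; the longest-match scan, the four-token window and the uppercase normalisation are our assumptions, since the paper does not specify the matching procedure:

```python
from typing import List, Set, Tuple

def list_lookup(tokens: List[str], lists: List[Tuple[str, Set[str]]],
                max_len: int = 4) -> List[Tuple[int, int, str]]:
    """Mark every token span that matches an entry in any name list.

    `lists` is ordered: a string found in several lists gets the
    category of the first list containing it."""
    matches = []
    i = 0
    while i < len(tokens):
        found = None
        # Prefer the longest span starting at position i.
        for n in range(min(max_len, len(tokens) - i), 0, -1):
            phrase = " ".join(tokens[i:i + n]).upper()
            for category, names in lists:
                if phrase in names:
                    found = (i, i + n, category)
                    break
            if found:
                break
        if found:
            matches.append(found)
            i = found[1]
        else:
            i += 1
    return matches

lists = [("LOCATION", {"SAN MATEO"}), ("PERSON", {"COHN"})]
print(list_lookup("MS. COHN OF SAN MATEO".split(), lists))
# -> [(1, 2, 'PERSON'), (3, 5, 'LOCATION')]
```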
268
- {
269
- "text": "List Generation The List Lookup module uses a set of handcrafted lists originally created for the MUC6 evaluation. They consisted of lists of names from the gazetteers provided for that competition, supplemented by manually added entries. These lists evolved for the MUC7 competition with new entries and lists being added. For HUB4 we used a selection of these lists, again manually supplementing them where necessary. These lists included lists of companies, organisations (such as government departments), countries and continents, cities, regions (such as US states) and person first names as well as company designators and person titles. We speculate that this ad hoc, evolutionary, approach to creating name lists is quite common amongst systems which perform the NE task.",
270
- "cite_spans": [],
271
- "ref_spans": [],
272
- "eq_spans": [],
273
- "section": "3",
274
- "sec_num": null
275
- },
276
- {
277
- "text": "In order to compare this approach against a simple system which gathers together all the names occurring in NE annotated training text, a program was implemented to analyse text annotated in the MUC SGML style (see Figure 1 ) and create lists for each NE type found. For example, given the NE <enamex type=\"LOCATION\">SAN MATE0<enamex> an entry SAN MATE0 would be added a list of locations.",
278
- "cite_spans": [],
279
- "ref_spans": [
280
- {
281
- "start": 215,
282
- "end": 223,
283
- "text": "Figure 1",
284
- "ref_id": null
285
- }
286
- ],
287
- "eq_spans": [],
288
- "section": "3",
289
- "sec_num": null
290
- },
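[Editor's note] This list-derivation program amounts to collecting the annotated spans by type. A minimal Python sketch, assuming well-formed <enamex> markup with proper closing tags (the figure in the source shows the closing tags garbled):

```python
import re
from collections import defaultdict

# Matches MUC-style markup such as <enamex type="LOCATION">SAN MATEO</enamex>.
ENAMEX = re.compile(r'<enamex type="(\w+)">(.*?)</enamex>', re.IGNORECASE)

def derive_lists(annotated_text: str) -> dict:
    """Build one name list per NE type from MUC-style annotated text."""
    lists = defaultdict(set)
    for ne_type, name in ENAMEX.findall(annotated_text):
        lists[ne_type.upper()].add(name.strip().upper())
    return dict(lists)

sample = 'a firm in <enamex type="LOCATION">SAN MATEO</enamex>'
print(derive_lists(sample))  # {'LOCATION': {'SAN MATEO'}}
```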
291
- {
292
- "text": "This simple approach is certainly acceptable for the LOCATION, ORGANIZATION and, to a more limited extent, PERSON classes. It is less applicable to the remaining classes of names (DATE, TIME, MONEY and PERCENT) because these are most easily recognised by their grammatical structure. For example, there is a rule in the NE grammar which says a number followed by a currency unit is as instance of the MONEY name class-eg. FIFTY THREE DOLLARS, FIVE MILLION ECU. According to Przbocki et al. (1999) 88% of names occurring in broadcast news text fall into one of the LOCATION, ORGANIZATION and PERSON categories.",
293
- "cite_spans": [
294
- {
295
- "start": 474,
296
- "end": 496,
297
- "text": "Przbocki et al. (1999)",
298
- "ref_id": "BIBREF7"
299
- }
300
- ],
301
- "ref_spans": [],
302
- "eq_spans": [],
303
- "section": "3",
304
- "sec_num": null
305
- },
306
- {
307
- "text": "Two sets of lists were derived, one from the SHORT_TRAIN corpus and a second from the LONG_TRAIN texts. The lengths of the lists produced are shown in The SHORT_TRAIN and LONG_TRAIN lists were each applied in two ways, alone and appended to the original, manually-created, lists. In addition, we computed the performance obtained using only the original lists for comparison. Although both sets of lists were derived using the SHORT_TRAIN data (since the LONG_TRAIN corpus includes SHORT_TRAIN), we still compute the performance of the SHORT_TRAIN lists on that corpus since this provides some insight into the best possible performance which can be expected from NE recognition using a simple list lookup mechanism. No scores were computed for the LONG_TRAIN lists against the SHORT_TRAIN corpus since this is unlikely to provide more information. Table 2 shows the results obtained when the SHORT_TRAIN lists were applied to that corpus. This first experiment was designed to determine how well the list lookup approach would perform given lists compiled directly from the corpus to which they are being applied. Only PERSON, LOCATION and ORGANIZATION name classes are considered since they form the majority of names occurring in the HUB4 text. As was mentioned previously, the remaining categories of name are more easily recognised using the NE parser. For each configuration of lists the precision, recall and F-measure are calculated for the each name class both individually and together.",
308
- "cite_spans": [],
309
- "ref_spans": [
310
- {
311
- "start": 849,
312
- "end": 856,
313
- "text": "Table 2",
314
- "ref_id": "TABREF3"
315
- }
316
- ],
317
- "eq_spans": [],
318
- "section": "3",
319
- "sec_num": null
320
- },
321
- {
322
- "text": "We can see that the original lists performed reasonably well, scoring an F-measure of 79% overall. However, the corpus-based lists performed far better achieving high precision and perfect recall. We would expect the system to recognise every name in the text, since they are all in the lists, but perfect precision is unlikely as this would require that no word appeared as both a name and non-name or in more than one name class. Even bearing this in mind the calculated precision for the ORGANIZATION class of names is quite low. Analysis of the output showed that several words occurred as names a few times in the text but also as non-names more frequently. For example, \"police\" appeared 35 times but only once as an organisation; similarly \"finance\" and \"republican\" occur frequently but only as a name a few times. In fact, these three list entries account for 61 spuriously generated names, from a total of 86 for the ORGANIZATION class. The original lists do not include words which are likely to generate spurious entries and names like \"police\" would only be recognised when there was further evidence.",
323
- "cite_spans": [],
324
- "ref_spans": [],
325
- "eq_spans": [],
326
- "section": "3",
327
- "sec_num": null
328
- },
329
- {
330
- "text": "The SHORT_TRAIN lists contain all the names occurring in that text. When these lists are combined with the original system lists the observed recall remains 100% while the precision drops. The original system lists introduce more spurious entries, leading to a drop of 3% F-measure.",
331
- "cite_spans": [],
332
- "ref_spans": [],
333
- "eq_spans": [],
334
- "section": "3",
335
- "sec_num": null
336
- },
337
- {
338
- "text": "The results of applying the corpus-derived lists to the texts from which they were obtained show that, even under these circumstances, perfect results cannot be obtained. Table 3 shows a more meaningful evaluation; the SHORT_TRAIN lists are applied to the TEST corpus, an unseen text. The original system lists achieve an F-measure of 83% on this text and the corpus-derived lists perform 8% worse. However, the configuration of lists which performs best is the union of the original lists with those derived from the corpus. This out-performs each set of lists taken in isolation both overall and for each name category individually. This is clear evidence that the lists used by the system described could be improved with the addition of lists derived from annotated text. It is worth commenting on some of the results for individual classes of names in this experiment. We can see that the performance for the ORGANIZATION class actually increases when the corpus-based lists are used. This is partially because names which are made up from initials (eg. \"C. N. N.\" and \"B. B. C. \") are not generally recognised by the list lookup mechanism in our system, but are captured by the parser and so were not included in the original lists. However, it is also likely that the organisation list is lacking, at least to some level. More interestingly, there is a very noticeable drop in the performance for the PERSON class. The SHORT_TRAIN lists achieved an F-measure of 99% on that text but only 48% on the TEST text. In Section 2.1 we mentioned that the HUB4 training data consists of news stories from 1997, while the test data contains stories from 1998. We therefore suggest that the decrease in performance for the PERSON category demonstrates a general property of broadcast news: many person names mentioned are specific to a particular time period (eg. \"Monica Lewinksi\" and \"Rodney King\"). In contrast, the locations and organisations mentioned are more stable over time. Table 4 shows the performance obtained when the lists derived from LONG_TRAIN were applied to the TEST corpus. The corpus-derived lists perform significantly worse than the original system lists, showing a large drop in precision. This is to be expected since the lists derived from LONG_TRAIN contain all the names occurring in a large body of text and therefore contain many words and phrases which are not names in this text, but spuriously match nonnames. Although the F-measure result is worse than when the SHORT_TRAIN lists were used, the recall is higher showing that a higher proportion of the true names can be found by analysing a larger body of text. Combining the original and corpus-derived lists leads to a 1% improvement. Recall is noticeably improved compared with the original lists, however precision is lowered and this shows that the corpusderived lists introduce a large number of spurious names.",
339
- "cite_spans": [],
340
- "ref_spans": [
341
- {
342
- "start": 171,
343
- "end": 178,
344
- "text": "Table 3",
345
- "ref_id": "TABREF5"
346
- },
347
- {
348
- "start": 1980,
349
- "end": 1987,
350
- "text": "Table 4",
351
- "ref_id": "TABREF6"
352
- }
353
- ],
354
- "eq_spans": [],
355
- "section": "3",
356
- "sec_num": null
357
- },
358
- {
359
- "text": "From this first set of experiments it can be seen that perfect results will not be obtained even using lists contain all and only the names in a particular text, thus demonstrating the limitations of this naive approach to named entity recognition. We have also demonstrated that it is possible for the addition of corpus-derived lists to improve the performance of a NE recognition system based on gazetteers. However, this is not guaranteed and it appears that adding too many names without any restriction may actually lead to poorer results, as happened when the LONG_TRAIN lists were applied.",
360
- "cite_spans": [],
361
- "ref_spans": [],
362
- "eq_spans": [],
363
- "section": "3",
364
- "sec_num": null
365
- },
366
- {
367
- "text": "The results from our first set of experiments led us to question whether it is possible to restrict the entries being added to the lists in order to avoid those likely to generate spurious names. We now go on to describe some methods which can be used to identify and remove list entries which may generate spurious names.",
368
- "cite_spans": [],
369
- "ref_spans": [],
370
- "eq_spans": [],
371
- "section": "Filtering Lists",
372
- "sec_num": "5"
373
- },
374
- {
375
- "text": "Method 1: Dictionary Filtering The derived lists can be improved by removing items in the list which also occur as entries in a dictionary. We began by taking the Longman Dictionary of Contemporary Englisb (LDOCE) (Procter, 1978) and extracting a list of words it contained including all derived forms, for example pluralisation of nouns and different verb forms. This produced a list of 52,576 tokens which could be used to filter name lists.",
376
- "cite_spans": [
377
- {
378
- "start": 214,
379
- "end": 229,
380
- "text": "(Procter, 1978)",
381
- "ref_id": null
382
- }
383
- ],
384
- "ref_spans": [],
385
- "eq_spans": [],
386
- "section": "Filtering Lists",
387
- "sec_num": "5"
388
- },
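[Editor's note] The dictionary filter is a set-membership test. A sketch in Python, where the small word list stands in for the ~52,576 tokens extracted from LDOCE:

```python
def dictionary_filter(name_list, dictionary_words):
    """Drop name-list entries that also occur as ordinary dictionary words."""
    dictionary = {w.lower() for w in dictionary_words}
    return [name for name in name_list if name.lower() not in dictionary]

names = ["POLICE", "FINANCE", "MCGLASHAN SARRAIL"]
# Entries that are also common words are removed; multiword proper
# names survive because they do not appear in the dictionary.
print(dictionary_filter(names, ["police", "finance"]))  # ['MCGLASHAN SARRAIL']
```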
389
- {
390
- "text": "Method 2: Probability Filtering The lists can be improved by removing names which occur more frequently in the corpus as non-names than names.",
391
- "cite_spans": [],
392
- "ref_spans": [],
393
- "eq_spans": [],
394
- "section": "Filtering Lists",
395
- "sec_num": "5"
396
- },
397
- {
398
- "text": "Another method for filtering lists was implemented, this time using the relative frequencies of phrases occurring as names and non-names. We can extract the probability that a phrase occurs as a name in the training corpus by dividing the number of times it occurs as a name by the total number of corpus occurrences. If this probability estimate is an accurate reflection of the name's behaviour in a new text we can use it to estimate the accuracy of adding that name to the list. Adding a name to a list will lead to a recall score of 1 for that name and a precision of Pr (where Pr is the probability value estimated from the training corpus) which implies an F-measure of ~.2Pr 1 Therefore the probabilities can be used to filter out candidate list items which imply low F-measure scores. We chose names whose corpus probabilities produced an F-measure lower than the overall score for the list. The LONG_TRAIN lists scored an F-measure of 73% on the unseen, TEST, data (see Table 4 ). Hence a filtering probability of 73% was used for these lists, with the corpus statistics gathered from LONG_TRAIN.",
399
- "cite_spans": [],
400
- "ref_spans": [
401
- {
402
- "start": 980,
403
- "end": 987,
404
- "text": "Table 4",
405
- "ref_id": "TABREF6"
406
- }
407
- ],
408
- "eq_spans": [],
409
- "section": "Filtering Lists",
410
- "sec_num": "5"
411
- },
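[Editor's note] The probability filter follows directly from the definitions above: recall is 1 for any entry on the list, precision is Pr, so the implied F-measure is 2Pr/(1+Pr). A sketch in Python; the counts in the example echo the earlier "police" observation (1 name occurrence out of 35):

```python
def probability_filter(candidates, name_counts, total_counts, threshold_f):
    """Keep a candidate only if its implied F-measure clears the threshold."""
    kept = []
    for name in candidates:
        # Pr = name occurrences / total occurrences in training text.
        pr = name_counts.get(name, 0) / max(total_counts.get(name, 1), 1)
        implied_f = 2.0 * pr / (1.0 + pr)  # recall 1, precision Pr, beta 1
        if implied_f >= threshold_f:
            kept.append(name)
    return kept

# "police": 1 of 35 occurrences is a name -> implied F ~ 0.056, filtered out.
print(probability_filter(["police"], {"police": 1}, {"police": 35}, 0.73))
```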
412
- {
413
- "text": "Method 3: Combining Filters These filtering strategies can be improved by combining them.",
414
- "cite_spans": [],
415
- "ref_spans": [],
416
- "eq_spans": [],
417
- "section": "Filtering Lists",
418
- "sec_num": "5"
419
- },
420
- {
421
- "text": "We also combined these two filtering strategies in two ways. Firstly, all names which appeared in the lexicon or whose corpus probability is below the filtering probability are removed from the lists. This is dubbed the \"or combination\". The second combination strategy removes any names which appear in the lexicon and occur with a corpus frequency below the filtering probability are removed. This second strategy is called the \"and combination\". These filtering strategies were applied to the LONG_TRAIN lists. The lengths of the lists produced are shown in Table 5 .",
422
- "cite_spans": [],
423
- "ref_spans": [
424
- {
425
- "start": 561,
426
- "end": 568,
427
- "text": "Table 5",
428
- "ref_id": "TABREF7"
429
- }
430
- ],
431
- "eq_spans": [],
432
- "section": "Filtering Lists",
433
- "sec_num": "5"
434
- },
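[Editor's note] The two combinations differ only in the boolean connective. A sketch in Python; the names, dictionary and probabilities are illustrative, and raw probabilities are used as the threshold test here, which footnote 1 below argues is nearly equivalent to thresholding the implied F-measure:

```python
def combine_filters(names, dictionary, prob, threshold):
    """Combine the dictionary and probability filters:
    'or'  -> drop a name if it is in the dictionary OR below threshold;
    'and' -> drop a name only if BOTH conditions hold."""
    in_dict = {n for n in names if n.lower() in dictionary}
    low_prob = {n for n in names if prob.get(n, 0.0) < threshold}
    or_filtered = [n for n in names if n not in in_dict and n not in low_prob]
    and_filtered = [n for n in names if not (n in in_dict and n in low_prob)]
    return or_filtered, and_filtered

names = ["FRANCE", "POLICE"]
dictionary = {"france", "police"}        # country names appear in LDOCE too
prob = {"FRANCE": 0.98, "POLICE": 0.03}  # corpus name-probabilities
print(combine_filters(names, dictionary, prob, 0.73))
# -> ([], ['FRANCE']): the "and" combination retains FRANCE, as argued below
```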
435
- {
436
- "text": "The strategies were evaluated by applying the filtered LONG_TRAIN lists to the TEST corpus, the results of which are shown in Table 6 . There is an 1Analysis of the behaviour of the function f(Pr) --2P~ l+Pr shows that it does not deviate too far from the value of Pr (ie. .f(Pr) ~ Pr) and so there is an argument for simply filtering the lists using the raw probabilities. improvement in performance of 4% F-measure when lists filtered using the \"and\" combination are used compared to the original, hand-crafted, lists. Although this approach removes only 108 items from all the lists there is a 14% F-measure improvement over the un-filtered lists. Each filtering strategy used individually demonstrates a lower level of improvement: the dictionary filtered lists 12% and the probability filtered 10%.",
437
- "cite_spans": [],
438
- "ref_spans": [
439
- {
440
- "start": 126,
441
- "end": 133,
442
- "text": "Table 6",
443
- "ref_id": "TABREF8"
444
- }
445
- ],
446
- "eq_spans": [],
447
- "section": "Filtering Lists",
448
- "sec_num": "5"
449
- },
450
- {
451
- "text": "The \"and\" combination is more successful because filtering lists using the dictionary alone removes many names we would like to keep (eg. country names are listed in LDOCE) but many of these are retained since both filters must agree. These experiments demonstrate that appropriately filtered corpus-derived lists can be more effective for NE recognition than hand-crafted lists. The difference between the observed performance of our simple method and those reported for the best-performing HUB4 system is perhaps lower that one may expect. The BBN system achieved 90.56% overall, and about 92% when only the PERSON, LOCATION and ORGANIZATION name classes are considered, 5% more than the method reported here. This difference is perhaps lower than we might expect given that name lists use only internal evidence (in the sense of Section 1). This indicates that simple application of the information contained in manually annotated NE training data can contribute massively to the overall performance of a system. They also provide a baseline against which the contribution of more sophisticated supervised learning techniques for NE recognition should be measured.",
452
- "cite_spans": [],
453
- "ref_spans": [],
454
- "eq_spans": [],
455
- "section": "Filtering Lists",
456
- "sec_num": "5"
457
- },
458
- {
459
- "text": "Un- Filtered Dictionary Probability List Filtered Filtered 2,157 1,978 2,000 3,947 3,769 3,235 1,489 1,412 1,364 Or Combined 1,964 3,522 1,382 And Combined 2,049 3,809 1,449 ",
460
- "cite_spans": [],
461
- "ref_spans": [
462
- {
463
- "start": 4,
464
- "end": 190,
465
- "text": "Filtered Dictionary Probability List Filtered Filtered 2,157 1,978 2,000 3,947 3,769 3,235 1,489 1,412 1,364 Or Combined 1,964 3,522 1,382 And Combined 2,049 3,809",
466
- "ref_id": "TABREF0"
467
- }
468
- ],
469
- "eq_spans": [],
470
- "section": "NE Category ORGANIZATION PERSON LOCATION",
471
- "sec_num": null
472
- },
473
- {
474
- "text": "This paper explored the role of lists of names in NE recognition, comparing hand-crafted and corpusderived lists. It was shown that, under certain conditions, corpus-derived lists outperform hand-crafted ones. Also, supplementing hand-crafted lists with corpus-based ones often improves their performance. The reported method was more effective for the ORGANIZATION and LOCATION classes of names than for PERSON, which was attributed to the fact that reportage of these names does not change as much over time in broadcast news.",
475
- "cite_spans": [],
476
- "ref_spans": [],
477
- "eq_spans": [],
478
- "section": "Conclusion",
479
- "sec_num": "6"
480
- },
481
- {
482
- "text": "The method reported here achieves 87% Fmeasure, 5% less than the best performing system in the HUB4 evaluation. However, it should be remembered that this technique uses only a simple application of internal evidence.",
483
- "cite_spans": [],
484
- "ref_spans": [],
485
- "eq_spans": [],
486
- "section": "Conclusion",
487
- "sec_num": "6"
488
- }
489
- ],
490
- "back_matter": [],
491
- "bib_entries": {
492
- "BIBREF0": {
493
- "ref_id": "b0",
494
- "title": "A simple rule-based part of speech tagger",
495
- "authors": [
496
- {
497
- "first": "E",
498
- "middle": [],
499
- "last": "Brill",
500
- "suffix": ""
501
- }
502
- ],
503
- "year": 1992,
504
- "venue": "Proceeding of the Third Conference on Applied Natural Language Processing (ANLP-92)",
505
- "volume": "",
506
- "issue": "",
507
- "pages": "152--155",
508
- "other_ids": {},
509
- "num": null,
510
- "urls": [],
511
- "raw_text": "E. Brill. 1992. A simple rule-based part of speech tagger. In Proceeding of the Third Conference on Applied Natural Language Processing (ANLP-92), pages 152-155, Trento, Italy.",
512
- "links": null
513
- },
514
- "BIBREF1": {
515
- "ref_id": "b1",
516
- "title": "Hub-4 named entity task definition (version 4.8)",
517
- "authors": [
518
- {
519
- "first": "N",
520
- "middle": [],
521
- "last": "Chinchor",
522
- "suffix": ""
523
- },
524
- {
525
- "first": "P",
526
- "middle": [],
527
- "last": "Robinson",
528
- "suffix": ""
529
- },
530
- {
531
- "first": "E",
532
- "middle": [],
533
- "last": "Brown",
534
- "suffix": ""
535
- }
536
- ],
537
- "year": 1998,
538
- "venue": "",
539
- "volume": "",
540
- "issue": "",
541
- "pages": "",
542
- "other_ids": {},
543
- "num": null,
544
- "urls": [],
545
- "raw_text": "N. Chinchor, P. Robinson, and E. Brown. 1998. Hub-4 named entity task defini- tion (version 4.8). Technical report, SAIC. http ://www. nist. gov/speech/hub4_98.",
546
- "links": null
547
- },
548
- "BIBREF2": {
549
- "ref_id": "b2",
550
- "title": "Isoquest Inc: description of the NetOwl(TM) extractor system as used for MUC-7",
551
- "authors": [
552
- {
553
- "first": "G",
554
- "middle": [],
555
- "last": "Krupke",
556
- "suffix": ""
557
- },
558
- {
559
- "first": "K",
560
- "middle": [],
561
- "last": "Hausman",
562
- "suffix": ""
563
- }
564
- ],
565
- "year": 1998,
566
- "venue": "Message Understanding Conference Proceedings: MUC 7",
567
- "volume": "",
568
- "issue": "",
569
- "pages": "",
570
- "other_ids": {},
571
- "num": null,
572
- "urls": [],
573
- "raw_text": "G. Krupke and K. Hausman. 1998. Isoquest Inc: description of the NetOwl(TM) extractor system as used for MUC-7. In Message Understanding Conference Proceedings: MUC 7. Available from http ://www.muc. saic. com.",
574
- "links": null
575
- },
576
- "BIBREF3": {
577
- "ref_id": "b3",
578
- "title": "Internal and external evidence in the identification and semantic categorization of proper names",
579
- "authors": [
580
- {
581
- "first": "D",
582
- "middle": [],
583
- "last": "Mcdonald",
584
- "suffix": ""
585
- }
586
- ],
587
- "year": 1996,
588
- "venue": "Corpus Processing for Lexical Aquisition",
589
- "volume": "",
590
- "issue": "",
591
- "pages": "21--39",
592
- "other_ids": {},
593
- "num": null,
594
- "urls": [],
595
- "raw_text": "D. McDonald. 1996. Internal and external evid- ence in the identification and semantic categor- ization of proper names. In B. Boguraev and J. Pustejovsky, editors, Corpus Processing for Lexical Aquisition, chapter 2, pages 21-39. MIT Press, Cambridge, MA.",
596
- "links": null
597
- },
598
- "BIBREF4": {
599
- "ref_id": "b4",
600
- "title": "Named entity recognition without gazeteers",
601
- "authors": [
602
- {
603
- "first": "A",
604
- "middle": [],
605
- "last": "Mikheev",
606
- "suffix": ""
607
- },
608
- {
609
- "first": "M",
610
- "middle": [],
611
- "last": "Moens",
612
- "suffix": ""
613
- },
614
- {
615
- "first": "C",
616
- "middle": [],
617
- "last": "Grovel",
618
- "suffix": ""
619
- }
620
- ],
621
- "year": 1999,
622
- "venue": "Proceedings of the Ninth Conference of the European Chapter of the Association for Computational Linguistics",
623
- "volume": "",
624
- "issue": "",
625
- "pages": "1--8",
626
- "other_ids": {},
627
- "num": null,
628
- "urls": [],
629
- "raw_text": "A. Mikheev, M. Moens, and C. Grovel 1999. Named entity recognition without gazeteers. In Proceedings of the Ninth Conference of the European Chapter of the Association for Compu- tational Linguistics, pages 1-8, Bergen, Norway.",
630
- "links": null
631
- },
632
- "BIBREF5": {
633
- "ref_id": "b5",
634
- "title": "Named entity extraction from broadcast news",
635
- "authors": [
636
- {
637
- "first": "D",
638
- "middle": [],
639
- "last": "Miller",
640
- "suffix": ""
641
- },
642
- {
643
- "first": "R",
644
- "middle": [],
645
- "last": "Schwartz",
646
- "suffix": ""
647
- },
648
- {
649
- "first": "R",
650
- "middle": [],
651
- "last": "Weischedel",
652
- "suffix": ""
653
- },
654
- {
655
- "first": "R",
656
- "middle": [],
657
- "last": "Stone ; I-Ierndon",
658
- "suffix": ""
659
- },
660
- {
661
- "first": "Virginia",
662
- "middle": [],
663
- "last": "Muc",
664
- "suffix": ""
665
- }
666
- ],
667
- "year": 1995,
668
- "venue": "Proceedings of the Sixth Message Understanding Conference (MUC-6}",
669
- "volume": "",
670
- "issue": "",
671
- "pages": "37--40",
672
- "other_ids": {},
673
- "num": null,
674
- "urls": [],
675
- "raw_text": "D. Miller, R. Schwartz, R. Weischedel, and R. Stone. 1999. Named entity extraction from broadcast news. In Proceedings of the DARPA Broadcast News Workshop, pages 37-40, I-Ierndon, Virginia. MUC. 1995. Proceedings of the Sixth Message Un- derstanding Conference (MUC-6}, San Mateo, CA. Morgan Kaufmann.",
676
- "links": null
677
- },
678
- "BIBREF6": {
679
- "ref_id": "b6",
680
- "title": "Message Understanding Conference Proceedings: MUC7",
681
- "authors": [],
682
- "year": null,
683
- "venue": "",
684
- "volume": "",
685
- "issue": "",
686
- "pages": "",
687
- "other_ids": {},
688
- "num": null,
689
- "urls": [],
690
- "raw_text": "Message Understanding Conference Proceed- ings: MUC7. http ://www.muc. sale com.",
691
- "links": null
692
- },
693
- "BIBREF7": {
694
- "ref_id": "b7",
695
- "title": "HUB4 Information Extraction Evaluation",
696
- "authors": [
697
- {
698
- "first": "M",
699
- "middle": [],
700
- "last": "Przbocki",
701
- "suffix": ""
702
- },
703
- {
704
- "first": "J",
705
- "middle": [],
706
- "last": "Fiscus",
707
- "suffix": ""
708
- },
709
- {
710
- "first": "J",
711
- "middle": [],
712
- "last": "Garofolo",
713
- "suffix": ""
714
- },
715
- {
716
- "first": "D",
717
- "middle": [],
718
- "last": "Pallett",
719
- "suffix": ""
720
- }
721
- ],
722
- "year": 1998,
723
- "venue": "Proceedings of the DARPA Broadcast News Workshop",
724
- "volume": "",
725
- "issue": "",
726
- "pages": "13--18",
727
- "other_ids": {},
728
- "num": null,
729
- "urls": [],
730
- "raw_text": "M. Przbocki, J. Fiscus, J. Garofolo, and D. Pallett. 1999. 1998 HUB4 Information Extraction Eval- uation. In Proceedings of the DARPA Broadcast News Workshop, pages 13-18, Herndon, Virginia.",
731
- "links": null
732
- },
733
- "BIBREF8": {
734
- "ref_id": "b8",
735
- "title": "Baseline IE-NE Experimants Using the SPRACH/LASIE System",
736
- "authors": [
737
- {
738
- "first": "S",
739
- "middle": [],
740
- "last": "Renals",
741
- "suffix": ""
742
- },
743
- {
744
- "first": "Y",
745
- "middle": [],
746
- "last": "Gotoh",
747
- "suffix": ""
748
- },
749
- {
750
- "first": "R",
751
- "middle": [],
752
- "last": "Gaizausaks",
753
- "suffix": ""
754
- },
755
- {
756
- "first": "M",
757
- "middle": [],
758
- "last": "Stevenson",
759
- "suffix": ""
760
- }
761
- ],
762
- "year": 1999,
763
- "venue": "Proceedings of the DAPRA Broadcast News Workshop",
764
- "volume": "",
765
- "issue": "",
766
- "pages": "47--50",
767
- "other_ids": {},
768
- "num": null,
769
- "urls": [],
770
- "raw_text": "S. Renals, Y. Gotoh, R. Gaizausaks, and M. Steven- son. 1999. Baseline IE-NE Experimants Using the SPRACH/LASIE System. In Proceedings of the DAPRA Broadcast News Workshop, pages 47-50, Herndon, Virginia.",
771
- "links": null
772
- },
773
- "BIBREF9": {
774
- "ref_id": "b9",
775
- "title": "Information Retrieval",
776
- "authors": [
777
- {
778
- "first": "C",
779
- "middle": [],
780
- "last": "Van Rijsbergen",
781
- "suffix": ""
782
- }
783
- ],
784
- "year": 1979,
785
- "venue": "",
786
- "volume": "",
787
- "issue": "",
788
- "pages": "",
789
- "other_ids": {},
790
- "num": null,
791
- "urls": [],
792
- "raw_text": "C. van Rijsbergen. 1979. Information Retrieval. Butterworths, London.",
793
- "links": null
794
- },
795
- "BIBREF10": {
796
- "ref_id": "b10",
797
- "title": "Evaluation of an algorithm for the recognition and classification of proper names",
798
- "authors": [
799
- {
800
- "first": "T",
801
- "middle": [],
802
- "last": "Wakao",
803
- "suffix": ""
804
- },
805
- {
806
- "first": "R",
807
- "middle": [],
808
- "last": "Gaizauskas",
809
- "suffix": ""
810
- },
811
- {
812
- "first": "K",
813
- "middle": [],
814
- "last": "Humphreys",
815
- "suffix": ""
816
- }
817
- ],
818
- "year": 1996,
819
- "venue": "Proceedings of the 16th International Conference on Computational Linguistics (COLING-96)",
820
- "volume": "",
821
- "issue": "",
822
- "pages": "418--423",
823
- "other_ids": {},
824
- "num": null,
825
- "urls": [],
826
- "raw_text": "T. Wakao, R. Gaizauskas, and K. Humphreys. 1996. Evaluation of an algorithm for the recognition and classification of proper names. In Proceedings of the 16th International Conference on Computa- tional Linguistics (COLING-96), pages 418-423, Copenhagen, Denmark.",
827
- "links": null
828
- }
829
- },
830
- "ref_entries": {
831
- "TABREF0": {
832
- "content": "<table><tr><td/><td>Corpus</td><td/></tr><tr><td colspan=\"3\">Category SHORT_TRAIN LONG_TRAIN</td></tr><tr><td>ORGANIZATION</td><td>245</td><td>2,157</td></tr><tr><td>PERSON</td><td>252</td><td>3,947</td></tr><tr><td>LOCATION</td><td>230</td><td>1,489</td></tr></table>",
833
- "type_str": "table",
834
- "text": "",
835
- "html": null,
836
- "num": null
837
- },
838
- "TABREF1": {
839
- "content": "<table><tr><td/><td>: Lengths of lists derived from SHORT_TRAIN</td></tr><tr><td colspan=\"2\">and LONG_TRAIN corpora</td></tr><tr><td>4</td><td>List Application</td></tr></table>",
840
- "type_str": "table",
841
- "text": "",
842
- "html": null,
843
- "num": null
844
- },
845
- "TABREF3": {
846
- "content": "<table/>",
847
- "type_str": "table",
848
- "text": "SHORT_TRAIN lists applied to SHORT_TRAIN corpus",
849
- "html": null,
850
- "num": null
851
- },
852
- "TABREF5": {
853
- "content": "<table><tr><td>Lists</td><td/><td>Original</td><td/><td colspan=\"3\">LONG_TRAIN</td><td colspan=\"3\">Combination</td></tr><tr><td>Name Type</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td></tr><tr><td>ALL</td><td colspan=\"8\">86 79 83 64 86 73 62 91</td><td>74</td></tr><tr><td colspan=\"10\">ORGANIZATION 82 57 67 44 85 58 43 88 58</td></tr><tr><td>PERSON</td><td colspan=\"9\">77 80 78 55 75 63 53 86 66</td></tr><tr><td>LOCATION</td><td colspan=\"9\">93 89 91 87 92 89 84 94 89</td></tr></table>",
854
- "type_str": "table",
855
- "text": "SHORT_TRAIN ]ists applied to TEST corpus",
856
- "html": null,
857
- "num": null
858
- },
859
- "TABREF6": {
860
- "content": "<table/>",
861
- "type_str": "table",
862
- "text": "LONG_TRAIN lists applied to TEST corpus",
863
- "html": null,
864
- "num": null
865
- },
866
- "TABREF7": {
867
- "content": "<table><tr><td/><td/><td colspan=\"5\">Original t Un-Filtered</td><td colspan=\"6\">Dictionary I Probability</td><td/><td>Or</td><td/><td/><td>And</td><td/></tr><tr><td/><td/><td>Lists</td><td/><td/><td>Lists</td><td/><td/><td>Filtered</td><td/><td/><td>Filtered</td><td/><td colspan=\"6\">Combination Combination</td></tr><tr><td>Name Type</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td></tr><tr><td>ALL</td><td colspan=\"3\">86 79 83</td><td colspan=\"3\">64 86 73</td><td colspan=\"3\">95 79 85</td><td colspan=\"3\">96 73 83</td><td>95</td><td colspan=\"2\">73 83</td><td colspan=\"3\">93 81 87</td></tr><tr><td>ORGANIZATION</td><td colspan=\"3\">82 57 67</td><td colspan=\"3\">44 85 58</td><td colspan=\"3\">86 72 78</td><td colspan=\"3\">85 74 79</td><td colspan=\"3\">84 60 70</td><td colspan=\"3\">84 76 80</td></tr><tr><td>PERSON</td><td colspan=\"3\">77 80 78</td><td colspan=\"3\">55 75 63</td><td colspan=\"3\">96 66 78</td><td colspan=\"3\">96 40 56</td><td colspan=\"3\">100 49 66</td><td colspan=\"3\">94 66 78</td></tr><tr><td>LOCATION</td><td colspan=\"3\">93 89 91</td><td colspan=\"3\">87 92 89</td><td colspan=\"3\">98 89 93</td><td colspan=\"3\">97 90 93</td><td colspan=\"3\">98 90 94</td><td colspan=\"3\">97 92 94</td></tr></table>",
868
- "type_str": "table",
869
- "text": "Lengths of corpus-derived lists",
870
- "html": null,
871
- "num": null
872
- },
873
- "TABREF8": {
874
- "content": "<table/>",
875
- "type_str": "table",
876
- "text": "Filtered and un-filtered LONG_TRAIN lists applied to TEST corpus",
877
- "html": null,
878
- "num": null
879
- }
880
- }
881
- }
882
- }
 
Full_text_JSON/prefixA/json/A00/A00-1041.json DELETED
@@ -1,759 +0,0 @@
1
- {
2
- "paper_id": "A00-1041",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:05.912030Z"
6
- },
7
- "title": "Answer Extraction",
8
- "authors": [
9
- {
10
- "first": "Steven",
11
- "middle": [],
12
- "last": "Abney",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "AT&T Shannon Laboratory",
16
- "institution": "",
17
- "location": {
18
- "addrLine": "180 Park Ave. Florharn Park",
19
- "postCode": "07932",
20
- "region": "NJ"
21
- }
22
- },
23
- "email": "[email protected]"
24
- },
25
- {
26
- "first": "Michael",
27
- "middle": [],
28
- "last": "Collins",
29
- "suffix": "",
30
- "affiliation": {
31
- "laboratory": "AT&T Shannon Laboratory",
32
- "institution": "",
33
- "location": {
34
- "addrLine": "180 Park Ave. Florharn Park",
35
- "postCode": "07932",
36
- "region": "NJ"
37
- }
38
- },
39
- "email": "[email protected]"
40
- },
41
- {
42
- "first": "Amit",
43
- "middle": [],
44
- "last": "Singhal",
45
- "suffix": "",
46
- "affiliation": {
47
- "laboratory": "AT&T Shannon Laboratory",
48
- "institution": "",
49
- "location": {
50
- "addrLine": "180 Park Ave. Florharn Park",
51
- "postCode": "07932",
52
- "region": "NJ"
53
- }
54
- },
55
- "email": "[email protected]"
56
- }
57
- ],
58
- "year": "",
59
- "venue": null,
60
- "identifiers": {},
61
- "abstract": "Information retrieval systems have typically concentrated on retrieving a set of documents which are relevant to a user's query. This paper describes a system that attempts to retrieve a much smaller section of text, namely, a direct answer to a user's question. The SMART IR system is used to extract a ranked set of passages that are relevant to the query. Entities are extracted from these passages as potential answers to the question, and ranked for plausibility according to how well their type matches the query, and according to their frequency and position in the passages. The system was evaluated at the TREC-8 question answering track: we give results and error analysis on these queries.",
62
- "pdf_parse": {
63
- "paper_id": "A00-1041",
64
- "_pdf_hash": "",
65
- "abstract": [
66
- {
67
- "text": "Information retrieval systems have typically concentrated on retrieving a set of documents which are relevant to a user's query. This paper describes a system that attempts to retrieve a much smaller section of text, namely, a direct answer to a user's question. The SMART IR system is used to extract a ranked set of passages that are relevant to the query. Entities are extracted from these passages as potential answers to the question, and ranked for plausibility according to how well their type matches the query, and according to their frequency and position in the passages. The system was evaluated at the TREC-8 question answering track: we give results and error analysis on these queries.",
68
- "cite_spans": [],
69
- "ref_spans": [],
70
- "eq_spans": [],
71
- "section": "Abstract",
72
- "sec_num": null
73
- }
74
- ],
75
- "body_text": [
76
- {
77
- "text": "In this paper, we describe and evaluate a questionanswering system based on passage retrieval and entity-extraction technology.",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "Introduction",
82
- "sec_num": "1"
83
- },
84
- {
85
- "text": "There has long been a concensus in the Information Retrieval (IR) community that natural language processing has little to offer for retrieval systems. Plausibly, this is creditable to the preeminence of ad hoc document retrieval as the task of interest in IR. However, there is a growing recognition of the limitations of ad hoc retrieval, both in the sense that current systems have reached the limit of achievable performance, and in the sense that users' information needs are often not well characterized by document retrieval.",
86
- "cite_spans": [],
87
- "ref_spans": [],
88
- "eq_spans": [],
89
- "section": "Introduction",
90
- "sec_num": "1"
91
- },
92
- {
93
- "text": "In many cases, a user has a question with a specific answer, such as What city is it where the European Parliament meets? or Who discovered Pluto? In such cases, ranked answers with links to supporting documentation are much more useful than the ranked list of documents that standard retrieval engines produce.",
94
- "cite_spans": [],
95
- "ref_spans": [],
96
- "eq_spans": [],
97
- "section": "Introduction",
98
- "sec_num": "1"
99
- },
100
- {
101
- "text": "The ability to answer specific questions also provides a foundation for addressing quantitative inquiries such as How many times has the Fed raised interest rates this year? which can be interpreted as the cardinality of the set of answers to a specific question that happens to have multiple correct an-swers, like On what date did the Fed raise interest rates this year?",
102
- "cite_spans": [],
103
- "ref_spans": [],
104
- "eq_spans": [],
105
- "section": "Introduction",
106
- "sec_num": "1"
107
- },
108
- {
109
- "text": "We describe a system that extracts specific answers from a document collection. The system's performance was evaluated in the question-answering track that has been introduced this year at the TREC information-retrieval conference. The major points of interest are the following.",
110
- "cite_spans": [],
111
- "ref_spans": [],
112
- "eq_spans": [],
113
- "section": "Introduction",
114
- "sec_num": "1"
115
- },
116
- {
117
- "text": "\u2022 Comparison of the system's performance to a system that uses the same passage retrieval component, but no natural language processing, shows that NLP provides significant performance improvements on the question-answering task.",
118
- "cite_spans": [],
119
- "ref_spans": [],
120
- "eq_spans": [],
121
- "section": "Introduction",
122
- "sec_num": "1"
123
- },
124
- {
125
- "text": "\u2022 The system is designed to build on the strengths of both IR and NLP technologies. This makes for much more robustness than a pure NLP system would have, while affording much greater precision than a pure IR system would have.",
126
- "cite_spans": [],
127
- "ref_spans": [],
128
- "eq_spans": [],
129
- "section": "Introduction",
130
- "sec_num": "1"
131
- },
132
- {
133
- "text": "\u2022 The task is broken into subtasks that admit of independent development and evaluation. Passage retrieval and entity extraction are both recognized independent tasks. Other subtasks are entity classification and query classification-both being classification tasks that use features obtained by parsing--and entity ranking.",
134
- "cite_spans": [],
135
- "ref_spans": [],
136
- "eq_spans": [],
137
- "section": "Introduction",
138
- "sec_num": "1"
139
- },
140
- {
141
- "text": "In the following section, we describe the questionanswering system, and in section 3, we quantify its performance and give an error analysis.",
142
- "cite_spans": [],
143
- "ref_spans": [],
144
- "eq_spans": [],
145
- "section": "Introduction",
146
- "sec_num": "1"
147
- },
148
- {
149
- "text": "The Question-Answering System",
150
- "cite_spans": [],
151
- "ref_spans": [],
152
- "eq_spans": [],
153
- "section": "2",
154
- "sec_num": null
155
- },
156
- {
157
- "text": "The system takes a natural-language query as input and produces a list of answers ranked in order of confidence. The top five answers were submitted to the TREC evaluation. Queries are processed in two stages. In the information retrieval stage, the most promising passages of the most promising documents are retrieved. In the linguistic processing stage, potential answers are extracted from these passages and ranked.",
158
- "cite_spans": [],
159
- "ref_spans": [],
160
- "eq_spans": [],
161
- "section": "2",
162
- "sec_num": null
163
- },
164
- {
165
- "text": "The system can be divided into five main components. The information retrieval stage consists of a single component, passage retrieval, and the linguistic processing stage circumscribes four components: entity extraction, entity classification, query classification, and entity ranking.",
166
- "cite_spans": [],
167
- "ref_spans": [],
168
- "eq_spans": [],
169
- "section": "2",
170
- "sec_num": null
171
- },
172
- {
173
- "text": "Passage Retrieval Identify relevant documents, and within relevant documents, identify the passages most likely to contain the answer to the question.",
174
- "cite_spans": [],
175
- "ref_spans": [],
176
- "eq_spans": [],
177
- "section": "2",
178
- "sec_num": null
179
- },
180
- {
181
- "text": "Entity Extraction Extract a candidate set of possible answers from the passages. the answer should be an entity of type Person.",
182
- "cite_spans": [],
183
- "ref_spans": [],
184
- "eq_spans": [],
185
- "section": "2",
186
- "sec_num": null
187
- },
188
- {
189
- "text": "Entity Ranking Assign scores to entities, representing roughly belief that the entity is the correct answer. There are two components of the score. The most-significant bit is whether or not the category of the entity (as determined by entity classification) matches the category that the question is seeking (as determined by query classification). A finer-grained ranking is imposed on entities with the correct category, through the use of frequency and other information.",
190
- "cite_spans": [],
191
- "ref_spans": [],
192
- "eq_spans": [],
193
- "section": "Entity Classification",
194
- "sec_num": null
195
- },
196
- {
197
- "text": "The following sections describe these five components in detail.",
198
- "cite_spans": [],
199
- "ref_spans": [],
200
- "eq_spans": [],
201
- "section": "Entity Classification",
202
- "sec_num": null
203
- },
204
- {
205
- "text": "The first step is to find passages likely to contain the answer to the query. We use a modified version of the SMART information retrieval system (Buckley and Lewit, 1985; Salton, 1971) to recover a set of documents which are relevant to the question. We define passages as overlapping sets consisting of a sentence and its two immediate neighbors. (Passages are in one-one correspondence with with sentences, and adjacent passages have two sentences in common.) The score for passage i was calculated as",
206
- "cite_spans": [
207
- {
208
- "start": 146,
209
- "end": 171,
210
- "text": "(Buckley and Lewit, 1985;",
211
- "ref_id": "BIBREF1"
212
- },
213
- {
214
- "start": 172,
215
- "end": 185,
216
- "text": "Salton, 1971)",
217
- "ref_id": null
218
- }
219
- ],
220
- "ref_spans": [],
221
- "eq_spans": [],
222
- "section": "Passage Retrieval",
223
- "sec_num": "2.1"
224
- },
225
- {
226
- "text": "EQUATION",
227
- "cite_spans": [],
228
- "ref_spans": [],
229
- "eq_spans": [
230
- {
231
- "start": 0,
232
- "end": 8,
233
- "text": "EQUATION",
234
- "ref_id": "EQREF",
235
- "raw_str": "1 \u00bcSi-z + \u00bdSi + ~'S,+1",
236
- "eq_num": "(1)"
237
- }
238
- ],
239
- "section": "Passage Retrieval",
240
- "sec_num": "2.1"
241
- },
242
- {
243
- "text": "where Sj, the score for sentence j, is the sum of IDF weights of non-stop terms that it shares with the query, plus an additional bonus for pairs of words (bigrams) that the sentence and query have in common.",
244
- "cite_spans": [],
245
- "ref_spans": [],
246
- "eq_spans": [],
247
- "section": "Passage Retrieval",
248
- "sec_num": "2.1"
249
- },
250
- {
251
- "text": "The top 50 passages are passed on as input to linguistic processing.",
252
- "cite_spans": [],
253
- "ref_spans": [],
254
- "eq_spans": [],
255
- "section": "Passage Retrieval",
256
- "sec_num": "2.1"
257
- },
258
- {
259
- "text": "Entity extraction is done using the Cass partial parser (Abney, 1996) . From the Cass output, we take dates, durations, linear measures, and quantities.",
260
- "cite_spans": [
261
- {
262
- "start": 56,
263
- "end": 69,
264
- "text": "(Abney, 1996)",
265
- "ref_id": "BIBREF0"
266
- }
267
- ],
268
- "ref_spans": [],
269
- "eq_spans": [],
270
- "section": "Entity Extraction",
271
- "sec_num": "2.2"
272
- },
273
- {
274
- "text": "In addition, we constructed specialized code for extracting proper names. The proper-name extractor essentially classifies capitalized words as intrinsically capitalized or not, where the alternatives to intrinsic capitalization are sentence-initial capitalization or capitalization in titles and headings. The extractor uses various heuristics, including whether the words under consideration appear unambiguously capitalized elsewhere in the document.",
275
- "cite_spans": [],
276
- "ref_spans": [],
277
- "eq_spans": [],
278
- "section": "Entity Extraction",
279
- "sec_num": "2.2"
280
- },
281
- {
282
- "text": "The following types of entities were extracted as potential answers to queries.",
283
- "cite_spans": [],
284
- "ref_spans": [],
285
- "eq_spans": [],
286
- "section": "Entity Classification",
287
- "sec_num": "2.3"
288
- },
289
- {
290
- "text": "Proper names were classified into these categories using a classifier built using the method described in (Collins and Singer, 1999) . 1 This is the only place where entity classification was actually done as a separate step from entity extraction.",
291
- "cite_spans": [
292
- {
293
- "start": 106,
294
- "end": 132,
295
- "text": "(Collins and Singer, 1999)",
296
- "ref_id": "BIBREF2"
297
- }
298
- ],
299
- "ref_spans": [],
300
- "eq_spans": [],
301
- "section": "Person, Location, Organization, Other",
302
- "sec_num": null
303
- },
304
- {
305
- "text": "Dates Four-digit numbers starting with 1... or 20.. were taken to be years. Cass was used to extract more complex date expressions (such as Saturday, January 1st, 2000) . We should note that this list does not exhaust the space of useful categories. Monetary amounts (e.g., ~The classifier makes a three way distinction between Person, Location and Organization; names where the classifier makes no decision were classified as Other Named E~tity. $25 million) were added to the system shortly after the Trec run, but other gaps in coverage remain. We discuss this further in section 3.",
306
- "cite_spans": [
307
- {
308
- "start": 140,
309
- "end": 168,
310
- "text": "Saturday, January 1st, 2000)",
311
- "ref_id": null
312
- }
313
- ],
314
- "ref_spans": [],
315
- "eq_spans": [],
316
- "section": "Person, Location, Organization, Other",
317
- "sec_num": null
318
- },
319
- {
320
- "text": "This step involves processing the query to identify the category of answer the user is seeking. We parse the query, then use the following rules to determine the category of the desired answer:",
321
- "cite_spans": [],
322
- "ref_spans": [],
323
- "eq_spans": [],
324
- "section": "Query Classification",
325
- "sec_num": "2.4"
326
- },
327
- {
328
- "text": "\u2022 Who, Whom -+ Person.",
329
- "cite_spans": [],
330
- "ref_spans": [],
331
- "eq_spans": [],
332
- "section": "Query Classification",
333
- "sec_num": "2.4"
334
- },
335
- {
336
- "text": "\u2022 Where, Whence, Whither--+ Location.",
337
- "cite_spans": [],
338
- "ref_spans": [],
339
- "eq_spans": [],
340
- "section": "Query Classification",
341
- "sec_num": "2.4"
342
- },
343
- {
344
- "text": "\u2022 When -+ Date.",
345
- "cite_spans": [],
346
- "ref_spans": [],
347
- "eq_spans": [],
348
- "section": "Query Classification",
349
- "sec_num": "2.4"
350
- },
351
- {
352
- "text": "\u2022 How few, great, little, many, much -+ Quemtity. We also extract the head word of the How expression (e.g., stooges in how many stooges) for later comparison to the head word of candidate answers.",
353
- "cite_spans": [],
354
- "ref_spans": [],
355
- "eq_spans": [],
356
- "section": "Query Classification",
357
- "sec_num": "2.4"
358
- },
359
- {
360
- "text": "\u2022 How long --+ Duration or Linear Measure.",
361
- "cite_spans": [],
362
- "ref_spans": [],
363
- "eq_spans": [],
364
- "section": "Query Classification",
365
- "sec_num": "2.4"
366
- },
367
- {
368
- "text": "Measure.",
369
- "cite_spans": [],
370
- "ref_spans": [],
371
- "eq_spans": [],
372
- "section": "How tall, wide, high, big, far --+ Linear",
373
- "sec_num": null
374
- },
375
- {
376
- "text": "\u2022 The wh-words Which or What typically appear with a head noun that describes the category of entity involved. These questions fall into two formats: What X where X is the noun involved, and What is the ... X. Here are a couple of examples:",
377
- "cite_spans": [],
378
- "ref_spans": [],
379
- "eq_spans": [],
380
- "section": "How tall, wide, high, big, far --+ Linear",
381
- "sec_num": null
382
- },
383
- {
384
- "text": "What company is the largest Japanese ship builder?",
385
- "cite_spans": [],
386
- "ref_spans": [],
387
- "eq_spans": [],
388
- "section": "How tall, wide, high, big, far --+ Linear",
389
- "sec_num": null
390
- },
391
- {
392
- "text": "What is the largest city in Germany?",
393
- "cite_spans": [],
394
- "ref_spans": [],
395
- "eq_spans": [],
396
- "section": "How tall, wide, high, big, far --+ Linear",
397
- "sec_num": null
398
- },
399
- {
400
- "text": "For these queries the head noun (e.g., company or city) is extracted, and a lexicon mapping nouns to categories is used to identify the category of the query. The lexicon was partly hand-built (including some common cases such as number --+ Quantity or year --~ Date). A large list of nouns indicating Person, Location or Organization categories was automatically taken from the contextual (appositive) cues learned in the named entity classifier described in (Collins and Singer, 1999 ).",
401
- "cite_spans": [
402
- {
403
- "start": 460,
404
- "end": 485,
405
- "text": "(Collins and Singer, 1999",
406
- "ref_id": "BIBREF2"
407
- }
408
- ],
409
- "ref_spans": [],
410
- "eq_spans": [],
411
- "section": "How tall, wide, high, big, far --+ Linear",
412
- "sec_num": null
413
- },
414
- {
415
- "text": "\u2022 In queries containing no wh-word (e.g., Name the largest city in Germany), the first noun phrase that is an immediate constituent of the matrix sentence is extracted, and its head is used to determine query category, as for What X questions.",
416
- "cite_spans": [],
417
- "ref_spans": [],
418
- "eq_spans": [],
419
- "section": "How tall, wide, high, big, far --+ Linear",
420
- "sec_num": null
421
- },
422
- {
423
- "text": "\u2022 Otherwise, the category is the wildcard Any.",
424
- "cite_spans": [],
425
- "ref_spans": [],
426
- "eq_spans": [],
427
- "section": "How tall, wide, high, big, far --+ Linear",
428
- "sec_num": null
429
- },
430
- {
431
- "text": "Entity scores have two components. The first, mostsignificant, component is whether or not the entity's category matches the query's category. (If the query category is Any, all entities match it.)",
432
- "cite_spans": [],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "Entity Ranking",
436
- "sec_num": "2.5"
437
- },
438
- {
439
- "text": "In most cases, the matching is boolean: either an entity has the correct category or not. However, there are a couple of special cases where finer distinctions are made. If a question is of the Date type, and the query contains one of the words day or month, then \"full\" dates are ranked above years. Conversely, if the query contains the word year, then years are ranked above full dates. In How many X questions (where X is a noun), quantified phrases whose head noun is also X are ranked above bare numbers or other quantified phrases: for example, in the query How many lives were lost in the Lockerbie air crash, entities such as 270 lives or almost 300 lives would be ranked above entities such as 200 pumpkins or 150. 2",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "Entity Ranking",
444
- "sec_num": "2.5"
445
- },
446
- {
447
- "text": "The second component of the entity score is based on the frequency and position of occurrences of a given entity within the retrieved passages. Each occurrence of an entity in a top-ranked passage counts 10 points, and each occurrence of an entity in any other passage counts 1 point. (\"Top-ranked passage\" means the passage or passages that received the maximal score from the passage retrieval component.) This score component is used as a secondary sort key, to impose a ranking on entities that are not distinguished by the first score component.",
448
- "cite_spans": [],
449
- "ref_spans": [],
450
- "eq_spans": [],
451
- "section": "Entity Ranking",
452
- "sec_num": "2.5"
453
- },
454
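
A rough sketch of this two-level ranking, with the boolean category match as the primary sort key and the frequency/position score as the secondary key; the entity and passage records are hypothetical simplifications of the system's data structures.

    # Sketch of entity ranking: category match first, then 10 points per
    # occurrence in a top-ranked passage and 1 point per occurrence elsewhere.
    def position_score(entity, passages):
        return sum(p["text"].count(entity["norm"]) * (10 if p["top_ranked"] else 1)
                   for p in passages)

    def rank_entities(entities, passages, query_category):
        def sort_key(e):
            category_match = query_category in ("Any", e["category"])
            return (category_match, position_score(e, passages))
        return sorted(entities, key=sort_key, reverse=True)
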
- {
455
- "text": "In counting occurrences of entities, it is necessary to decide whether or not two occurrences are tokens of the same entity or different entities. To this end, we do some normalization of entities. Dates are mapped to the format year-month-day: that is, last Tuesday, November 9, 1999 and 11/9/99 are both mapped to the normal form 1999 Nov 9 before frequencies are counted. Person names axe aliased based on the final word they contain. For example, Jackson and Michael Jackson are both mapped to the normal form Jackson. a",
456
- "cite_spans": [],
457
- "ref_spans": [],
458
- "eq_spans": [],
459
- "section": "Entity Ranking",
460
- "sec_num": "2.5"
461
- },
462
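
The normalization step can be sketched as follows; the two date patterns cover only the surface forms used as examples above (relative expressions such as last Tuesday would additionally need the article's date), and the person aliasing is the final-word rule as described.

    # Sketch of entity normalization: dates to a "year Mon day" form,
    # person names aliased by their final word.
    import re

    MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

    def normalize_date(text):
        m = re.fullmatch(r"(\d{1,2})/(\d{1,2})/(\d{2})", text)     # 11/9/99
        if m:
            month, day, year = m.groups()
            return f"19{year} {MONTHS[int(month) - 1]} {int(day)}"
        m = re.fullmatch(r"([A-Za-z]+) (\d{1,2}), (\d{4})", text)  # November 9, 1999
        if m:
            month, day, year = m.groups()
            return f"{year} {month[:3]} {int(day)}"
        return text

    def alias_person(name):
        return name.split()[-1]  # "Michael Jackson" -> "Jackson"

    assert normalize_date("11/9/99") == normalize_date("November 9, 1999")
    assert alias_person("Michael Jackson") == alias_person("Jackson")
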
- {
463
- "text": "The system was evaluated in the TREC-8 questionanswering track. TREC provided 198 questions as a blind test set: systems were required to provide five potential answers for each question, ranked in order of plausibility. The output from each system was then scored by hand by evaluators at NIST, each answer being marked as either correct or incorrect. The system's score on a particular question is a function of whether it got a correct answer in the five ranked answers, with higher scores for the answer appearing higher in the ranking. The system receives a score of 1, 1/2, 1/3, 1/4, 1/5, or 0, re-2perhaps less desirably, people would not be recognized as a synonym of lives in this example: 200 people would be indistinguishable from 200 pumpkins.",
464
- "cite_spans": [],
465
- "ref_spans": [],
466
- "eq_spans": [],
467
- "section": "Results on the TREC-8 Evaluation",
468
- "sec_num": "3.1"
469
- },
470
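
The scoring rule is what later came to be called mean reciprocal rank, cut off at rank five. A minimal sketch, where judgments is a hypothetical list holding, for each question, the rank of the first correct answer (None if no correct answer appeared in the top five):

    # Sketch of the TREC-8 scoring rule: 1/rank for a correct answer at
    # ranks 1-5, 0 otherwise, averaged over all questions.
    def question_score(rank):
        return 1.0 / rank if rank is not None and rank <= 5 else 0.0

    def system_score(judgments):
        return sum(question_score(r) for r in judgments) / len(judgments)

    print(round(system_score([1, None, 3, 2, None]), 3))  # 0.367 on this toy input
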
- {
471
- "text": "3This does introduce occasional errors, when two people with the same last name appear in retrieved passages.",
472
- "cite_spans": [],
473
- "ref_spans": [],
474
- "eq_spans": [],
475
- "section": "Results on the TREC-8 Evaluation",
476
- "sec_num": "3.1"
477
- },
478
- {
479
- "text": "Answer Figure 1 : Results on the TREC-8 Evaluation spectively, according as the correct answer is ranked 1st, 2nd, 3rd, 4th, 5th, or lower in the system output. The final score for a system is calculated as its mean score on the 198 questions.",
480
- "cite_spans": [],
481
- "ref_spans": [
482
- {
483
- "start": 7,
484
- "end": 15,
485
- "text": "Figure 1",
486
- "ref_id": null
487
- }
488
- ],
489
- "eq_spans": [],
490
- "section": "Mean",
491
- "sec_num": null
492
- },
493
- {
494
- "text": "The TREC evaluation considered two questionanswering scenarios: one where answers were limited to be less than 250 bytes in length, the other where the limit was 50 bytes. The output from the passage retrieval component (section 2.1), with some trimming of passages to ensure they were less than 250 bytes, was submitted to the 250 byte scenario. The output of the full entity-based system was submitted to the 50 byte track. For comparison, we also submitted the output of a 50-byte system based on IR techniques alone. In this system single-sentence passages were retrieved as potential answers, their score being calculated using conventional IR methods. Some trimming of sentences so that they were less than 50 bytes in length was performed. Figure 1 shows results on the TREC-8 evaluation. The 250-byte passage-based system found a correct answer somewhere in the top five answers on 68% of the questions, with a final score of 0.545. The 50byte passage-based system found a correct answer on 38.9% of all questions, with an average score of 0.261. The reduction in accuracy when moving from the 250-byte limit to the 50-byte limit is expected, because much higher precision is required; the 50byte limit allows much less extraneous material to be included with the answer. The benefit of the including less extraneous material is that the user can interpret the output with much less effort.",
495
- "cite_spans": [],
496
- "ref_spans": [
497
- {
498
- "start": 747,
499
- "end": 755,
500
- "text": "Figure 1",
501
- "ref_id": null
502
- }
503
- ],
504
- "eq_spans": [],
505
- "section": "Mean",
506
- "sec_num": null
507
- },
508
- {
509
- "text": "Our entity-based system found a correct answer in the top five answers on 46% of the questions, with a final score of 0.356. The performance is not as good as that of the 250-byte passage-based system. But when less extraneous material is permitted, the entity-based system outperforms the passage-based approach. The accuracy of the entity-based system is significantly better than that of the 50-byte passage-based system, and it returns virtually no extraneous material, as reflected in the average answer length of only 10.5 bytes. The implication is that NLP techniques become increasingly useful when short answers are required.",
510
- "cite_spans": [],
511
- "ref_spans": [],
512
- "eq_spans": [],
513
- "section": "Mean",
514
- "sec_num": null
515
- },
516
- {
517
- "text": "System 3.2.1 Ranking of Answers As a first point, we looked at the performance of the entity-based system, considering the queries where the correct answer was found somewhere in the top 5 answers (46% of the 198 questions). We found that on these questions, the percentage of answers ranked 1, 2, 3, 4, and 5 was 66%, 14%, 11%, 4%, and 4% respectively. This distribution is by no means uniform; it is clear that when the answer is somewhere in the top five, it is very likely to be ranked 1st or 2nd. The system's performance is quite bimodah it either completely fails to get the answer, or else recovers it with a high ranking. Figure 2 shows the distribution of question types in the TREC-8 test set (\"Percentage of Q's\"), and the performance of the entity-based system by question type (\"System Accuracy\"). We categorized the questions by hand, using the eight categories described in section 2.3, plus two categories that essentially represent types that were not handled by the system at the time of the TREC competition:",
518
- "cite_spans": [],
519
- "ref_spans": [
520
- {
521
- "start": 631,
522
- "end": 639,
523
- "text": "Figure 2",
524
- "ref_id": null
525
- }
526
- ],
527
- "eq_spans": [],
528
- "section": "Error Analysis of the Entity-Based",
529
- "sec_num": "3.2"
530
- },
531
- {
540
- "text": "\"System Accuracy\" means the percentage of questions for which the correct answer was in the top five returned by the system. There is a sharp division in the performance on different question types. The categories Person, Location, Date and Quantity are handled fairly well, with the correct answer appearing in the top five 60% of the time. These four categories make up 67% of all questions. In contrast, the other question types, accounting for 33% of the questions, are handled with only 15% accuracy.",
541
- "cite_spans": [],
542
- "ref_spans": [],
543
- "eq_spans": [],
544
- "section": "Accuracy on Different Categories",
545
- "sec_num": "3.2.2"
546
- },
547
- {
548
- "text": "Unsurprisingly, the Miscellaneous and Other Named Entity categories are problematic; unfortunately, they are also rather frequent. Figure 3 shows some examples of these queries. They include a large tail of questions seeking other entity types (mountain ranges, growth rates, films, etc.) and questions whose answer is not even an entity (e.g., \"Why did David Koresh ask the FBI for a word processor?\") For reference, figure 4 gives an impression of the sorts of questions that the system does well on (correct answer in top five).",
549
- "cite_spans": [],
550
- "ref_spans": [
551
- {
552
- "start": 131,
553
- "end": 139,
554
- "text": "Figure 3",
555
- "ref_id": "FIGREF1"
556
- },
557
- {
558
- "start": 418,
559
- "end": 426,
560
- "text": "figure 4",
561
- "ref_id": null
562
- }
563
- ],
564
- "eq_spans": [],
565
- "section": "Accuracy on Different Categories",
566
- "sec_num": "3.2.2"
567
- },
568
- {
569
- "text": "Finally, we performed an analysis to gauge which components represent performance bottlenecks in the current system. We examined system logs for a 50-question sample, and made a judgment of what caused the error, when there was an error. Figure 5 gives the breakdown. Each question was assigned to exactly one line of the table.",
570
- "cite_spans": [],
571
- "ref_spans": [
572
- {
573
- "start": 238,
574
- "end": 246,
575
- "text": "Figure 5",
576
- "ref_id": "FIGREF0"
577
- }
578
- ],
579
- "eq_spans": [],
580
- "section": "Errors by Component",
581
- "sec_num": "3.2.3"
582
- },
583
- {
584
- "text": "The largest body of errors, accounting for 18% of the questions, are those that are due to unhandled Figure 2 : Performance of the entity-based system on different question types. \"System Accuracy\" means percent of questions for which the correct answer was in the top five returned by the system. \"Good\" types are in the upper block, \"Bad\" types are in the lower block. five, but not at rank one, are almost all due to failures of entity ranking) Various factors contributing to misrankings are the heavy weighting assigned to answers in the top-ranked passage, the failure to adjust frequencies by \"complexity\" (e.g., it is significant if 22.5 million occurs several times, but not if 3 occurs several times), and the failure of the system to consider the linguistic context in which entities appear. types, of which half are monetary amounts. (Questions with non-entity answers account for another 4%.) Another large block (16%) is due to the passage retrieval component: the correct answer was not present in the retrieved passages. The linguistic components together account for the remaining 14% of error, spread evenly among them. The cases in which the correct answer is in the top 4 Conclusions and Future Work",
585
- "cite_spans": [],
586
- "ref_spans": [
587
- {
588
- "start": 101,
589
- "end": 109,
590
- "text": "Figure 2",
591
- "ref_id": null
592
- }
593
- ],
594
- "eq_spans": [],
595
- "section": "Errors by Component",
596
- "sec_num": "3.2.3"
597
- },
598
- {
599
- "text": "We have described a system that handles arbitrary questions, producing a candidate list of answers ranked by their plausibility. Evaluation on the TREC question-answering track showed that the correct answer to queries appeared in the top five answers 46% of the time, with a mean score of 0.356. The average length of answers produced by the system was 10.5 bytes.",
600
- "cite_spans": [],
601
- "ref_spans": [],
602
- "eq_spans": [],
603
- "section": "Errors by Component",
604
- "sec_num": "3.2.3"
605
- },
606
- {
607
- "text": "4The sole exception was a query misclassification caused by a parse failure---miraculously, the correct answer made it to rank five despite being of the \"wrong\" type.",
608
- "cite_spans": [],
609
- "ref_spans": [],
610
- "eq_spans": [],
611
- "section": "Errors by Component",
612
- "sec_num": "3.2.3"
613
- },
614
- {
615
- "text": "There are several possible areas for future work. There may be potential for improved performance through more sophisticated use of NLP techniques. In particular, the syntactic context in which a particular entity appears may provide important information, but it is not currently used by the system.",
616
- "cite_spans": [],
617
- "ref_spans": [],
618
- "eq_spans": [],
619
- "section": "Errors by Component",
620
- "sec_num": "3.2.3"
621
- },
622
- {
623
- "text": "Another area of future work is to extend the entity-extraction component of the system to handle arbitrary types (mountain ranges, films etc.). The error analysis in section 3.2.2 showed that these question types cause particular difficulties for the system.",
624
- "cite_spans": [],
625
- "ref_spans": [],
626
- "eq_spans": [],
627
- "section": "Errors by Component",
628
- "sec_num": "3.2.3"
629
- },
630
- {
631
- "text": "The system is largely hand-built. It is likely that as more features are added a trainable statistical or machine learning approach to the problem will become increasingly desirable. This entails developing a training set of question-answer pairs, raising the question of how a relatively large corpus of questions can be gathered and annotated.",
632
- "cite_spans": [],
633
- "ref_spans": [],
634
- "eq_spans": [],
635
- "section": "Errors by Component",
636
- "sec_num": "3.2.3"
637
- }
638
- ],
639
- "back_matter": [],
640
- "bib_entries": {
641
- "BIBREF0": {
642
- "ref_id": "b0",
643
- "title": "Partial parsing via finitestate cascades",
644
- "authors": [
645
- {
646
- "first": "Steven",
647
- "middle": [],
648
- "last": "Abney",
649
- "suffix": ""
650
- }
651
- ],
652
- "year": 1996,
653
- "venue": "J. Natural Language Engineering",
654
- "volume": "2",
655
- "issue": "4",
656
- "pages": "337--344",
657
- "other_ids": {},
658
- "num": null,
659
- "urls": [],
660
- "raw_text": "Steven Abney. 1996. Partial parsing via finite- state cascades. J. Natural Language Engineering, 2(4):337-344, December.",
661
- "links": null
662
- },
663
- "BIBREF1": {
664
- "ref_id": "b1",
665
- "title": "Optimization of inverted vector searches",
666
- "authors": [
667
- {
668
- "first": "C",
669
- "middle": [],
670
- "last": "Buckley",
671
- "suffix": ""
672
- },
673
- {
674
- "first": "A",
675
- "middle": [
676
- "F"
677
- ],
678
- "last": "Lewit",
679
- "suffix": ""
680
- }
681
- ],
682
- "year": 1985,
683
- "venue": "Proe. Eighth International ACM SIGIR Conference",
684
- "volume": "",
685
- "issue": "",
686
- "pages": "97--110",
687
- "other_ids": {},
688
- "num": null,
689
- "urls": [],
690
- "raw_text": "C. Buckley and A.F. Lewit. 1985. Optimization of inverted vector searches. In Proe. Eighth Interna- tional ACM SIGIR Conference, pages 97-110.",
691
- "links": null
692
- },
693
- "BIBREF2": {
694
- "ref_id": "b2",
695
- "title": "Unsupervised models for named entity classification",
696
- "authors": [
697
- {
698
- "first": "Michael",
699
- "middle": [],
700
- "last": "Collins",
701
- "suffix": ""
702
- },
703
- {
704
- "first": "Yoram",
705
- "middle": [],
706
- "last": "Singer",
707
- "suffix": ""
708
- }
709
- ],
710
- "year": 1999,
711
- "venue": "EMNLP",
712
- "volume": "",
713
- "issue": "",
714
- "pages": "",
715
- "other_ids": {},
716
- "num": null,
717
- "urls": [],
718
- "raw_text": "Michael Collins and Yoram Singer. 1999. Unsuper- vised models for named entity classification. In EMNLP.",
719
- "links": null
720
- },
721
- "BIBREF3": {
722
- "ref_id": "b3",
723
- "title": "The Smart Retrieval System -Experiments in Automatic Document Processing",
724
- "authors": [],
725
- "year": 1971,
726
- "venue": "",
727
- "volume": "",
728
- "issue": "",
729
- "pages": "",
730
- "other_ids": {},
731
- "num": null,
732
- "urls": [],
733
- "raw_text": "G. Salton, editor. 1971. The Smart Retrieval Sys- tem -Experiments in Automatic Document Pro- cessing. Prentice-Hall, Inc., Englewood Cliffs, NJ.",
734
- "links": null
735
- }
736
- },
737
- "ref_entries": {
738
- "FIGREF0": {
739
- "text": "Breakdown of questions by error type, in particular, by component responsible. Numbers are percent of questions in a 50-question sample.",
740
- "num": null,
741
- "uris": null,
742
- "type_str": "figure"
743
- },
744
- "FIGREF1": {
745
- "text": "Examples of \"Other Named Entity\" and Miscellaneous questions.",
746
- "num": null,
747
- "uris": null,
748
- "type_str": "figure"
749
- },
750
- "TABREF3": {
751
- "text": "Question I Rank I Output from SystemWho is the author of the book, The Iron Lady: A Biography of 2 Margaret Thatcher? What is the name of the managing director of Apricot Computer? i What country is the biggest producer of tungsten? Who was the first Taiwanese President?",
752
- "html": null,
753
- "content": "<table><tr><td/><td/><td/><td/><td>Hugo Young</td></tr><tr><td/><td/><td/><td/><td>Dr Peter Horne</td></tr><tr><td/><td/><td/><td/><td>China</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">Taiwanese President Li</td></tr><tr><td/><td/><td/><td/><td>Teng hui</td></tr><tr><td colspan=\"2\">When did Nixon visit China?</td><td/><td/><td>1972</td></tr><tr><td colspan=\"3\">How many calories are there in a Big Mac?</td><td>4</td><td>562 calories</td></tr><tr><td colspan=\"4\">What is the acronym for the rating system for air conditioner effi-1</td><td>EER</td></tr><tr><td>ciency?</td><td/><td/><td/><td/></tr><tr><td colspan=\"5\">Figure 4: A few TREC questions answered correctly by the system.</td></tr><tr><td>Type</td><td>Percent</td><td>System</td><td colspan=\"2\">Errors</td></tr><tr><td/><td>of Q's</td><td>Accuracy</td><td colspan=\"2\">Passage retrieval failed</td><td>16%</td></tr><tr><td>Person</td><td>28</td><td>62.5</td><td colspan=\"2\">Answer is not an entity</td><td>4%</td></tr><tr><td>Location Date Quantity</td><td>18.5 11 9.5</td><td>67.6 45.5 52.7</td><td colspan=\"2\">Answer of unhandled type: money Answer of unhandled type: misc Entity extraction failed</td><td>10% 8% 2%</td></tr><tr><td>TOTAL Other Named Ent Miscellaneous Linear Measure</td><td>67 14.5 8.5 3.5</td><td>60 31 5.9 0</td><td colspan=\"2\">Entity classification failed Query classification failed Entity ranking failed Successes</td><td>4% 4% 4%</td></tr><tr><td>Monetary Amt</td><td>3</td><td>0</td><td colspan=\"2\">Answer at Rank 2-5</td><td>I 16%</td></tr><tr><td>Organization</td><td>2</td><td>0</td><td>Answer at Rank 1</td><td/><td>I 32%</td></tr><tr><td>Duration</td><td>1.5</td><td>0</td><td>TOTAL</td><td/></tr><tr><td>TOTAL</td><td>33</td><td>15</td><td/><td/></tr></table>",
754
- "num": null,
755
- "type_str": "table"
756
- }
757
- }
758
- }
759
- }

Full_text_JSON/prefixA/json/A00/A00-1042.json DELETED
@@ -1,1041 +0,0 @@
1
- {
2
- "paper_id": "A00-1042",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:52.307004Z"
6
- },
7
- "title": "Evaluation of Automatically Identified Index Terms for Browsing Electronic Documents I",
8
- "authors": [
9
- {
10
- "first": "Nina",
11
- "middle": [],
12
- "last": "Wacholder",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Columbia University",
17
- "location": {}
18
- },
19
- "email": ""
20
- },
21
- {
22
- "first": "Judith",
23
- "middle": [
24
- "L"
25
- ],
26
- "last": "Klavans",
27
- "suffix": "",
28
- "affiliation": {
29
- "laboratory": "",
30
- "institution": "Columbia University",
31
- "location": {}
32
- },
33
- "email": ""
34
- },
35
- {
36
- "first": "David",
37
- "middle": [
38
- "K"
39
- ],
40
- "last": "Evans",
41
- "suffix": "",
42
- "affiliation": {
43
- "laboratory": "",
44
- "institution": "Columbia University",
45
- "location": {}
46
- },
47
- "email": ""
48
- }
49
- ],
50
- "year": "",
51
- "venue": null,
52
- "identifiers": {},
53
- "abstract": "We present an evaluation of domainindependent natural language tools for use in the identification of significant concepts in documents. Using qualitative evaluation, we compare three shallow processing methods for extracting index terms, i.e., terms that can be used to model the content of documents. We focus on two criteria: quality and coverage. In terms of quality alone, our results show that technical term (TT) extraction [Justeson and Katz 1995] receives the highest rating. However, in terms of a combined quality and coverage metric, the Head Sorting (HS) method, described in [Wacholder 1998], outperforms both other methods, keyword (KW) and TT. 1 This research was partly funded by NSF IRI 97-12069, \"Automatic identification of significant topics in domain independent full text documents\" and NSF IRI 97-53054, \"Computationally tractable methods for document analysis\".",
54
- "pdf_parse": {
55
- "paper_id": "A00-1042",
56
- "_pdf_hash": "",
57
- "abstract": [
58
- {
59
- "text": "We present an evaluation of domainindependent natural language tools for use in the identification of significant concepts in documents. Using qualitative evaluation, we compare three shallow processing methods for extracting index terms, i.e., terms that can be used to model the content of documents. We focus on two criteria: quality and coverage. In terms of quality alone, our results show that technical term (TT) extraction [Justeson and Katz 1995] receives the highest rating. However, in terms of a combined quality and coverage metric, the Head Sorting (HS) method, described in [Wacholder 1998], outperforms both other methods, keyword (KW) and TT. 1 This research was partly funded by NSF IRI 97-12069, \"Automatic identification of significant topics in domain independent full text documents\" and NSF IRI 97-53054, \"Computationally tractable methods for document analysis\".",
60
- "cite_spans": [],
61
- "ref_spans": [],
62
- "eq_spans": [],
63
- "section": "Abstract",
64
- "sec_num": null
65
- }
66
- ],
67
- "body_text": [
68
- {
69
- "text": "In this paper, we consider the problem of how to evaluate the automatic identification of index terms that have been derived without recourse to lexicons or to other kinds of domain-specific information. By index terms, we mean natural language expressions that constitute a meaningful representation of a document for humans. The premise of this research is that if significant topics coherently represent information in a document, these topics can be used as index terms that approximate the content of individual documents in large collections of electronic documents.",
70
- "cite_spans": [],
71
- "ref_spans": [],
72
- "eq_spans": [],
73
- "section": "Introduction",
74
- "sec_num": "2."
75
- },
76
- {
77
- "text": "We compare three shallow processing methods for identifying index terms: \u2022 Keywords (KW) are terms identified by counting frequency of stemmed words in a document;",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "Introduction",
82
- "sec_num": "2."
83
- },
84
- {
85
- "text": "Technical terms (TT) are noun phrases (NPs) or subparts of NPs repeated more than twice in a document [Justeson and Katz 1995] ; Head sorted terms (HS) are identified by a method in which simplex noun phrases (as defined below) are sorted by head and then ranked in decreasing order of frequency [Wacholder 1998 ].",
86
- "cite_spans": [
87
- {
88
- "start": 102,
89
- "end": 126,
90
- "text": "[Justeson and Katz 1995]",
91
- "ref_id": "BIBREF7"
92
- },
93
- {
94
- "start": 296,
95
- "end": 311,
96
- "text": "[Wacholder 1998",
97
- "ref_id": "BIBREF14"
98
- }
99
- ],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "Introduction",
103
- "sec_num": "2."
104
- },
105
- {
106
- "text": "The three methods that we evaluated are domain-independent in that they use statistical and/or linguistic properties that apply to any natural language document in any field. These methods are also corpus-independent, in that the ranking of terms for an individual document is not dependent on properties of the corpus.",
107
- "cite_spans": [],
108
- "ref_spans": [],
109
- "eq_spans": [],
110
- "section": "Introduction",
111
- "sec_num": "2."
112
- },
113
- {
114
- "text": "Subjects were drawn from two groups: professionals and students. Professionals included librarians and publishing professionals familiar with both manual and automatic text indexing. Students included undergraduate and graduate students with a variety of academic interests.",
115
- "cite_spans": [],
116
- "ref_spans": [],
117
- "eq_spans": [],
118
- "section": "Overview of methods and results",
119
- "sec_num": "2.1"
120
- },
121
- {
122
- "text": "To assess terms, we used a standard qualitative ranking technique. We presented subjects with an article and a list of terms identified by one of the three methods. Subjects were asked to answer the following general question: \"Would this term be useful in an electronic index for this article?\" Terms were rated on a scale of 1 to 5, where 1 indicates a high quality term that should definitely be included in the index and 5 indicates a junk term that definitely should not be included. For ex-ample, the phrase court-approved affirmative action plans received an average rating of 1 from the professionals, meaning that it was ranked as useful for the article; the KW affirmative received an average rating of 3.75, meaning that it was less useful; and the KW action received an average ranking of 4.5, meaning that it was not useful.",
123
- "cite_spans": [],
124
- "ref_spans": [],
125
- "eq_spans": [],
126
- "section": "Overview of methods and results",
127
- "sec_num": "2.1"
128
- },
129
- {
130
- "text": "The goal of our research is to determine which method, or combination of methods, provides the best results. We measure results in terms of two criteria: quality and coverage.",
131
- "cite_spans": [],
132
- "ref_spans": [],
133
- "eq_spans": [],
134
- "section": "Overview of methods and results",
135
- "sec_num": "2.1"
136
- },
137
- {
138
- "text": "By quality, we mean that evaluators ranked terms high on the 1 to 5 scale from highest to lowest. By coverage, we mean the thoroughness with which the terms cover the significant topics in the document. Our methodology permits us to measure both criteria, as shown in Figure 4 .",
139
- "cite_spans": [],
140
- "ref_spans": [
141
- {
142
- "start": 268,
143
- "end": 277,
144
- "text": "Figure 4",
145
- "ref_id": "FIGREF0"
146
- }
147
- ],
148
- "eq_spans": [],
149
- "section": "Overview of methods and results",
150
- "sec_num": "2.1"
151
- },
152
- {
153
- "text": "Our results from both the professionals and students show that TTs are superior with respect to quality; however, there are only a small number of TTs per document, so they do not provide adequate coverage in that they are not fully representative of the document as a whole. In contrast, KWs provide good coverage but relatively poor quality in that KWs are vague, and not well filtered. SNPs, which have been sorted using HS and filtered, provide a better balance of quality and coverage.",
154
- "cite_spans": [],
155
- "ref_spans": [],
156
- "eq_spans": [],
157
- "section": "Overview of methods and results",
158
- "sec_num": "2.1"
159
- },
160
- {
161
- "text": "From our study, we draw the following conclusions:",
162
- "cite_spans": [],
163
- "ref_spans": [],
164
- "eq_spans": [],
165
- "section": "Overview of methods and results",
166
- "sec_num": "2.1"
167
- },
168
- {
169
- "text": "\u2022 The KW approach identifies some useful index terms, but they are mixed in with a large number of low-ranked terms. \u2022 The TT approach identifies high quality terms, but with low coverage, i.e., relatively few indexing terms. \u2022 The HS approach achieves a balance between quality and coverage.",
170
- "cite_spans": [],
171
- "ref_spans": [],
172
- "eq_spans": [],
173
- "section": "Overview of methods and results",
174
- "sec_num": "2.1"
175
- },
176
- {
177
- "text": "In order to identify significant topics in a document, a significance measure is needed, i.e., a method for determining which concepts in the document are relatively important for a given task. The need to determine the importance of a particular concept within a document is motivated by a range of applications, including information retrieval [Salton 1989 ], 303 automatic determination of authorship [Mosteller and Wallace 1963] , similarity metrics for cross-document clustering [Hatzivassiloglou et al. 1999] , automatic indexing [Hodges et al. 1996 ] and input to summarization [Paice 1990 ].",
178
- "cite_spans": [
179
- {
180
- "start": 346,
181
- "end": 358,
182
- "text": "[Salton 1989",
183
- "ref_id": "BIBREF11"
184
- },
185
- {
186
- "start": 404,
187
- "end": 432,
188
- "text": "[Mosteller and Wallace 1963]",
189
- "ref_id": "BIBREF9"
190
- },
191
- {
192
- "start": 484,
193
- "end": 514,
194
- "text": "[Hatzivassiloglou et al. 1999]",
195
- "ref_id": "BIBREF4"
196
- },
197
- {
198
- "start": 536,
199
- "end": 555,
200
- "text": "[Hodges et al. 1996",
201
- "ref_id": null
202
- },
203
- {
204
- "start": 585,
205
- "end": 596,
206
- "text": "[Paice 1990",
207
- "ref_id": "BIBREF10"
208
- }
209
- ],
210
- "ref_spans": [],
211
- "eq_spans": [],
212
- "section": "Domain-independent metrics for identifying significant topics",
213
- "sec_num": "3."
214
- },
215
- {
216
- "text": "For example, one of the earlier applications using frequency for identifying significant topics in a document was proposed by [Luhn 1958 ] for use in creating automatic abstracts.",
217
- "cite_spans": [
218
- {
219
- "start": 126,
220
- "end": 136,
221
- "text": "[Luhn 1958",
222
- "ref_id": "BIBREF8"
223
- }
224
- ],
225
- "ref_spans": [],
226
- "eq_spans": [],
227
- "section": "Domain-independent metrics for identifying significant topics",
228
- "sec_num": "3."
229
- },
230
- {
231
- "text": "For each document, a list of stoplisted stems was created, and ranked by frequency; the most frequent keywords were used to identify significant sentences in the original document. Luhn's premise was that emphasis, as indicated by repetition of words and collocation is an indicator of significance. Namely, \"the more often certain words are found in each other's company within a sentence, the more significance may be attributed to each of these words.\" This basic observation, although refined extensively by later summarization techniques (as reviewed in [Paice 1990 ]), relies on the capability of identifying significant concepts.",
232
- "cite_spans": [
233
- {
234
- "start": 559,
235
- "end": 570,
236
- "text": "[Paice 1990",
237
- "ref_id": "BIBREF10"
238
- }
239
- ],
240
- "ref_spans": [],
241
- "eq_spans": [],
242
- "section": "Domain-independent metrics for identifying significant topics",
243
- "sec_num": "3."
244
- },
245
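
A minimal sketch of this frequency-based keyword method; the stoplist and the suffix-stripping stemmer are toy stand-ins for the stoplisting and stemming actually used.

    # Luhn-style keywords: stoplist, crude stemming, frequency ranking.
    from collections import Counter
    import re

    STOPLIST = {"the", "a", "an", "of", "and", "in", "to", "is", "was", "for"}

    def crude_stem(word):
        for suffix in ("ing", "ed", "s"):
            if word.endswith(suffix) and len(word) > len(suffix) + 2:
                return word[:-len(suffix)]
        return word

    def keywords(text, n=10):
        tokens = re.findall(r"[a-z]+", text.lower())
        stems = (crude_stem(t) for t in tokens if t not in STOPLIST)
        return Counter(stems).most_common(n)
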
- {
246
- "text": "The standard IR technique known as tf*idf [Salton 1989 ] seeks to identify documents relevant to a particular query by relativizing keyword frequency in a document as compared to frequency in a corpus. This method can be used to locate at least some important concepts in full text. Although it has been effective for information retrieval, for other applications, such as human-oriented indexing, this technique is impractical. Ambiguity of stems (trad might refer to trader or tradition) and of isolated words (state might be a political entity or a mode of being) means that lists of keywords have not usually been used to represent the content of a document to human beings. Furthermore, humans have a difficult time processing stems and parts of words out of phrasal context.",
247
- "cite_spans": [
248
- {
249
- "start": 42,
250
- "end": 54,
251
- "text": "[Salton 1989",
252
- "ref_id": "BIBREF11"
253
- }
254
- ],
255
- "ref_spans": [],
256
- "eq_spans": [],
257
- "section": "Domain-independent metrics for identifying significant topics",
258
- "sec_num": "3."
259
- },
260
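
For contrast with the corpus-independent methods evaluated in this paper, one common variant of the tf*idf weight is sketched below; note that it requires corpus-level document frequencies, which is exactly the dependency the three methods under study avoid.

    # One common variant of the tf*idf weighting.
    import math

    def tf_idf(term_count, doc_length, n_docs, n_docs_with_term):
        tf = term_count / doc_length
        idf = math.log(n_docs / (1 + n_docs_with_term))
        return tf * idf
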
- {
261
- "text": "The technical term (TT) method, another technique for identification of significant terms in text that can be used as index terms was introduced by [Justeson and Katz 1995] , who developed an algorithm for identifying repeated multi-word phrases such as central processing unit in the computer domain or word sense in the lexical semantic domain.",
262
- "cite_spans": [
263
- {
264
- "start": 148,
265
- "end": 172,
266
- "text": "[Justeson and Katz 1995]",
267
- "ref_id": "BIBREF7"
268
- }
269
- ],
270
- "ref_spans": [],
271
- "eq_spans": [],
272
- "section": "Domain-independent metrics for identifying significant topics",
273
- "sec_num": "3."
274
- },
275
- {
276
- "text": "This algorithm identifies candidate TTs in a corpus by locating NPs consisting of nouns, adjectives, and sometimes prepositional phrases. TTs are defined as those NPs, or their subparts, which occur above some frequency threshold in a corpus. However, as [Boguraev and Kennedy 1998 ] observe, the TT technique may not characterize the full content of documents. Indeed, even in a technical document, TTs do not provide adequate coverage of the NPs in a document that contribute to its content, especially since TTs are by definition multi-word. A truly domain-general method should apply to both technical and nontechnical documents. The relevant difference between technical and non-technical documents is that in technical documents, many of the topics which are significant to the document as a whole may be also TTs. [Wacholder 1998 ] proposed the method of Head Sorting for identifying significant topics that can be used to represent a source document. HS also uses a frequency measure to provide an approximation of topic significance. However, instead of counting frequency of stems or repetition of word sequences, this method counts frequency of a relatively easily identified grammatical element, heads of simplex noun phrases (SNPs). For common NPs (NPs whose head is a common noun), an SNP is a maximal NP that includes premodifiers such as determiners and possessives but not post-nominal constituents such as prepositions or relativizers. For example, the well-known book is an SNP but the well-known book on asteroids includes two SNPs, wellknown book and asteroids. For proper names, an SNP is a name that refers to a single entity. For example, Museum of the City of New York, the name of an organization, is an SNP even though the organizational name incorporates a city name. Others, such as [Church 1988] , have discussed a similar concept, sometimes called simple or base NPs.",
277
- "cite_spans": [
278
- {
279
- "start": 255,
280
- "end": 281,
281
- "text": "[Boguraev and Kennedy 1998",
282
- "ref_id": null
283
- },
284
- {
285
- "start": 821,
286
- "end": 836,
287
- "text": "[Wacholder 1998",
288
- "ref_id": "BIBREF14"
289
- },
290
- {
291
- "start": 1812,
292
- "end": 1825,
293
- "text": "[Church 1988]",
294
- "ref_id": null
295
- }
296
- ],
297
- "ref_spans": [],
298
- "eq_spans": [],
299
- "section": "Domain-independent metrics for identifying significant topics",
300
- "sec_num": "3."
301
- },
302
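
The TT criterion then reduces to a filter over candidate phrases. A rough sketch, assuming candidate NP strings (and their subparts) have already been extracted by the tagging and NP-pattern stage, with the repetition threshold left as a parameter:

    # Sketch of technical-term filtering: keep multi-word candidates
    # that repeat often enough in the document.
    from collections import Counter

    def technical_terms(candidate_nps, min_count=2):
        counts = Counter(phrase.lower() for phrase in candidate_nps)
        return sorted(t for t, c in counts.items()
                      if c >= min_count and len(t.split()) > 1)
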
- {
303
- "text": "The HS approach is based on the assumption that nominal elements can be used to convey the gist of a document. SNPs, which are semantically and syntactically coherent, appear to be at a good level of detail for content representation of the document. '",
304
- "cite_spans": [],
305
- "ref_spans": [],
306
- "eq_spans": [],
307
- "section": "Domain-independent metrics for identifying significant topics",
308
- "sec_num": "3."
309
- },
310
- {
311
- "text": "SNPs are identified by a system [Evans 1998; Evans et al. 2000] which sequentially parses text that has been tagged with part of speech using a finite state machine. Next, the complete list of SNPs identified in a document is sorted by the head of the phrase, which, at least for English-language common SNPs, is almost always the last word. The intuitive justification for sorting SNPs by head is based on the fundamental linguistic distinction between head and modifier: in general, a head makes a greater contribution to the syntax and semantics of a phrase than does a modifier. This linguistic insight can be extended to the document level. If, as a practical matter, it is necessary to rank the contribution to a whole document made by the sequence of words constituting an NP, the head should be ranked more highly than other words in the phrase. This distinction is important in linguistic theory; for example, [Jackendoff 1977] discusses the relationship of heads and modifiers in phrase structure. It is also important in NLP, where, for example, [Strzalkowski 1997] and [Evans and Zhai 1996] have used the distinction between heads and modifiers to add query terms to information retrieval systems.",
312
- "cite_spans": [
313
- {
314
- "start": 32,
315
- "end": 44,
316
- "text": "[Evans 1998;",
317
- "ref_id": "BIBREF2"
318
- },
319
- {
320
- "start": 45,
321
- "end": 63,
322
- "text": "Evans et al. 2000]",
323
- "ref_id": "BIBREF3"
324
- },
325
- {
326
- "start": 919,
327
- "end": 936,
328
- "text": "[Jackendoff 1977]",
329
- "ref_id": "BIBREF6"
330
- },
331
- {
332
- "start": 1057,
333
- "end": 1076,
334
- "text": "[Strzalkowski 1997]",
335
- "ref_id": "BIBREF13"
336
- },
337
- {
338
- "start": 1081,
339
- "end": 1102,
340
- "text": "[Evans and Zhai 1996]",
341
- "ref_id": "BIBREF1"
342
- }
343
- ],
344
- "ref_spans": [],
345
- "eq_spans": [],
346
- "section": "304",
347
- "sec_num": null
348
- },
349
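
Head Sorting itself is then a grouping and ranking step. A minimal sketch, assuming the SNPs have already been extracted by the finite-state machine and taking the last word of a common-noun SNP as its head:

    # Sketch of Head Sorting: group SNPs by head, rank heads by frequency.
    from collections import defaultdict

    def head_sort(snps):
        groups = defaultdict(list)
        for snp in snps:
            groups[snp.split()[-1].lower()].append(snp)
        return sorted(groups.items(), key=lambda kv: len(kv[1]), reverse=True)

    print(head_sort(["trust account", "money market fund", "mutual fund"]))
    # [('fund', ['money market fund', 'mutual fund']), ('account', ['trust account'])]
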
- {
350
- "text": "Powerful corpus processing techniques have been developed to measure deviance from an average occurrence or co-occurrence in the corpus. In this paper we chose to evaluate methods that depend only on document-internal data, independent of corpus, domain or genre. We therefore did not use, for example, tf*idf, the purely statistical technique that is the used by most information retrieval systems, or [Smadja 1993 ], a hybrid statistical and symbolic technique for identifying collocations.",
351
- "cite_spans": [
352
- {
353
- "start": 403,
354
- "end": 415,
355
- "text": "[Smadja 1993",
356
- "ref_id": "BIBREF12"
357
- }
358
- ],
359
- "ref_spans": [],
360
- "eq_spans": [],
361
- "section": "304",
362
- "sec_num": null
363
- },
364
- {
365
- "text": "To evaluate techniques, we performed a qualitative user evaluation in which the terms identified by each method were compared for usefulness as index terms.",
366
- "cite_spans": [],
367
- "ref_spans": [],
368
- "eq_spans": [],
369
- "section": "Experimental Method",
370
- "sec_num": "4."
371
- },
372
- {
373
- "text": "We performed our study with librarians, publishing professionals and undergraduate and graduate students at our university. 29 subjects participated in the study: 7 librarians and publishing professionals and 22 students.",
374
- "cite_spans": [],
375
- "ref_spans": [],
376
- "eq_spans": [],
377
- "section": "Subjects",
378
- "sec_num": "4.1"
379
- },
380
- {
381
- "text": "For this experiment, we selected three articles from the 1990 Wall Street Journal contained in the Tipster collection of documents. The articles were about 500 words in length.",
382
- "cite_spans": [],
383
- "ref_spans": [],
384
- "eq_spans": [],
385
- "section": "Data",
386
- "sec_num": "4.2"
387
- },
388
- {
389
- "text": "To compare methods, each article was processed three times: 1) with SMART to identify stemmed keywords [Salton 1989] ; 2) with an implementation of the TT algorithm based on [Justeson and Katz 1995] ; and 3) with our implementation of the HS method. Output for one article is shown in Appendix A. Figure 1 shows the articles selected, their length in words and the number of index terms from each method for each article presented to the subjects. ",
390
- "cite_spans": [
391
- {
392
- "start": 103,
393
- "end": 116,
394
- "text": "[Salton 1989]",
395
- "ref_id": "BIBREF11"
396
- },
397
- {
398
- "start": 174,
399
- "end": 198,
400
- "text": "[Justeson and Katz 1995]",
401
- "ref_id": "BIBREF7"
402
- }
403
- ],
404
- "ref_spans": [
405
- {
406
- "start": 297,
407
- "end": 306,
408
- "text": "Figure 1",
409
- "ref_id": null
410
- }
411
- ],
412
- "eq_spans": [],
413
- "section": "Data",
414
- "sec_num": "4.2"
415
- },
416
- {
417
- "text": "The number of TTs is much lower than the number of KWs or HSs. This presented us with a problem: on the one hand, we were concerned about preserving the integrity of the three methods, each of which has their own logic, and at the same time, we were concerned to present lists that were balanced relative to each other. Toward this end, we made several decisions about presentation of the data: 1. Threshold: So that no bias would be unintentionally introduced, we presented subjects with all terms output by each method, up to a specified cut-off poin-However, using lists of equal length for each method would have necessitated either omitting HSs and KWs or changing the definition of TTs. Therefore we made the following decisions:",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "Figure 1: Word and term count, by type, per article",
422
- "sec_num": null
423
- },
424
- {
425
- "text": "\u2022 For TTs, we included all identified terms; \u2022 For HSs, we included all terms whose head occurred more than once in the document;",
426
- "cite_spans": [],
427
- "ref_spans": [],
428
- "eq_spans": [],
429
- "section": "Figure 1: Word and term count, by type, per article",
430
- "sec_num": null
431
- },
432
- {
441
- "text": "\u2022 For KWs, we included all terms in order of decreasing frequency, up to the point where we observed diminishing quality and where the number of KWs approximated the number of HSs.",
442
- "cite_spans": [],
443
- "ref_spans": [],
444
- "eq_spans": [],
445
- "section": "305",
446
- "sec_num": null
447
- },
448
- {
449
- "text": "Order: For the KW and TT approach, order is not significant. However, for the HS approach, the grouping together of phrases with common heads is, we claim, one of the advantages of the method. We therefore alphabetized the KWs and TTs in standard left to right order and alphabetized the HSs by head, e.g., trust account precedes money market fund.",
450
- "cite_spans": [],
451
- "ref_spans": [],
452
- "eq_spans": [],
453
- "section": "305",
454
- "sec_num": null
455
- },
456
- {
457
- "text": "The KW approach identifies stems which represent a set of one or more morphological variants of the stem. Since in some cases the stem is not an English word, we expanded each stem to include the morphological variants that actually occurred in the article. For example, for the stem reject, we listed rejected and rejecting but did not list rejects, which did not occur in the article.",
458
- "cite_spans": [],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "Morphological expansion:",
462
- "sec_num": null
463
- },
464
- {
465
- "text": "Each subject was presented with three articles. For one article, the subject received a head sorted list of HSs; for another article, the subject received a list of technical terms, and for the third article, the subject saw a list of keywords. No time limit was placed on the task.",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Presentation to subjects",
470
- "sec_num": "4.3"
471
- },
472
- {
473
- "text": "Our results for the three types of terms, by document, are shown in Figure 2 . Although we asked subjects to rate three articles, some volunteers rated only two. All results were included. ",
474
- "cite_spans": [],
475
- "ref_spans": [
476
- {
477
- "start": 68,
478
- "end": 76,
479
- "text": "Figure 2",
480
- "ref_id": null
481
- }
482
- ],
483
- "eq_spans": [],
484
- "section": "Results",
485
- "sec_num": "5."
486
- },
487
- {
488
- "text": "For the three lists of index terms, TTs received the highest ratings for all three documents--an average of 1.79 on the scale of 1 to 5, with 1 being the best rating. HS came in second, with an average of 2.89, and KW came in last with an average of 3.27. It should be noted that averaging the average conceals the fact that the number of TTs is much lower than the other two types of terms, as shown in Figure 1 . Figure 3 (included before Appendix A) shows cumulative rankings of terms by method. The X axis represents ratings awarded by subjects. The Y axis reflects the percentage of terms receiving a given rank or better. All data series must reach 100% since every term has been assigned a rating by the evaluators. At any given data point, a larger value indicates that a larger percentage of that series' data has that particular rating or better. For example, 100% of the TTs have a rating of 3 or better; while only about 30% of the terms of the lowest-scoring KW document received a score of 3 or better. In two out of the three documents, HS terms fall between TTs and KWs.",
489
- "cite_spans": [],
490
- "ref_spans": [
491
- {
492
- "start": 404,
493
- "end": 412,
494
- "text": "Figure 1",
495
- "ref_id": null
496
- },
497
- {
498
- "start": 415,
499
- "end": 423,
500
- "text": "Figure 3",
501
- "ref_id": null
502
- }
503
- ],
504
- "eq_spans": [],
505
- "section": "Quality",
506
- "sec_num": "5.1"
507
- },
508
- {
509
- "text": "The graph in Figure 3 shows results for quality, not coverage. In contrast, Figure 4 , which shows the total number of terms rated at or below specified rankings, allows us to measure quality and coverage. (1 is the highest rating; 5 is the lowest.) This figure shows that the HS method identifies more high quality terms than the TT method does. TT clearly identifies the highest quality terms: 100% of TTs receive a rating of 2 or better. However, only 8 TTs received a rating of 2 or better (38% of the total), while 41 HSs re-306 ceived a rating of 2 or better (26% of the total). This indicates that the TT method misses many high quality terms. KW, the least discriminating method in terms of quality, also provides better coverage than does TT. This result is consistent with our observation that TT identifies the highest quality terms, but there are very few of them: an average of 7 per 500 words compared to over 50 for HS and KW. Therefore there is a need for additional high quality terms. The list of HSs received a higher average rating than did the list of KWs, as shown in Figure 2 . This is consistent with our expectation that phrases containing more content-bearing modifiers would be perceived as more useful index terms than would single word phrases consisting only of heads.",
510
- "cite_spans": [],
511
- "ref_spans": [
512
- {
513
- "start": 13,
514
- "end": 21,
515
- "text": "Figure 3",
516
- "ref_id": null
517
- },
518
- {
519
- "start": 76,
520
- "end": 84,
521
- "text": "Figure 4",
522
- "ref_id": "FIGREF0"
523
- },
524
- {
525
- "start": 1090,
526
- "end": 1098,
527
- "text": "Figure 2",
528
- "ref_id": null
529
- }
530
- ],
531
- "eq_spans": [],
532
- "section": "Coverage",
533
- "sec_num": "5.2"
534
- },
535
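
The quality and coverage readings behind Figures 3 and 4 can both be computed from the same per-term ratings. A small sketch, where ratings is a hypothetical list of 1-5 scores for one method on one document:

    # Cumulative quality (share of terms rated at or better than each
    # threshold) and coverage (count of such terms), as in Figures 3 and 4.
    def cumulative(ratings):
        out = {}
        for threshold in (1, 2, 3, 4, 5):
            kept = sum(1 for r in ratings if r <= threshold)
            out[threshold] = (kept / len(ratings), kept)
        return out

    print(cumulative([1, 2, 2, 3, 5]))  # {1: (0.2, 1), 2: (0.6, 3), ...}
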
- {
536
- "text": "The difference in the average ratings for the list of KWs and the list of head-sorted SNPs was less than expected. The small difference in average ratings for the HS list and the KW list can be explained, at least in part, by two factors: 1) Differences among professionals and students in inter-subject agreement and reliability; 2) A discrepancy in the rating of single word terms across term types.",
537
- "cite_spans": [],
538
- "ref_spans": [],
539
- "eq_spans": [],
540
- "section": "Ranking variability",
541
- "sec_num": "5.3"
542
- },
543
- {
544
- "text": "22 students and 7 professionals participated in the study. Figure 5 shows differences in the ratings of professionals and of students.",
545
- "cite_spans": [],
546
- "ref_spans": [
547
- {
548
- "start": 59,
549
- "end": 67,
550
- "text": "Figure 5",
551
- "ref_id": null
552
- }
553
- ],
554
- "eq_spans": [],
555
- "section": "Ranking variability",
556
- "sec_num": "5.3"
557
- },
558
- {
559
- "text": "Students 2.64 3.30 2.3 3.03 1.49 2.1",
560
- "cite_spans": [],
561
- "ref_spans": [],
562
- "eq_spans": [],
563
- "section": "Professionals",
564
- "sec_num": null
565
- },
566
- {
567
- "text": "When variation in the scores for terms was calculated using standard deviation, the standard deviation for the professionals was 0.78, while for the students it was 1.02. Because of the relatively low number of professionals, the standard deviation was calculated only over terms that were rated by more than one professional. A review of the students' results showed that they appeared not to be as careful as the professionals. For example, the phrase 'Wall Street Journal' was included on the HS list only because it is specified as the document source. However, four of the eight students assigned this term a high rating (1 or 2); this is puzzling because the document is about asbestos-related disease. The other four students assigned a 4 or 5 to 'Wall Street Journal', as we expected. But the average score for this term was 3, due to the anomalous ratings. We therefore have more confidence in the reliability of the professional ratings, even though there are relatively few of them. We examined some of the differences in rating for term types. Single word index terms are rated more highly by professionals when they appear in the context of other single word index terms, but are downrated in the context of phrasal expansions that make the meaning of the one-word term more specific. The KW list and HS list overlap when the SNP consists only of a single word (the head) or only of a head modified by determiners. When the same word appears in both lists in identical form, the token in the KW list tends to receive a better rating than the token does when it appears in the HS list, where it is often followed by expansions of the head. For example, the word exposure received an average rating of 2.2 when it appeared on the KW list, but a rating of only 2.75 on the HS list. However, the more specific phrase racial quotas, which immediately followed quota on the HS list received a rating of 1.",
568
- "cite_spans": [],
569
- "ref_spans": [],
570
- "eq_spans": [],
571
- "section": "Figure 5: Average ratings, by term type, of professionals and students",
572
- "sec_num": null
573
- },
574
- {
575
- "text": "To better understand these differences, we selected 40 multi-word phrases and examined the average score that the phrase received in the TT and HS lists, and compared it to the average ratings that individual words received in the KW list. We found that in about half of the cases (21 of 40), the phrase as a whole and the individual words in the phrase received similar scores, as in Example 1 in Figure 6 . In just over one-fourth of the cases (12 of 40), the phrase scored well, but scores from the individual words were rated from good to poor, as in Example 2. In about one-eighth of the cases (6 of 40), the phrase scored well, but the individual words scored poorly, as in Example 3. Finally, in only one case, shown in Example 4 of Figure 6 , the phrase scored poorly but the individual words scored well. This shows that single words in isolation are judged differently than the same word when presented in the context of a larger phrase. These results have important implications in the design of indexing tools.",
576
- "cite_spans": [],
577
- "ref_spans": [
578
- {
579
- "start": 398,
580
- "end": 406,
581
- "text": "Figure 6",
582
- "ref_id": "FIGREF1"
583
- },
584
- {
585
- "start": 740,
586
- "end": 748,
587
- "text": "Figure 6",
588
- "ref_id": "FIGREF1"
589
- }
590
- ],
591
- "eq_spans": [],
592
- "section": "Figure 5: Average ratings, by term type, of professionals and students",
593
- "sec_num": null
594
- },
595
- {
596
- "text": "Our results show that the head sorting technique outperforms two other indexing methods, technical terms and keywords, as measured by balance of quality and coverage. We have performed a qualitative evaluation of three techniques for identifying significant terms in a document, driven by an indexing task. Such an applicati;on can be used to create a profile or thumbnail of a document by presenting to users a set of terms which can be considered to be a representation of the content of the document. We have used human judges to evaluate the effectiveness of each method. This research is a contribution to the overall evaluation of computational linguistic tools in terms of their usefulness for human-oriented computational applications.",
597
- "cite_spans": [],
598
- "ref_spans": [],
599
- "eq_spans": [],
600
- "section": "Conclusion",
601
- "sec_num": "6."
602
- },
603
- {
604
- "text": "Boguraev, Branimir and Kennedy, Christopher (1998) \"Applications of term identification terminology: domain description and content characterisation\", Natural Language Engineering 1(1): 1-28. Church, Kenneth Ward (1988) \"A stochastic parts program and noun phrase parser for unrestricted text\", in Proceedings of the Second",
605
- "cite_spans": [],
606
- "ref_spans": [],
607
- "eq_spans": [],
608
- "section": "References",
609
- "sec_num": "8."
610
- }
611
- ],
612
- "back_matter": [],
613
- "bib_entries": {
614
- "BIBREF0": {
615
- "ref_id": "b0",
616
- "title": "on Applied Natural Language Processing",
617
- "authors": [],
618
- "year": null,
619
- "venue": "",
620
- "volume": "",
621
- "issue": "",
622
- "pages": "136--143",
623
- "other_ids": {},
624
- "num": null,
625
- "urls": [],
626
- "raw_text": "on Applied Natural Language Processing, pp. 136-143.",
627
- "links": null
628
- },
629
- "BIBREF1": {
630
- "ref_id": "b1",
631
- "title": "Noun-phrase analysis in unrestricted text for information retrieval",
632
- "authors": [
633
- {
634
- "first": "David",
635
- "middle": [
636
- "A"
637
- ],
638
- "last": "Evans",
639
- "suffix": ""
640
- },
641
- {
642
- "first": "Chengxiang",
643
- "middle": [],
644
- "last": "Zhai",
645
- "suffix": ""
646
- }
647
- ],
648
- "year": 1996,
649
- "venue": "Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics",
650
- "volume": "",
651
- "issue": "",
652
- "pages": "17--24",
653
- "other_ids": {},
654
- "num": null,
655
- "urls": [],
656
- "raw_text": "Evans, David A. and Chengxiang Zhai (1996) \"Noun-phrase analysis in unrestricted text for information retrieval\", Proceedings of the 34th Annual Meeting of the Association for Computational Linguistics, pp. 17-24.24-27 June 1996, University of California, Santa Cruz, California, Morgan Kaufmann Pub- lishers.",
657
- "links": null
658
- },
659
- "BIBREF2": {
660
- "ref_id": "b2",
661
- "title": "LinklT Documentation",
662
- "authors": [
663
- {
664
- "first": "David",
665
- "middle": [
666
- "K"
667
- ],
668
- "last": "Evans",
669
- "suffix": ""
670
- }
671
- ],
672
- "year": 1998,
673
- "venue": "",
674
- "volume": "",
675
- "issue": "",
676
- "pages": "",
677
- "other_ids": {},
678
- "num": null,
679
- "urls": [],
680
- "raw_text": "Evans, David K. (1998) LinklT Documentation, Columbia University Department of Com- puter Science Report.",
681
- "links": null
682
- },
683
- "BIBREF3": {
684
- "ref_id": "b3",
685
- "title": "Document processing with LinklT",
686
- "authors": [
687
- {
688
- "first": "David",
689
- "middle": [
690
- "K"
691
- ],
692
- "last": "Evans",
693
- "suffix": ""
694
- },
695
- {
696
- "first": "Judith",
697
- "middle": [],
698
- "last": "Klavans",
699
- "suffix": ""
700
- },
701
- {
702
- "first": "Wacholder",
703
- "middle": [],
704
- "last": "",
705
- "suffix": ""
706
- }
707
- ],
708
- "year": 2000,
709
- "venue": "RIAO Conference",
710
- "volume": "",
711
- "issue": "",
712
- "pages": "",
713
- "other_ids": {},
714
- "num": null,
715
- "urls": [],
716
- "raw_text": "Evans, David K., Klavans, Judith, and Wacholder, Nina (2000) \"Document processing with LinklT\", RIAO Conference, Paris, France, to appear.",
717
- "links": null
718
- },
719
- "BIBREF4": {
720
- "ref_id": "b4",
721
- "title": "Detecting text similarity over short passages: exploring linguistic feature combinations via machine learning",
722
- "authors": [
723
- {
724
- "first": "",
725
- "middle": [],
726
- "last": "Hatzivassiloglou",
727
- "suffix": ""
728
- },
729
- {
730
- "first": "Judith",
731
- "middle": [
732
- "L"
733
- ],
734
- "last": "Vasileios",
735
- "suffix": ""
736
- },
737
- {
738
- "first": "Eleazar",
739
- "middle": [],
740
- "last": "Klavans",
741
- "suffix": ""
742
- }
749
- ],
750
- "year": 1999,
751
- "venue": "Proceedings of the EMNLP/VLC-99 Joint SIGDAT Conference on Empirical Methods in NLP and Very Large Corpora",
752
- "volume": "",
753
- "issue": "",
754
- "pages": "",
755
- "other_ids": {},
756
- "num": null,
757
- "urls": [],
758
- "raw_text": "Hatzivassiloglou, Vasileios, Judith L. Klavans and Eleazar Eskin (1999) \"Detecting text simi- larity over short passages: exploring linguis- tic feature combinations via machine learning\", Proceedings of the EMNLP/VLC- 99 Joint SIGDAT Conference on Empirical Methods in NLP and Very Large Corpora, June 21-22, 1999, University of Maryland, College Park, MD.",
759
- "links": null
760
- },
761
- "BIBREF5": {
762
- "ref_id": "b5",
763
- "title": "An automated system that assists in the generation of document indexes",
764
- "authors": [
765
- {
766
- "first": "Julia",
767
- "middle": [],
768
- "last": "Hedges",
769
- "suffix": ""
770
- },
771
- {
772
- "first": "Shiyun",
773
- "middle": [],
774
- "last": "Yie",
775
- "suffix": ""
776
- },
777
- {
778
- "first": "Ray",
779
- "middle": [],
780
- "last": "Reighart",
781
- "suffix": ""
782
- },
783
- {
784
- "first": "Lois",
785
- "middle": [],
786
- "last": "Boggess",
787
- "suffix": ""
788
- }
789
- ],
790
- "year": 1996,
791
- "venue": "Natural Language Engineering",
792
- "volume": "2",
793
- "issue": "2",
794
- "pages": "137--160",
795
- "other_ids": {},
796
- "num": null,
797
- "urls": [],
798
- "raw_text": "Hedges, Julia, Shiyun Yie, Ray Reighart and Lois Boggess (1996) \"An automated system that assists in the generation of document in- dexes\", Natural Language Engineering 2(2): 137-160.",
799
- "links": null
800
- },
801
- "BIBREF6": {
802
- "ref_id": "b6",
803
- "title": "X-bar Syntax: A Study of Phrase Structure",
804
- "authors": [
805
- {
806
- "first": "Ray",
807
- "middle": [],
808
- "last": "Jackendoff",
809
- "suffix": ""
810
- }
811
- ],
812
- "year": 1977,
813
- "venue": "",
814
- "volume": "",
815
- "issue": "",
816
- "pages": "",
817
- "other_ids": {},
818
- "num": null,
819
- "urls": [],
820
- "raw_text": "Jackendoff, Ray (1977) X-bar Syntax: A Study of Phrase Structure, MIT Press, Cambridge, MA.",
821
- "links": null
822
- },
823
- "BIBREF7": {
824
- "ref_id": "b7",
825
- "title": "Technical terminology: some linguistic properties and an algorithm for identification in text",
826
- "authors": [
827
- {
828
- "first": "John",
829
- "middle": [
830
- "S"
831
- ],
832
- "last": "Justeson",
833
- "suffix": ""
834
- },
835
- {
836
- "first": "M",
837
- "middle": [],
838
- "last": "Slava",
839
- "suffix": ""
840
- }
847
- ],
848
- "year": 1995,
849
- "venue": "Natural Language Engineering",
850
- "volume": "1",
851
- "issue": "1",
852
- "pages": "9--27",
853
- "other_ids": {},
854
- "num": null,
855
- "urls": [],
856
- "raw_text": "Justeson, John S. and Slava M. Katz (1995) \"Technical terminology: some linguistic properties and an algorithm for identification in text\", Natural Language Engineering 1(1):9-27.",
857
- "links": null
858
- },
859
- "BIBREF8": {
860
- "ref_id": "b8",
861
- "title": "The automatic creation of literature abstracts",
862
- "authors": [
863
- {
864
- "first": "Hans",
865
- "middle": [
866
- "P"
867
- ],
868
- "last": "Luhn",
869
- "suffix": ""
870
- }
871
- ],
872
- "year": 1958,
873
- "venue": "IBM Journal",
874
- "volume": "",
875
- "issue": "",
876
- "pages": "159--165",
877
- "other_ids": {},
878
- "num": null,
879
- "urls": [],
880
- "raw_text": "Luhn, Hans P. (1958) \"The automatic creation of literature abstracts\", IBM Journal, 159-165.",
881
- "links": null
882
- },
883
- "BIBREF9": {
884
- "ref_id": "b9",
885
- "title": "Inference in an authorship problem",
886
- "authors": [
887
- {
888
- "first": "Frederick",
889
- "middle": [],
890
- "last": "Mosteller",
891
- "suffix": ""
892
- },
893
- {
894
- "first": "David",
895
- "middle": [
896
- "L"
897
- ],
898
- "last": "Wallace",
899
- "suffix": ""
900
- }
901
- ],
902
- "year": 1963,
903
- "venue": "Journal of the American Statistical Association",
904
- "volume": "58",
905
- "issue": "302",
906
- "pages": "275--309",
907
- "other_ids": {},
908
- "num": null,
909
- "urls": [],
910
- "raw_text": "Mosteller, Frederick and David L. Wallace (1963) \"Inference in an authorship problem\", Jour- nal of the American Statistical Association 58(302):275-309. Available at http://www.jstor.org/.",
911
- "links": null
912
- },
913
- "BIBREF10": {
914
- "ref_id": "b10",
915
- "title": "Constructing literature abstracts by computer: techniques and prospects",
916
- "authors": [
917
- {
918
- "first": "Chris",
919
- "middle": [
920
- "D"
921
- ],
922
- "last": "Paice",
923
- "suffix": ""
924
- }
925
- ],
926
- "year": 1990,
927
- "venue": "Information Processing & Management",
928
- "volume": "26",
929
- "issue": "1",
930
- "pages": "171--186",
931
- "other_ids": {},
932
- "num": null,
933
- "urls": [],
934
- "raw_text": "Paice, Chris D. (1990) \"Constructing literature abstracts by computer: techniques and pros- pects\". Information Processing & Manage- ment 26(1): 171-186.",
935
- "links": null
936
- },
937
- "BIBREF11": {
938
- "ref_id": "b11",
939
- "title": "Automatic Text Processing: The Transformation, Analysis and Retrieval of lnformation by Computer",
940
- "authors": [
941
- {
942
- "first": "Gerald",
943
- "middle": [],
944
- "last": "Salton",
945
- "suffix": ""
946
- }
947
- ],
948
- "year": 1989,
949
- "venue": "",
950
- "volume": "",
951
- "issue": "",
952
- "pages": "",
953
- "other_ids": {},
954
- "num": null,
955
- "urls": [],
956
- "raw_text": "Salton, Gerald (1989) Automatic Text Processing: The Transformation, Analysis and Retrieval of lnformation by Computer. Addison- Wesley, Reading, MA.",
957
- "links": null
958
- },
959
- "BIBREF12": {
960
- "ref_id": "b12",
961
- "title": "Retrieving collocations from text",
962
- "authors": [
963
- {
964
- "first": "Frank",
965
- "middle": [],
966
- "last": "Smadja",
967
- "suffix": ""
968
- }
969
- ],
970
- "year": 1993,
971
- "venue": "Computational Linguistics",
972
- "volume": "19",
973
- "issue": "1",
974
- "pages": "143--177",
975
- "other_ids": {},
976
- "num": null,
977
- "urls": [],
978
- "raw_text": "Smadja, Frank (1993) \"Retrieving collocations from text\", Computational Linguistics 19(1):143-177.",
979
- "links": null
980
- },
981
- "BIBREF13": {
982
- "ref_id": "b13",
983
- "title": "Building effective queries in natural language information retrieval",
984
- "authors": [
985
- {
986
- "first": "Thomas",
987
- "middle": [],
988
- "last": "Strzalkowski",
989
- "suffix": ""
990
- }
991
- ],
992
- "year": 1997,
993
- "venue": "Proceedings of the ANLP, ACL",
994
- "volume": "",
995
- "issue": "",
996
- "pages": "299--306",
997
- "other_ids": {},
998
- "num": null,
999
- "urls": [],
1000
- "raw_text": "Strzalkowski, Thomas (1997) \"Building effective queries in natural language information re- trieval\", Proceedings of the ANLP, ACL, Washington, DC., pp.299-306.",
1001
- "links": null
1002
- },
1003
- "BIBREF14": {
1004
- "ref_id": "b14",
1005
- "title": "Simplex NPS sorted by head: a method for identifying significant topics within a document",
1006
- "authors": [
1007
- {
1008
- "first": "Nina",
1009
- "middle": [],
1010
- "last": "Wacholder",
1011
- "suffix": ""
1012
- }
1013
- ],
1014
- "year": 1998,
1015
- "venue": "Proceedings of the Workshop on the Computational Treatment of Nominals",
1016
- "volume": "",
1017
- "issue": "",
1018
- "pages": "70--79",
1019
- "other_ids": {},
1020
- "num": null,
1021
- "urls": [],
1022
- "raw_text": "Wacholder, Nina (1998) \"Simplex NPS sorted by head: a method for identifying significant topics within a document\", Proceedings of the Workshop on the Computational Treat- ment of Nominals, pp.70-79. COLING-ACL '98, Montreal, Canada, August 16, 1998.",
1023
- "links": null
1024
- }
1025
- },
1026
- "ref_entries": {
1027
- "FIGREF0": {
1028
- "text": "Running total of terms identified at or below a specified rank",
1029
- "type_str": "figure",
1030
- "uris": null,
1031
- "num": null
1032
- },
1033
- "FIGREF1": {
1034
- "text": "Comparison of scores of phrases and single words",
1035
- "type_str": "figure",
1036
- "uris": null,
1037
- "num": null
1038
- }
1039
- }
1040
- }
1041
- }
 
Full_text_JSON/prefixA/json/A00/A00-1043.json DELETED
@@ -1,1028 +0,0 @@
1
- {
2
- "paper_id": "A00-1043",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:53.345296Z"
6
- },
7
- "title": "Sentence Reduction for Automatic Text Summarization",
8
- "authors": [
9
- {
10
- "first": "Hongyan",
11
- "middle": [],
12
- "last": "Jing",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Columbia University New York",
17
- "location": {
18
- "postCode": "10027",
19
- "region": "NY",
20
- "country": "USA"
21
- }
22
- },
23
- "email": "[email protected]"
24
- }
25
- ],
26
- "year": "",
27
- "venue": null,
28
- "identifiers": {},
29
- "abstract": "We present a novel sentence reduction system for automatically removing extraneous phrases from sentences that are extracted from a document for summarization purpose. The system uses multiple sources of knowledge to decide which phrases in an extracted sentence can be removed, including syntactic knowledge, context information, and statistics computed from a corpus which consists of examples written by human professionals. Reduction can significantly improve the conciseness of automatic summaries.",
30
- "pdf_parse": {
31
- "paper_id": "A00-1043",
32
- "_pdf_hash": "",
33
- "abstract": [
34
- {
35
- "text": "We present a novel sentence reduction system for automatically removing extraneous phrases from sentences that are extracted from a document for summarization purpose. The system uses multiple sources of knowledge to decide which phrases in an extracted sentence can be removed, including syntactic knowledge, context information, and statistics computed from a corpus which consists of examples written by human professionals. Reduction can significantly improve the conciseness of automatic summaries.",
36
- "cite_spans": [],
37
- "ref_spans": [],
38
- "eq_spans": [],
39
- "section": "Abstract",
40
- "sec_num": null
41
- }
42
- ],
43
- "body_text": [
44
- {
45
- "text": "Current automatic summarizers usually rely on sentence extraction to produce summaries. Human professionals also often reuse the input documents to generate summaries; however, rather than simply extracting sentences and stringing them together, as most current summarizers do, humans often \"edit\" the extracted sentences in some way so that the resulting summary is concise and coherent. We analyzed a set of articles and identified six major operations that can be used for editing the extracted sentences, including removing extraneous phrases from an extracted sentence, combining a reduced sentence with other sentences, syntactic transformation, substituting phrases in an extracted sentence with their paraphrases, substituting phrases with more general or specific descriptions, and reordering the extracted sentences (Jing and McKeown, 1999; Jing and McKeown, 2000) .",
46
- "cite_spans": [
47
- {
48
- "start": 826,
49
- "end": 850,
50
- "text": "(Jing and McKeown, 1999;",
51
- "ref_id": "BIBREF5"
52
- },
53
- {
54
- "start": 851,
55
- "end": 874,
56
- "text": "Jing and McKeown, 2000)",
57
- "ref_id": "BIBREF6"
58
- }
59
- ],
60
- "ref_spans": [],
61
- "eq_spans": [],
62
- "section": "Motivation",
63
- "sec_num": null
64
- },
65
- {
66
- "text": "We call the operation of removing extraneous phrases from an extracted sentence sentence reduction. It is one of the most effective operations that can be used to edit the extracted sentences. Reduction can remove material at any granularity: a word, a prepositional phrase, a gerund, a to-infinitive or a clause. We use the term \"phrase\" here to refer to any of the above components that can be removed in reduction. The following example shows an original sentence and its reduced form written by a human professional:",
67
- "cite_spans": [],
68
- "ref_spans": [],
69
- "eq_spans": [],
70
- "section": "Motivation",
71
- "sec_num": null
72
- },
73
- {
74
- "text": "Original sentence:",
75
- "cite_spans": [],
76
- "ref_spans": [],
77
- "eq_spans": [],
78
- "section": "Motivation",
79
- "sec_num": null
80
- },
81
- {
82
- "text": "When it arrives sometime next year in new TV sets, the V-chip will give parents a new and potentially revolutionary device to block out programs they don't want their children to see.",
83
- "cite_spans": [],
84
- "ref_spans": [],
85
- "eq_spans": [],
86
- "section": "Motivation",
87
- "sec_num": null
88
- },
89
- {
90
- "text": "Reduced sentence by humans: The V-chip will give parents a device to block out programs they don't want their children to see.",
91
- "cite_spans": [],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "Motivation",
95
- "sec_num": null
96
- },
97
- {
98
- "text": "We implemented an automatic sentence reduction system. Input to the reduction system includes extracted sentences, as well as the original document. Output of reduction are reduced forms of the extracted sentences, which can either be used to produce summaries directly, or be merged with other sentences. The reduction system uses multiple sources of knowledge to make reduction decisions, including syntactic knowledge, context, and statistics computed from a training corpus. We evaluated the system against the output of human professionals. The program achieved a success rate of 81.3%, meaning that 81.3% of reduction decisions made by the system agreed with those of humans.",
99
- "cite_spans": [],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "Motivation",
103
- "sec_num": null
104
- },
105
- {
106
- "text": "Sentence reduction improves the conciseness of automatically generated summaries, making it concise and on target. It can also improve the coherence of generated summaries, since extraneous phrases that can potentially introduce incoherece are removed. We collected 500 sentences and their corresponding reduced forms written by humans, and found that humans reduced the length of these 500 sentences by 44.2% on average. This indicates that a good sentence reduction system can improve the conciseness of generated summaries significantly.",
107
- "cite_spans": [],
108
- "ref_spans": [],
109
- "eq_spans": [],
110
- "section": "Motivation",
111
- "sec_num": null
112
- },
113
- {
114
- "text": "In the next section, we describe the sentence reduction algorithm in details. In Section 3, we introduce the evaluation scheme used to access the performance of the system and present evaluation results. In Section 4, we discuss other applications of sentence reduction, the interaction between reduction and other modules in a summarization system, and related work on sentence simplication. Finally, we conclude with future work.",
115
- "cite_spans": [],
116
- "ref_spans": [],
117
- "eq_spans": [],
118
- "section": "Motivation",
119
- "sec_num": null
120
- },
121
- {
122
- "text": "Sentence reduction based on multiple sources of knowledge The goal of sentence reduction is to \"reduce without major loss\"; that is, we want to remove as many extraneous phrases as possible from an extracted sentence so that it can be concise, but without detracting from the main idea the sentence conveys. Ideally, we want to remove a phrase from an extracted sentence only if it is irrelevant to the main topic. To achieve this, the system relies on multiple sources of knowledge to make reduction decisions. We first introduce the resources in the system and then describe the reduction algorithm.",
123
- "cite_spans": [],
124
- "ref_spans": [],
125
- "eq_spans": [],
126
- "section": "2",
127
- "sec_num": null
128
- },
129
- {
130
- "text": "(1) The corpus.",
131
- "cite_spans": [],
132
- "ref_spans": [],
133
- "eq_spans": [],
134
- "section": "The resources",
135
- "sec_num": "2.1"
136
- },
137
- {
138
- "text": "One of the key features of the system is that it uses a corpus consisting of original sentences and their corresponding reduced forms written by humans for training and testing purpose. This corpus was created using an automatic program we have developed to automatically analyze human-written abstracts.",
139
- "cite_spans": [],
140
- "ref_spans": [],
141
- "eq_spans": [],
142
- "section": "The resources",
143
- "sec_num": "2.1"
144
- },
145
- {
146
- "text": "The program, called the decomposition program, matches phrases in a human-written summary sentence to phrases in the original document (Jing and McKeown, 1999) . The human-written abstracts were collected from the free daily news service \"Communicationsrelated headlines\", provided by the Benton Foundation (http://www.benton.org). The articles in the corpus are news reports on telecommunication related issues, but they cover a wide range of topics, such as law, labor, and company mergers.",
147
- "cite_spans": [
148
- {
149
- "start": 135,
150
- "end": 159,
151
- "text": "(Jing and McKeown, 1999)",
152
- "ref_id": "BIBREF5"
153
- }
154
- ],
155
- "ref_spans": [],
156
- "eq_spans": [],
157
- "section": "The resources",
158
- "sec_num": "2.1"
159
- },
160
- {
161
- "text": "(2) The lexicon. The system also uses a largescale, reusable lexicon we combined from multiple resources (Jing and McKeown, 1998) . The resources that were combined include COMLEX syntactic dictionary (Macleod and Grishman, 1995) , English Verb Classes and Alternations (Levin, 1993) , the WordNet lexical database (Miller et al., 1990) , the Brown Corpus tagged with WordNet senses (Miller et al., 1993) . The lexicon includes subcategorizations for over 5,000 verbs. This information is used to identify the obligatory arguments of verb phrases.",
162
- "cite_spans": [
163
- {
164
- "start": 105,
165
- "end": 129,
166
- "text": "(Jing and McKeown, 1998)",
167
- "ref_id": "BIBREF4"
168
- },
169
- {
170
- "start": 201,
171
- "end": 229,
172
- "text": "(Macleod and Grishman, 1995)",
173
- "ref_id": null
174
- },
175
- {
176
- "start": 270,
177
- "end": 283,
178
- "text": "(Levin, 1993)",
179
- "ref_id": "BIBREF7"
180
- },
181
- {
182
- "start": 315,
183
- "end": 336,
184
- "text": "(Miller et al., 1990)",
185
- "ref_id": "BIBREF10"
186
- },
187
- {
188
- "start": 383,
189
- "end": 404,
190
- "text": "(Miller et al., 1993)",
191
- "ref_id": "BIBREF11"
192
- }
193
- ],
194
- "ref_spans": [],
195
- "eq_spans": [],
196
- "section": "The resources",
197
- "sec_num": "2.1"
198
- },
199
- {
200
- "text": "(3) The WordNet lexical database. Word-Net (Miller et al., 1990 ) is the largest lexical database to date.",
201
- "cite_spans": [
202
- {
203
- "start": 43,
204
- "end": 63,
205
- "text": "(Miller et al., 1990",
206
- "ref_id": "BIBREF10"
207
- }
208
- ],
209
- "ref_spans": [],
210
- "eq_spans": [],
211
- "section": "The resources",
212
- "sec_num": "2.1"
213
- },
214
- {
215
- "text": "It provides lexical relations between words, including synonymy, antonymy, meronymy, entailment (e.g., eat --+ chew), or causation (e.g., kill --4 die). These lexical links are used to identify the focus in the local context.",
216
- "cite_spans": [],
217
- "ref_spans": [],
218
- "eq_spans": [],
219
- "section": "The resources",
220
- "sec_num": "2.1"
221
- },
222
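
[Editor's note] The lexical links described above can be reproduced with any WordNet interface. Below is a minimal sketch, not the paper's implementation, that collects such links with NLTK's WordNet module (NLTK is an assumption for illustration; the original system predates it and queried WordNet directly):

```python
# A sketch (not the paper's code) of collecting WordNet lexical links
# between two words. Requires the WordNet corpus: nltk.download('wordnet')
from nltk.corpus import wordnet as wn

def wordnet_links(w1, w2):
    """Names of the WordNet relations connecting any sense of w1 to w2."""
    targets = set(wn.synsets(w2))
    links = set()
    for syn in wn.synsets(w1):
        if syn in targets:
            links.add("synonym")        # the two words share a synset
        if targets & set(syn.hypernyms()):
            links.add("hypernym")
        if targets & set(syn.part_meronyms()):
            links.add("meronym")
        if targets & set(syn.entailments()):
            links.add("entailment")     # e.g., eat -> chew
        if targets & set(syn.causes()):
            links.add("causation")      # e.g., kill -> die
        for lemma in syn.lemmas():
            if any(a.synset() in targets for a in lemma.antonyms()):
                links.add("antonym")
    return links

print(wordnet_links("eat", "chew"))     # expected to include 'entailment'
print(wordnet_links("kill", "die"))     # expected to include 'causation'
```
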
- {
223
- "text": "(4) The syntactic parser. We use the English Slot Grammar(ESG) parser developed at IBM (Mc-Cord, 1990) to analyze the syntactic structure of an input sentence and produce a sentence parse tree. The ESG parser not only annotates the syntactic category of a phrase (e.g., \"np\" or \"vp\"), it also annotates the thematic role of a phrase (e.g., \"subject\" or \"object\").",
224
- "cite_spans": [
225
- {
226
- "start": 87,
227
- "end": 102,
228
- "text": "(Mc-Cord, 1990)",
229
- "ref_id": null
230
- }
231
- ],
232
- "ref_spans": [],
233
- "eq_spans": [],
234
- "section": "The resources",
235
- "sec_num": "2.1"
236
- },
237
- {
238
- "text": "There are five steps in the reduction program:",
239
- "cite_spans": [],
240
- "ref_spans": [],
241
- "eq_spans": [],
242
- "section": "The algorithm",
243
- "sec_num": "2.2"
244
- },
245
- {
246
- "text": "Step 1: Syntactic parsing. We first parse the input sentence using the ESG parser and produce the sentence parse tree. The operations in all other steps are performed based on this parse tree. Each following step annotates each node in the parse tree with additional information, such as syntactic or context importance, which are used later to determine which phrases (they are represented as subtrees in a parse tree) can be considered extraneous and thus removed.",
247
- "cite_spans": [],
248
- "ref_spans": [],
249
- "eq_spans": [],
250
- "section": "The algorithm",
251
- "sec_num": "2.2"
252
- },
253
- {
254
- "text": "Step 2: Grammar checking.",
255
- "cite_spans": [],
256
- "ref_spans": [],
257
- "eq_spans": [],
258
- "section": "The algorithm",
259
- "sec_num": "2.2"
260
- },
261
- {
262
- "text": "In this step, we determine which components of a sentence must not be deleted to keep the sentence grammatical. To do this, we traverse the parse tree produced in the first step in top-down order and mark, for each node in the parse tree, which of its children are grammatically obligatory. We use two sources of knowledge for this purpose. One source includes simple, linguistic-based rules that use the thematic role structure produced by the ESG parser. For instance, for a sentence, the main verb, the subject, and the object(s) are essential if they exist, but a prepositional phrase is not; for a noun phrase, the head noun is essential, but an adjective modifier of the head noun is not. The other source we rely on is the large-scale lexicon we described earlier. The information in the lexicon is used to mark the obligatory arguments of verb phrases. For example, for the verb \"convince\", the lexicon has the following entry: This entry indicates that the verb \"convince\" can be followed by a noun phrase and a prepositional phrase starting with the preposition \"of\" (e.g., he convinced me of his innocence). It can also be followed by a noun phrase and a to-infinitive phrase (e.g., he convinced me to go to the party). This information prevents the system from deleting the \"of\" prepositional phrase or the to-infinitive that is part of the verb phrase.",
263
- "cite_spans": [],
264
- "ref_spans": [],
265
- "eq_spans": [],
266
- "section": "The algorithm",
267
- "sec_num": "2.2"
268
- },
269
- {
270
- "text": "At the end of this step, each node in the parse tree --including both leaf nodes and intermediate nodes --is annotated with a value indicating whether it is grammatically obligatory. Note that whether a node is obligatory is relative to its parent node only. For example, whether a determiner is obligatory is relative to the noun phrase it is in; whether a prepositional phrase is obligatory is relative to the sentence or the phrase it is in.",
271
- "cite_spans": [],
272
- "ref_spans": [],
273
- "eq_spans": [],
274
- "section": "The algorithm",
275
- "sec_num": "2.2"
276
- },
277
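
[Editor's note] A toy sketch of this grammar-checking step follows, with assumed data structures (a hypothetical `Node` class and `SUBCAT` table standing in for the ESG parse tree and the COMLEX-derived lexicon): subjects, objects, and licensed verb complements are marked obligatory, everything else is a candidate for removal.

```python
# A toy sketch of Step 2 (assumed data structures; the real system walks an
# ESG parse tree against a COMLEX-derived lexicon of over 5,000 verbs).
from dataclasses import dataclass, field

@dataclass
class Node:
    label: str                  # e.g. "vp", "np", "pp:of", "to-inf"
    role: str = ""              # thematic role, e.g. "subj", "obj", "vmod"
    head: str = ""              # head word of the phrase
    children: list = field(default_factory=list)
    obligatory: bool = False

# Hypothetical encoding of the "convince" entry described in the text:
# NP + PP(of), or NP + to-infinitive.
SUBCAT = {"convince": [{"np", "pp:of"}, {"np", "to-inf"}]}

def mark_obligatory(node: Node) -> None:
    """Top-down pass: mark each child obligatory relative to its parent."""
    licensed = set().union(*SUBCAT.get(node.head, [set()]))
    for child in node.children:
        child.obligatory = (child.role in {"subj", "obj"} or
                            (node.label == "vp" and child.label in licensed))
        mark_obligatory(child)

vp = Node("vp", head="convince", children=[
    Node("np", role="obj"), Node("pp:of"), Node("pp:in", role="vmod")])
mark_obligatory(vp)
print([(c.label, c.obligatory) for c in vp.children])
# [('np', True), ('pp:of', True), ('pp:in', False)]
```
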
- {
278
- "text": "Step 3: Context information.",
279
- "cite_spans": [],
280
- "ref_spans": [],
281
- "eq_spans": [],
282
- "section": "The algorithm",
283
- "sec_num": "2.2"
284
- },
285
- {
286
- "text": "In this step, the system decides which components in the sentence are most related to the main topic being discussed. To measure the importance of a phrase in the local context, the system relies on lexical links between words. The hypothesis is that the more connected a word is with other words in the local context, the more likely it is to be the focus of the local context. We link the words in the extracted sentence with words in its local context, if they are repetitions, morphologically related, or linked in WordNet through one of the lexical relations. The system then computes an importance score for each word in the extracted sentence, based on the number of links it has with other words and the types of links. The formula for computing the context importance score for a word w is as follows:",
287
- "cite_spans": [],
288
- "ref_spans": [],
289
- "eq_spans": [],
290
- "section": "The algorithm",
291
- "sec_num": "2.2"
292
- },
293
- {
294
- "text": "9 ContextWeight(w) = ~-~(Li x NUMi(w)) i-----1",
295
- "cite_spans": [],
296
- "ref_spans": [],
297
- "eq_spans": [],
298
- "section": "The algorithm",
299
- "sec_num": "2.2"
300
- },
301
- {
302
- "text": "Here, i represents the different types of lexical relations the system considered, including repetition, inflectional relation, derivational relation, and the lexical relations from WordNet. We assigned a weight to each type of lexical relation, represented by Li in the formula. Relations such as repetition or inflectional relation are considered more important and are assigned higher weights, while relations such as hypernym are considered less important and assigned lower weights. NUMi(w) in the formula represents the number of a particular type of lexical links the word w has with words in the local context.",
303
- "cite_spans": [],
304
- "ref_spans": [],
305
- "eq_spans": [],
306
- "section": "The algorithm",
307
- "sec_num": "2.2"
308
- },
309
- {
310
- "text": "After an importance score is computed for each word, each phrase in the \"sentence gets a score by adding up the scores of its children nodes in the parse tree. This score indicates how important the phrase is in the local context.",
311
- "cite_spans": [],
312
- "ref_spans": [],
313
- "eq_spans": [],
314
- "section": "The algorithm",
315
- "sec_num": "2.2"
316
- },
317
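
[Editor's note] In code, the scoring reduces to a weighted link count per word, summed up the tree for phrases. A sketch follows; the relation weights are illustrative assumptions, since the paper states only their ordering (repetition/inflection high, hypernymy low), not their values.

```python
# A sketch of Step 3's scoring: ContextWeight(w) = sum_i L_i * NUM_i(w).
WEIGHTS = {                       # L_i for each relation type i (assumed values)
    "repetition": 5.0, "inflectional": 5.0, "derivational": 4.0,
    "synonym": 3.0, "antonym": 2.0, "meronym": 2.0,
    "entailment": 2.0, "causation": 2.0, "hypernym": 1.0,
}

def context_weight(word, context_words, count_links):
    """count_links(word, other, rel) returns the number of `rel` links
    between the two words; NUM_i(w) is the sum over the local context."""
    return sum(w_i * sum(count_links(word, other, rel)
                         for other in context_words)
               for rel, w_i in WEIGHTS.items())

def phrase_score(word_scores, phrase_words):
    """A phrase's score is the sum over the leaf words it dominates."""
    return sum(word_scores[w] for w in phrase_words)

# Trivial demo counting only repetition links.
context = ["parents", "programs", "children", "programs"]
links = lambda w, other, rel: 1 if rel == "repetition" and w == other else 0
print(context_weight("programs", context, links))   # 10.0 (2 repetitions x 5.0)
```
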
- {
318
- "text": "Step 4: Corpus evidence.",
319
- "cite_spans": [],
320
- "ref_spans": [],
321
- "eq_spans": [],
322
- "section": "The algorithm",
323
- "sec_num": "2.2"
324
- },
325
- {
326
- "text": "The program uses a corpus consisting of sentences reduced by human professionals and their corresponding original sentences to compute how likely humans remove a certain phrase. The system first parsed the sentences in the corpus using ESG parser. It then marked which subtrees in these parse trees (i.e., phrases in the sentences) were removed by humans. Using this corpus of marked parse trees, we can compute how likely a subtree is removed from its parent node. For example, we can compute the probability that the \"when\" temporal clause is removed when the main verb is \"give\", represented as Prob(\"when-clause is removed\"l\"v=give\"), or the probability that the to-infinitive modifier of the head noun \"device\" is removed, represented as",
327
- "cite_spans": [],
328
- "ref_spans": [],
329
- "eq_spans": [],
330
- "section": "The algorithm",
331
- "sec_num": "2.2"
332
- },
333
- {
334
- "text": "Prob(\"to-infinitive modifier is removed\"l\"n=device\").",
335
- "cite_spans": [],
336
- "ref_spans": [],
337
- "eq_spans": [],
338
- "section": "The algorithm",
339
- "sec_num": "2.2"
340
- },
341
- {
342
- "text": "These probabilities are computed using Bayes's rule. For example, the probability that the \"when\" temporal clause is removed when the main verb is \"give\", Prob(\"when-clause is removed'l\"v=give'), is computed as the product of Prob(\"v=give\"[\"when-clause is removed\") (i.e., the probability that the main verb is \"give\" when the \"when\" clause is removed) and",
343
- "cite_spans": [],
344
- "ref_spans": [],
345
- "eq_spans": [],
346
- "section": "The algorithm",
347
- "sec_num": "2.2"
348
- },
349
- {
350
- "text": "Prob(\"when-clause is removed\") (i.e., the probability that the \"when\" clause is removed), divided by Prob(\"v=give\") (i.e., the probability that the main verb is \"give\").",
351
- "cite_spans": [],
352
- "ref_spans": [],
353
- "eq_spans": [],
354
- "section": "The algorithm",
355
- "sec_num": "2.2"
356
- },
357
- {
358
- "text": "Besides computing the probability that a phrase is removed, we also compute two other types of probabilities: the probability that a phrase is reduced (i.e., the phrase is not removed as a whole, but some components in the phrase are removed), and the probability that a phrase is unchanged at all (i.e., neither removed nor reduced).",
359
- "cite_spans": [],
360
- "ref_spans": [],
361
- "eq_spans": [],
362
- "section": "The algorithm",
363
- "sec_num": "2.2"
364
- },
365
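
[Editor's note] With maximum-likelihood counts, these Bayes-rule terms come straight from the marked corpus. A sketch with toy records follows; the (phrase_type, context, outcome) record format is an assumption for illustration.

```python
# A sketch of Step 4's probability estimates over a marked corpus, where
# outcome is one of "removed", "reduced", "unchanged".
records = [
    ("when-clause", "v=give", "removed"),
    ("when-clause", "v=give", "removed"),
    ("when-clause", "v=give", "unchanged"),
    ("that-clause", "v=say",  "unchanged"),
]

def prob(phrase, context, outcome, records):
    """Prob(outcome | context) for one phrase type, via Bayes's rule."""
    rel = [(c, o) for p, c, o in records if p == phrase]
    if not rel:
        return 0.0
    n_outcome = sum(1 for _, o in rel if o == outcome)
    n_context = sum(1 for c, _ in rel if c == context)
    n_both = sum(1 for c, o in rel if c == context and o == outcome)
    if n_outcome == 0 or n_context == 0:
        return 0.0
    p_context_given_outcome = n_both / n_outcome   # Prob(context | outcome)
    p_outcome = n_outcome / len(rel)               # Prob(outcome)
    p_context = n_context / len(rel)               # Prob(context)
    return p_context_given_outcome * p_outcome / p_context

print(prob("when-clause", "v=give", "removed", records))   # 0.666...
# As Bayes's rule guarantees, this equals n_both / n_context.
```
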
- {
366
- "text": "These corpus probabilities help us capture human practice. For example, for sentences like \"The agency reported that ...\", \"The other source says that ...\", \"The new study suggests that ...', the thatclause following the say-verb (i.e., report, say, and suggest) in each sentence is very rarely changed at all by professionals. The system can capture this human practice, since the probability that that-clause of the verb say or report being unchanged at all will be relatively high, which will help the system to avoid removing components in the that-clause. These corpus probabilities are computed beforehand using a training corpus. They are then stored in a table and loaded at running time.",
367
- "cite_spans": [],
368
- "ref_spans": [],
369
- "eq_spans": [],
370
- "section": "The algorithm",
371
- "sec_num": "2.2"
372
- },
373
- {
374
- "text": "Step 5: Final Decision.",
375
- "cite_spans": [],
376
- "ref_spans": [],
377
- "eq_spans": [],
378
- "section": "The algorithm",
379
- "sec_num": "2.2"
380
- },
381
- {
382
- "text": "The final reduction decisions are based on the results from all the earlier steps. To decide which phrases to remove, the system traverses the sentence parse tree, which now have been annotated with different types of information from earlier steps, in the top-down order and decides which subtrees should be removed, reduced or unchanged. A subtree (i.e., a phrase) is removed only if it is not grammatically obligatory, not the focus of the local context (indicated by a low importance score), and has a reasonable probability of being removed by humans. Figure 1 shows sample output of the reduction program. The reduced sentences produced by humans are also provided for comparison.",
383
- "cite_spans": [],
384
- "ref_spans": [
385
- {
386
- "start": 557,
387
- "end": 565,
388
- "text": "Figure 1",
389
- "ref_id": "FIGREF1"
390
- }
391
- ],
392
- "eq_spans": [],
393
- "section": "The algorithm",
394
- "sec_num": "2.2"
395
- },
396
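
[Editor's note] A sketch of that final decision rule follows, with illustrative thresholds (the paper specifies the conjunction of criteria but publishes no numeric cut-offs, so the values below are assumptions):

```python
# A sketch of Step 5's top-down decision rule.
from dataclasses import dataclass, field

@dataclass
class Phrase:
    text: str
    obligatory: bool        # from Step 2
    context_score: float    # from Step 3
    p_removed: float        # from Step 4
    children: list = field(default_factory=list)

CONTEXT_THRESHOLD = 5.0        # assumed value
REMOVAL_PROB_THRESHOLD = 0.5   # assumed value

def decide(node: Phrase) -> None:
    """Prune a subtree only when all three knowledge sources agree it is
    expendable; recurse only into kept subtrees."""
    kept = []
    for child in node.children:
        removable = (not child.obligatory
                     and child.context_score < CONTEXT_THRESHOLD
                     and child.p_removed > REMOVAL_PROB_THRESHOLD)
        if not removable:
            decide(child)
            kept.append(child)
    node.children = kept

s = Phrase("give", True, 9.0, 0.0, children=[
    Phrase("when it arrives ...", False, 2.0, 0.8),
    Phrase("parents", True, 7.0, 0.1),
    Phrase("a device to block out ...", True, 8.0, 0.2)])
decide(s)
print([c.text for c in s.children])  # ['parents', 'a device to block out ...']
```
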
- {
397
- "text": "3 Evaluation",
398
- "cite_spans": [],
399
- "ref_spans": [],
400
- "eq_spans": [],
401
- "section": "The algorithm",
402
- "sec_num": "2.2"
403
- },
404
- {
405
- "text": "We define a measure called success rate to evaluate the performance of our sentence reduction program.",
406
- "cite_spans": [],
407
- "ref_spans": [],
408
- "eq_spans": [],
409
- "section": "The evaluation scheme",
410
- "sec_num": "3.1"
411
- },
412
- {
413
- "text": "Example 1: Original sentence : When it arrives sometime next year in new TV sets, the V-chip will give parents a new and potentially revolutionary device to block out programs they don't want their children to see.",
414
- "cite_spans": [],
415
- "ref_spans": [],
416
- "eq_spans": [],
417
- "section": "The evaluation scheme",
418
- "sec_num": "3.1"
419
- },
420
- {
421
- "text": "Reduction program: The V-chip will give parents a new and potentially revolutionary device to block out programs they don't want their children to see.",
422
- "cite_spans": [],
423
- "ref_spans": [],
424
- "eq_spans": [],
425
- "section": "The evaluation scheme",
426
- "sec_num": "3.1"
427
- },
428
- {
429
- "text": ": The V-chip will give parents a device to block out programs they don't want their children to see. The success rate computes the percentage of system's reduction decisions that agree with those of humans.",
430
- "cite_spans": [],
431
- "ref_spans": [],
432
- "eq_spans": [],
433
- "section": "Professionals",
434
- "sec_num": null
435
- },
436
- {
437
- "text": "We compute the success rate in the following way. The reduction process can be considered as a series of decision-making process along the edges of a sentence parse tree. At each node of the parse tree, both the human and the program make a decision whether to remove the node or to keep it. If a node is removed, the subtree with that node as the root is removed as a whole, thus no decisions are needed for the descendants of the removed node. If the node is kept, we consider that node as the root and repeat this process. Suppose we have an input sentence (ABCDE-FGH), which has a parse tree shown in Figure 2 . Suppose a human reduces the sentence to (ABDGH), which can be translated to a series of decisions made along edges in the sentence parse tree as shown in Figure 3 . The symbol \"y\" along an edge means the node it points to will be kept, and \"n\" means the node will be removed. Suppose the program reduces the sentence to (BCD), which can be translated similarly to the annotated tree shown in Figure 4 .",
438
- "cite_spans": [],
439
- "ref_spans": [
440
- {
441
- "start": 605,
442
- "end": 613,
443
- "text": "Figure 2",
444
- "ref_id": "FIGREF3"
445
- },
446
- {
447
- "start": 770,
448
- "end": 778,
449
- "text": "Figure 3",
450
- "ref_id": null
451
- },
452
- {
453
- "start": 1008,
454
- "end": 1016,
455
- "text": "Figure 4",
456
- "ref_id": "FIGREF2"
457
- }
458
- ],
459
- "eq_spans": [],
460
- "section": "Professionals",
461
- "sec_num": null
462
- },
463
- {
464
- "text": "We can see that along five edges (they are D--+B, D--+E, D--+G, B--+A, B-+C), both the human and the program made decisions. Two out of the five decisions agree (they are D--+B and D--4E), so the success rate is 2/5 (40%). The success rate is defined as:",
465
- "cite_spans": [],
466
- "ref_spans": [],
467
- "eq_spans": [],
468
- "section": "Professionals",
469
- "sec_num": null
470
- },
471
- {
472
- "text": "# of edges along which the human and the program have made success rate = the same decision the total # of edges along which both the human and the progam have made decisions",
473
- "cite_spans": [],
474
- "ref_spans": [],
475
- "eq_spans": [],
476
- "section": "Professionals",
477
- "sec_num": null
478
- },
479
- {
480
- "text": "Note that the edges along which only the human or the program has made a decision (e.g., G--+F and G--+F in Figure 3 and Figure 4) are not considered in the computation of success rate, since there is no agreement issue in such cases.",
481
- "cite_spans": [],
482
- "ref_spans": [
483
- {
484
- "start": 108,
485
- "end": 116,
486
- "text": "Figure 3",
487
- "ref_id": null
488
- },
489
- {
490
- "start": 121,
491
- "end": 130,
492
- "text": "Figure 4)",
493
- "ref_id": "FIGREF2"
494
- }
495
- ],
496
- "eq_spans": [],
497
- "section": "Professionals",
498
- "sec_num": null
499
- },
500
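
[Editor's note] For concreteness, here is a small sketch (not the authors' code) of the success-rate computation over the worked example above; each decision map covers only the edges its annotator actually reached.

```python
# Success rate over the Figures 2-4 example. Each map assigns "y" (keep)
# or "n" (remove) to a parent->child edge; edges under a removed node
# carry no decision.
human = {("D","B"): "y", ("D","E"): "n", ("D","G"): "y",
         ("B","A"): "y", ("B","C"): "n",
         ("G","F"): "n", ("G","H"): "y"}        # reduces (ABCDEFGH) to (ABDGH)
program = {("D","B"): "y", ("D","E"): "n", ("D","G"): "n",
           ("B","A"): "n", ("B","C"): "y"}      # reduces (ABCDEFGH) to (BCD)

def success_rate(human, program):
    shared = set(human) & set(program)          # edges where both decided
    agree = sum(1 for e in shared if human[e] == program[e])
    return agree / len(shared)

print(success_rate(human, program))             # 0.4, i.e. 2 of 5 shared edges
```
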
- {
501
- "text": "In the evaluation, we used 400 sentences in the corpus to compute the probabilities that a phrase is removed, reduced, or unchanged. We tested the program on the rest 100 sentences.",
502
- "cite_spans": [],
503
- "ref_spans": [],
504
- "eq_spans": [],
505
- "section": "Evaluation result",
506
- "sec_num": "3.2"
507
- },
508
- {
509
- "text": "Using five-fold validation (i.e., chose different 100 sentences for testing each time and repeating the experiment five times), The program achieved an average success rate of 81.3%. If we consider the baseline as removing all the prepositional phrases, clauses, to-infinitives and gerunds, the baseline performance is 43.2%.",
510
- "cite_spans": [],
511
- "ref_spans": [],
512
- "eq_spans": [],
513
- "section": "Evaluation result",
514
- "sec_num": "3.2"
515
- },
516
- {
517
- "text": "We also computed the success rate of program's decisions on particular types of phrases. For the decisions on removing or keeping a clause, the system has a success rate of 78.1%; for the decisions on removing or keeping a to-infinitive, the system has a success rate of 85.2%. We found out that the system has a low success rate on removing adjectives of noun phrases or removing adverbs of a sentence or a verb phrase. One reason for this is that our probability model can hardly capture the dependencies between a particular adjective and the head noun since the training corpus is not large enough, while the other sources of information, including grammar or context information, provide little evidence on whether an adjective or an adverb should be removed. Given that whether or not an adjective or an adverb is removed does not affect the conciseness of the sentence significantly and the system lacks of reliability in making such decisions, we decide not to remove adjectives and adverbs.",
518
- "cite_spans": [],
519
- "ref_spans": [],
520
- "eq_spans": [],
521
- "section": "Evaluation result",
522
- "sec_num": "3.2"
523
- },
524
- {
525
- "text": "On average, the system reduced the length of the 500 sentence by 32.7% (based on the number of words), while humans reduced it by 41.8%.",
526
- "cite_spans": [],
527
- "ref_spans": [],
528
- "eq_spans": [],
529
- "section": "Evaluation result",
530
- "sec_num": "3.2"
531
- },
532
- {
533
- "text": "The probabilities we computed from the training corpus covered 58% of instances in the test corpus. When the corpus probability is absent for a case, the system makes decisions based on the other two sources of knowledge.",
534
- "cite_spans": [],
535
- "ref_spans": [],
536
- "eq_spans": [],
537
- "section": "Evaluation result",
538
- "sec_num": "3.2"
539
- },
540
- {
541
- "text": "Some of the errors made by the system result from the errors by the syntactic parser. We randomly checked 50 sentences, and found that 8% of the errors made by the system are due to parsing errors. There are two main reasons responsible for this relative low percentage of errors resulted from mistakes in parsing. One reason is that we have taken some special measures to avoid errors introduced by mistakes in parsing. For example, PP attachment is a difficult problem in parsing and it is not rare that a PP is wrongly attached. Therefore, we take this into account when marking the obligatory components using subcategorization knowledge from the lexicon (step 2) -we not only look at the PPs that are attached to a verb phrase, but also PPs that are next to the verb phrase but not attached, in case it is part of the verb phrase. We also wrote a preprocessor to deal with particular structures that the parser often has problems with, such as appositions. The other reason is that parsing errors do not always result in reduction errors. For example, given a sentence \"The spokesperson of the University said that ...', although that-clause in the sentence may have a complicated structure and the parser gets it wrong, the reduction system is not necessarily affected since it may decide in this case to keep that-clause as it is, as humans often do, so the parsing errors will not matter in this example.",
542
- "cite_spans": [],
543
- "ref_spans": [],
544
- "eq_spans": [],
545
- "section": "Evaluation result",
546
- "sec_num": "3.2"
547
- },
548
- {
549
- "text": "The reduction algorithm we present assumes generic summarization; that is, we want to generate a summary that includes the most important information in an article. We can tailor the reduction system to queries-based summarization. In that case, the task of the reduction is not to remove phrases that are extraneous in terms of the main topic of an article, but phrases that are not very relevant to users' queries. We extended our sentence reduction program to query-based summarization by adding another step in the algorithm to measure the relevance of users' queries to phrases in the sentence. In the last step of reduction when the system makes the final decision, the relevance of a phrase to the query is taken into account, together with syntactic, context, and corpus information.",
550
- "cite_spans": [],
551
- "ref_spans": [],
552
- "eq_spans": [],
553
- "section": "Discussion and related work",
554
- "sec_num": "4"
555
- },
556
- {
557
- "text": "Ideally, the sentence reduction module should interact with other modules in a summarization system. It should be able to send feedback to the extraction module if it finds that a sentence selected by the extraction module may be inappropriate (for example, having a very low context importance score). It should also be able to interact with the modules that run after it, such as the sentence combination module, so that it can revise reduction decisions according to the feedback from these modules.",
558
- "cite_spans": [],
559
- "ref_spans": [],
560
- "eq_spans": [],
561
- "section": "Discussion and related work",
562
- "sec_num": "4"
563
- },
564
- {
565
- "text": "Some researchers suggested removing phrases or clauses from sentences for certain applications. (Grefenstette, 1998) proposed to remove phrases in sentences to produce a telegraphic text that can be used to provide audio scanning service for the blind. (Corston-Oliver and Dolan, 1999) proposed to remove clauses in sentences before indexing documents for Information Retrieval. Both studies removed phrases based only on their syntactic categories, while the focus of our system is on deciding when it is appropriate to remove a phrase.",
566
- "cite_spans": [
567
- {
568
- "start": 96,
569
- "end": 116,
570
- "text": "(Grefenstette, 1998)",
571
- "ref_id": "BIBREF3"
572
- },
573
- {
574
- "start": 253,
575
- "end": 285,
576
- "text": "(Corston-Oliver and Dolan, 1999)",
577
- "ref_id": "BIBREF2"
578
- }
579
- ],
580
- "ref_spans": [],
581
- "eq_spans": [],
582
- "section": "Discussion and related work",
583
- "sec_num": "4"
584
- },
585
- {
586
- "text": "Other researchers worked on the text simplifica-tion problem, which usually involves in simplifying text but not removing any phrases. For example, (Carroll et al., 1998) discussed simplifying newspaper text by replacing uncommon words with common words, or replacing complicated syntactic structures with simpler structures to assist people with reading disabilities. (Chandrasekar et al., 1996) discussed text simplification in general. The difference between these studies on text simplification and our system is that a text simplification system usually does not remove anything from an original sentence, although it may change its structure or words, but our system removes extraneous phrases from the extracted sentences.",
587
- "cite_spans": [
588
- {
589
- "start": 148,
590
- "end": 170,
591
- "text": "(Carroll et al., 1998)",
592
- "ref_id": "BIBREF0"
593
- },
594
- {
595
- "start": 369,
596
- "end": 396,
597
- "text": "(Chandrasekar et al., 1996)",
598
- "ref_id": "BIBREF1"
599
- }
600
- ],
601
- "ref_spans": [],
602
- "eq_spans": [],
603
- "section": "Discussion and related work",
604
- "sec_num": "4"
605
- },
606
- {
607
- "text": "Conclusions and future work We present a novel sentence reduction system which removes extraneous phrases from sentences that are extracted from an article in text summarization. The deleted phrases can be prepositional phrases, clauses, to-infinitives, or gerunds, and multiple phrases can be removed form a single sentence. The focus of this work is on determining, for a sentence in a particular context, which phrases in the sentence are less important and can be removed. Our system makes intelligent reduction decisions based on multiple sources of knowledge, including syntactic knowledge, context, and probabilities computed from corpus analysis. We also created a corpus consisting of 500 sentences and their reduced forms produced by human professionals, and used this corpus for training and testing the system. The evaluation shows that 81.3% of reduction decisions made by the system agreed with those of humans.",
608
- "cite_spans": [],
609
- "ref_spans": [],
610
- "eq_spans": [],
611
- "section": "5",
612
- "sec_num": null
613
- },
614
- {
615
- "text": "In the future, we would like to integrate our sentence reduction system with extraction-based summarization systems other than the one we have developed, improve the performance of the system further by introducing other sources of knowledge necessary for reduction, and explore other interesting applications of the reduction system.",
616
- "cite_spans": [],
617
- "ref_spans": [],
618
- "eq_spans": [],
619
- "section": "5",
620
- "sec_num": null
621
- }
622
- ],
623
- "back_matter": [
624
- {
625
- "text": "This material is based upon work supported by the National Science Foundation under Grant No. IRI 96-19124 and IRI 96-18797. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation.",
626
- "cite_spans": [],
627
- "ref_spans": [],
628
- "eq_spans": [],
629
- "section": "Acknowledgment",
630
- "sec_num": null
631
- }
632
- ],
633
- "bib_entries": {
634
- "BIBREF0": {
635
- "ref_id": "b0",
636
- "title": "Practical simplification of English newspaper text to assist aphasic readers",
637
- "authors": [
638
- {
639
- "first": "John",
640
- "middle": [],
641
- "last": "Carroll",
642
- "suffix": ""
643
- },
644
- {
645
- "first": "Guido",
646
- "middle": [],
647
- "last": "Minnen",
648
- "suffix": ""
649
- },
650
- {
651
- "first": "Yvonne",
652
- "middle": [],
653
- "last": "Canning",
654
- "suffix": ""
655
- },
656
- {
657
- "first": "Siobhan",
658
- "middle": [],
659
- "last": "Devlin",
660
- "suffix": ""
661
- },
662
- {
663
- "first": "John",
664
- "middle": [],
665
- "last": "Tait",
666
- "suffix": ""
667
- }
668
- ],
669
- "year": 1998,
670
- "venue": "Proceedings of AAAI-98 Workshop on Integrating Artificial Intelligence and Assistive Technology",
671
- "volume": "",
672
- "issue": "",
673
- "pages": "",
674
- "other_ids": {},
675
- "num": null,
676
- "urls": [],
677
- "raw_text": "John Carroll, Guido Minnen, Yvonne Canning, Siobhan Devlin, and John Tait. 1998. Practi- cal simplification of English newspaper text to assist aphasic readers. In Proceedings of AAAI- 98 Workshop on Integrating Artificial Intelligence and Assistive Technology, Madison, Wisconsin, July.",
678
- "links": null
679
- },
680
- "BIBREF1": {
681
- "ref_id": "b1",
682
- "title": "Motivations and methods for text simplification",
683
- "authors": [
684
- {
685
- "first": "R",
686
- "middle": [],
687
- "last": "Chandrasekar",
688
- "suffix": ""
689
- },
690
- {
691
- "first": "C",
692
- "middle": [],
693
- "last": "Doran",
694
- "suffix": ""
695
- },
696
- {
697
- "first": "B",
698
- "middle": [],
699
- "last": "Srinivas",
700
- "suffix": ""
701
- }
702
- ],
703
- "year": 1996,
704
- "venue": "Proceedings of the 16th International Conference on Computational Linguistics (COLING'96)",
705
- "volume": "",
706
- "issue": "",
707
- "pages": "",
708
- "other_ids": {},
709
- "num": null,
710
- "urls": [],
711
- "raw_text": "R. Chandrasekar, C. Doran, and B. Srinivas. 1996. Motivations and methods for text simplification. In Proceedings of the 16th International Confer- ence on Computational Linguistics (COLING'96), Copenhagen, Denmark, August.",
712
- "links": null
713
- },
714
- "BIBREF2": {
715
- "ref_id": "b2",
716
- "title": "Less is more: Eliminating index terms from subordinate clauses",
717
- "authors": [
718
- {
719
- "first": "H",
720
- "middle": [],
721
- "last": "Simon",
722
- "suffix": ""
723
- },
724
730
- {
731
- "first": "William",
732
- "middle": [
733
- "B"
734
- ],
735
- "last": "Oliver",
736
- "suffix": ""
737
- }
744
- ],
745
- "year": 1999,
746
- "venue": "Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics(ACL'99)",
747
- "volume": "",
748
- "issue": "",
749
- "pages": "349--356",
750
- "other_ids": {},
751
- "num": null,
752
- "urls": [],
753
- "raw_text": "Simon H. Corston-Oliver and William B. Dolan. 1999. Less is more: Eliminating index terms from subordinate clauses. In Proceedings of the 37th Annual Meeting of the Association for Computa- tional Linguistics(ACL'99), pages 349-356, Uni- versity of Maryland, Maryland, June.",
754
- "links": null
755
- },
756
- "BIBREF3": {
757
- "ref_id": "b3",
758
- "title": "Producing intelligent telegraphic text reduction to provide an audio scanning service for the blind",
759
- "authors": [
760
- {
761
- "first": "Gregory",
762
- "middle": [],
763
- "last": "Grefenstette",
764
- "suffix": ""
765
- }
766
- ],
767
- "year": 1998,
768
- "venue": "Working Notes of AAAI 1998 Spring Symposium on Intelligent Text Summarization",
769
- "volume": "",
770
- "issue": "",
771
- "pages": "",
772
- "other_ids": {},
773
- "num": null,
774
- "urls": [],
775
- "raw_text": "Gregory Grefenstette. 1998. Producing intelligent telegraphic text reduction to provide an audio scanning service for the blind. In Working Notes of AAAI 1998 Spring Symposium on Intelligent Text Summarization, Stanford University, Stand- ford, California, March.",
776
- "links": null
777
- },
778
- "BIBREF4": {
779
- "ref_id": "b4",
780
- "title": "Combining multiple, large-scale resources in a reusable lexicon for natural language generation",
781
- "authors": [
782
- {
783
- "first": "Hongyan",
784
- "middle": [],
785
- "last": "Jing",
786
- "suffix": ""
787
- },
788
- {
789
- "first": "Kathleen",
790
- "middle": [
791
- "R"
792
- ],
793
- "last": "Mckeown",
794
- "suffix": ""
795
- }
796
- ],
797
- "year": 1998,
798
- "venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and the 17th International Conference on Computational Linguistics",
799
- "volume": "1",
800
- "issue": "",
801
- "pages": "607--613",
802
- "other_ids": {},
803
- "num": null,
804
- "urls": [],
805
- "raw_text": "Hongyan Jing and Kathleen R. McKeown. 1998. Combining multiple, large-scale resources in a reusable lexicon for natural language generation. In Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics and the 17th International Conference on Computational Linguistics, volume 1, pages 607-613, Universit~ de Montreal, Quebec, Canada, August.",
806
- "links": null
807
- },
808
- "BIBREF5": {
809
- "ref_id": "b5",
810
- "title": "The decomposition of human-written summary sentences",
811
- "authors": [
812
- {
813
- "first": "Hongyan",
814
- "middle": [],
815
- "last": "Jing",
816
- "suffix": ""
817
- },
818
- {
819
- "first": "Kathleen",
820
- "middle": [
821
- "R"
822
- ],
823
- "last": "Mckeown",
824
- "suffix": ""
825
- }
826
- ],
827
- "year": 1999,
828
- "venue": "Proceedings of the 22nd International ACM SIGIR Conference on Research and Development in Information Retrieval",
829
- "volume": "",
830
- "issue": "",
831
- "pages": "129--136",
832
- "other_ids": {},
833
- "num": null,
834
- "urls": [],
835
- "raw_text": "Hongyan Jing and Kathleen R. McKeown. 1999. The decomposition of human-written summary sentences. In Proceedings of the 22nd Interna- tional ACM SIGIR Conference on Research and Development in Information Retrieval, pages 129- 136, University of Berkeley, CA, August.",
836
- "links": null
837
- },
838
- "BIBREF6": {
839
- "ref_id": "b6",
840
- "title": "Cut and paste based text summarization",
841
- "authors": [
842
- {
843
- "first": "Hongyan",
844
- "middle": [],
845
- "last": "Jing",
846
- "suffix": ""
847
- },
848
- {
849
- "first": "Kathleen",
850
- "middle": [
851
- "R"
852
- ],
853
- "last": "Mckeown",
854
- "suffix": ""
855
- }
856
- ],
857
- "year": 2000,
858
- "venue": "Proceedings of NAACL",
859
- "volume": "",
860
- "issue": "",
861
- "pages": "",
862
- "other_ids": {},
863
- "num": null,
864
- "urls": [],
865
- "raw_text": "Hongyan Jing and Kathleen R. McKeown. 2000. Cut and paste based text summarization. In Pro- ceedings of NAACL 2000.",
866
- "links": null
867
- },
868
- "BIBREF7": {
869
- "ref_id": "b7",
870
- "title": "English Verb Classes and Alternations: A Preliminary Investigation",
871
- "authors": [
872
- {
873
- "first": "Beth",
874
- "middle": [],
875
- "last": "Levin",
876
- "suffix": ""
877
- }
878
- ],
879
- "year": 1993,
880
- "venue": "",
881
- "volume": "",
882
- "issue": "",
883
- "pages": "",
884
- "other_ids": {},
885
- "num": null,
886
- "urls": [],
887
- "raw_text": "Beth Levin. 1993. English Verb Classes and Alter- nations: A Preliminary Investigation. University of Chicago Press, Chicago, Illinois.",
888
- "links": null
889
- },
890
- "BIBREF9": {
891
- "ref_id": "b9",
892
- "title": "English Slot Grammar",
893
- "authors": [
894
- {
895
- "first": "Michael",
896
- "middle": [],
897
- "last": "Mccord",
898
- "suffix": ""
899
- }
900
- ],
901
- "year": 1990,
902
- "venue": "",
903
- "volume": "",
904
- "issue": "",
905
- "pages": "",
906
- "other_ids": {},
907
- "num": null,
908
- "urls": [],
909
- "raw_text": "Michael McCord, 1990. English Slot Grammar. IBM.",
910
- "links": null
911
- },
912
- "BIBREF10": {
913
- "ref_id": "b10",
914
- "title": "Introduction to WordNet: An on-line lexical database",
915
- "authors": [
916
- {
917
- "first": "George",
918
- "middle": [
919
- "A"
920
- ],
921
- "last": "Miller",
922
- "suffix": ""
923
- },
924
- {
925
- "first": "Richard",
926
- "middle": [],
927
- "last": "Beckwith",
928
- "suffix": ""
929
- },
930
- {
931
- "first": "Christiane",
932
- "middle": [],
933
- "last": "Fellbaum",
934
- "suffix": ""
935
- },
936
- {
937
- "first": "Derek",
938
- "middle": [],
939
- "last": "Gross",
940
- "suffix": ""
941
- },
942
- {
943
- "first": "Katherine",
944
- "middle": [
945
- "J"
946
- ],
947
- "last": "Miller",
948
- "suffix": ""
949
- }
950
- ],
951
- "year": 1990,
952
- "venue": "International Journal of Lexicography (special issue)",
953
- "volume": "3",
954
- "issue": "4",
955
- "pages": "235--312",
956
- "other_ids": {},
957
- "num": null,
958
- "urls": [],
959
- "raw_text": "George A. Miller, Richard Beckwith, Christiane Fell- baum, Derek Gross, and Katherine J. Miller. 1990. Introduction to WordNet: An on-line lexi- cal database. International Journal of Lexicogra- phy (special issue), 3(4):235-312.",
960
- "links": null
961
- },
962
- "BIBREF11": {
963
- "ref_id": "b11",
964
- "title": "A semantic concordance",
965
- "authors": [
966
- {
967
- "first": "George",
968
- "middle": [
969
- "A"
970
- ],
971
- "last": "Miller",
972
- "suffix": ""
973
- },
974
- {
975
- "first": "Claudia",
976
- "middle": [],
977
- "last": "Leacock",
978
- "suffix": ""
979
- },
980
- {
981
- "first": "Randee",
982
- "middle": [],
983
- "last": "Tengi",
984
- "suffix": ""
985
- },
986
- {
987
- "first": "Ross",
988
- "middle": [
989
- "T"
990
- ],
991
- "last": "Bunker",
992
- "suffix": ""
993
- }
994
- ],
995
- "year": 1993,
996
- "venue": "",
997
- "volume": "",
998
- "issue": "",
999
- "pages": "",
1000
- "other_ids": {},
1001
- "num": null,
1002
- "urls": [],
1003
- "raw_text": "George A. Miller, Claudia Leacock, Randee Tengi, and Ross T. Bunker. 1993. A semantic concor- dance. Cognitive Science Laboratory, Princeton University.",
1004
- "links": null
1005
- }
1006
- },
1007
- "ref_entries": {
1008
- "FIGREF1": {
1009
- "uris": null,
1010
- "text": "Sample output of sentence reduction program",
1011
- "type_str": "figure",
1012
- "num": null
1013
- },
1014
- "FIGREF2": {
1015
- "uris": null,
1016
- "text": "Reduced form by the program",
1017
- "type_str": "figure",
1018
- "num": null
1019
- },
1020
- "FIGREF3": {
1021
- "uris": null,
1022
- "text": "Sample sentence and parse tree",
1023
- "type_str": "figure",
1024
- "num": null
1025
- }
1026
- }
1027
- }
1028
- }
 
Full_text_JSON/prefixA/json/A00/A00-1044.json DELETED
@@ -1,952 +0,0 @@
1
- {
2
- "paper_id": "A00-1044",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:32.038440Z"
6
- },
7
- "title": "Named Entity Extraction from Noisy Input: Speech and OCR",
8
- "authors": [
9
- {
10
- "first": "David",
11
- "middle": [],
12
- "last": "Miller",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "[email protected]"
16
- }
17
- ],
18
- "year": "",
19
- "venue": null,
20
- "identifiers": {},
21
- "abstract": "In this paper, we analyze the performance of name finding in the context of a variety of automatic speech recognition (ASR) systems and in the context of one optical character recognition (OCR) system. We explore the effects of word error rate from ASR and OCR, performance as a function of the amount of training data, and for speech, the effect of out-of-vocabulary errors and the loss of punctuation and mixed case I",
22
- "pdf_parse": {
23
- "paper_id": "A00-1044",
24
- "_pdf_hash": "",
25
- "abstract": [
26
- {
27
- "text": "In this paper, we analyze the performance of name finding in the context of a variety of automatic speech recognition (ASR) systems and in the context of one optical character recognition (OCR) system. We explore the effects of word error rate from ASR and OCR, performance as a function of the amount of training data, and for speech, the effect of out-of-vocabulary errors and the loss of punctuation and mixed case I",
28
- "cite_spans": [],
29
- "ref_spans": [],
30
- "eq_spans": [],
31
- "section": "Abstract",
32
- "sec_num": null
33
- }
34
- ],
35
- "body_text": [
36
- {
37
- "text": "Information extraction systems have traditionally been evaluated on online text with relatively few errors in the input. For example, this description of the Nominator system (Wacholder et al. 1997) would apply to several other systems: \"We chose The Wall Street Journal corpus because it follows standard stylistic conventions, especially capitalization, which is essential for Nominator to work.\" The real-world challenge, however, is pointed out in Palmer and Day (1997) : \"It is also unknown how the existing high-scoring systems would perform on less well-behaved texts, such as single-case texts, non-newswire texts, or text obtained via optical character recognition (OCR).\"",
38
- "cite_spans": [
39
- {
40
- "start": 175,
41
- "end": 198,
42
- "text": "(Wacholder et al. 1997)",
43
- "ref_id": null
44
- },
45
- {
46
- "start": 452,
47
- "end": 473,
48
- "text": "Palmer and Day (1997)",
49
- "ref_id": null
50
- }
51
- ],
52
- "ref_spans": [],
53
- "eq_spans": [],
54
- "section": "Introduction",
55
- "sec_num": null
56
- },
57
- {
58
- "text": "In this paper we explore how performance degrades on noisy input, in particular on broadcast news (speech) and on newspaper (printed matter).",
59
- "cite_spans": [],
60
- "ref_spans": [],
61
- "eq_spans": [],
62
- "section": "Introduction",
63
- "sec_num": null
64
- },
65
- {
66
- "text": "Error rates of automatic speech recognizers (ASR) on broadcast news are still very high, e.g., 14-28% word error. Though character error can be very low for laser printer output, word error rates of 20% are possible for OCR systems applied to newsprint or low-quality printed matter.",
67
- "cite_spans": [],
68
- "ref_spans": [],
69
- "eq_spans": [],
70
- "section": "Introduction",
71
- "sec_num": null
72
- },
73
- {
74
- "text": "In this paper, we evaluate a learning algorithm, a hidden Markov model (HM), for named entity extraction applied to human transcripts of news, to transcripts without case or punctuation (perfect speech output), to errorful ASR output and to OCR output. Extracting information from noisy sources poses the following challenges, which are addressed in the paper.",
75
- "cite_spans": [],
76
- "ref_spans": [],
77
- "eq_spans": [],
78
- "section": "Introduction",
79
- "sec_num": null
80
- },
81
- {
82
- "text": "\u2022 Since speech recognizers do not generate mixed case nor punctuation, how much do case and punctuation contribute to recognizing names in English? (Section 3.) Note that these challenges also arise in languages without case to signal proper nouns (e.g., Chinese, German, Japanese), in mono-case English or informal English (e.g., emails).",
83
- "cite_spans": [],
84
- "ref_spans": [],
85
- "eq_spans": [],
86
- "section": "Introduction",
87
- "sec_num": null
88
- },
89
- {
90
- "text": "\u2022 How much will performance degrade with increasing error in the input? (Section 4.)",
91
- "cite_spans": [],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "Introduction",
95
- "sec_num": null
96
- },
97
- {
98
- "text": "\u2022 How does closed vocabulary recognition affect information extraction performance? (Section 5)",
99
- "cite_spans": [],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "Introduction",
103
- "sec_num": null
104
- },
105
- {
106
- "text": "\u2022 For the learning algorithm employed, how much training and effort are required? (Section 6) \u2022 How much do lists of names contribute to performance? (Section 7) 2 Algorithms and Data",
107
- "cite_spans": [],
108
- "ref_spans": [
109
- {
110
- "start": 82,
111
- "end": 93,
112
- "text": "(Section 6)",
113
- "ref_id": null
114
- },
115
- {
116
- "start": 150,
117
- "end": 161,
118
- "text": "(Section 7)",
119
- "ref_id": "TABREF4"
120
- }
121
- ],
122
- "eq_spans": [],
123
- "section": "Introduction",
124
- "sec_num": null
125
- },
126
- {
127
- "text": "The named entity (NE) task used for this evaluation requires the system to identify all named locations, named persons, named organizations, dates, times, monetary amounts, and percentages. The task definition is given in Chinchor, et al, (1998) .",
128
- "cite_spans": [
129
- {
130
- "start": 222,
131
- "end": 245,
132
- "text": "Chinchor, et al, (1998)",
133
- "ref_id": null
134
- }
135
- ],
136
- "ref_spans": [],
137
- "eq_spans": [],
138
- "section": "Task Definition and Data",
139
- "sec_num": "2.1"
140
- },
141
- {
142
- "text": "For speech recognition, roughly 175 hours of news broadcasts (roughly 1.2m words of audio) were available from the National Institute for Science and Technology (NIST) for training. All of that data includes both the audio and a manual transcription. The test set consisted of 3 hours of news (roughly 25k words).",
143
- "cite_spans": [],
144
- "ref_spans": [],
145
- "eq_spans": [],
146
- "section": "Task Definition and Data",
147
- "sec_num": "2.1"
148
- },
149
- {
150
- "text": "For the combined OCR/NE system, the OCR component was trained on the University of Washington English Image Database, which is comprised primarily of technical journal articles. The NE system was trained separately on 690K words of 1993 Wall Street Journal (WSJ) data (roughly 1250 articles), including development data from the Sixth Message Understanding Conference (MUC-6) Named Entity evaluation. The test set was approximately 20K words of separate WSJ data (roughly 45 articles), also taken from the MUC-6 data set. Both test and training texts were original text (no OCR errors) in mixed case with normal punctuation. Printing the on-line text, rather than using the original newsprint, produced the images for OCR, which were all scanned at 600 DPI.",
151
- "cite_spans": [],
152
- "ref_spans": [],
153
- "eq_spans": [],
154
- "section": "Task Definition and Data",
155
- "sec_num": "2.1"
156
- },
157
- {
158
- "text": "The information extraction system tested is IdentiFinder(TM), which has previously been detailed in Bikel et al. (1997 Bikel et al. ( , 1999 . In that system, an HMM labels each word either with one of the desired classes (e.g., person, organization, etc.) or with the label NOT-A-NAME (to represent \"none of the desired classes\"). The states of the HMM fall into regions, one region for each desired class plus one for NOT-A-NAME. (See Figure 2-1 .) The HMM thus has a model of each desired class and of the other text.",
159
- "cite_spans": [
160
- {
161
- "start": 100,
162
- "end": 118,
163
- "text": "Bikel et al. (1997",
164
- "ref_id": "BIBREF1"
165
- },
166
- {
167
- "start": 119,
168
- "end": 140,
169
- "text": "Bikel et al. ( , 1999",
170
- "ref_id": "BIBREF2"
171
- }
172
- ],
173
- "ref_spans": [
174
- {
175
- "start": 437,
176
- "end": 447,
177
- "text": "Figure 2-1",
178
- "ref_id": null
179
- }
180
- ],
181
- "eq_spans": [],
182
- "section": "Algorithms",
183
- "sec_num": "2.2"
184
- },
185
- {
186
- "text": "Note that the implementation is not confined to the seven name classes used in the NE task; the particular classes to be recognized can be easily changed via a parameter.",
187
- "cite_spans": [],
188
- "ref_spans": [],
189
- "eq_spans": [],
190
- "section": "Algorithms",
191
- "sec_num": "2.2"
192
- },
193
- {
194
- "text": "Within each of the regions, we use a statistical bigram language model, and emit exactly one word upon entering each state. Therefore, the number of states in each of the name-class regions is equal to the vocabulary size. Additionally, there are two special states, the START-OF-SENTENCE and END-OF-SENTENCE states. In addition to generating the word, states may also generate features of that word.",
195
- "cite_spans": [],
196
- "ref_spans": [],
197
- "eq_spans": [],
198
- "section": "Algorithms",
199
- "sec_num": "2.2"
200
- },
201
- {
202
- "text": "END-OF SENTENCE",
203
- "cite_spans": [],
204
- "ref_spans": [],
205
- "eq_spans": [],
206
- "section": "START-OF-SENTENCE",
207
- "sec_num": null
208
- },
209
- {
210
- "text": "The output of each of the speech recognizers is in SNOR (speech normalized orthographic representation) format, a format which is largely unpunctuated and in all capital letters (apostrophes and periods after spoken letters are preserved). When a typical NE extraction system runs on ordinary English text, it uses punctuation and capitalization as features that contribute to its decisions. In order to learn how much degradation in performance is caused by the absence of these features from SNOR format, we performed the following experiment.",
211
- "cite_spans": [],
212
- "ref_spans": [],
213
- "eq_spans": [],
214
- "section": "Figure 2-1: Pictorial representation of conceptual model 3 Effect of Textual Clues",
215
- "sec_num": null
216
- },
217
- {
218
- "text": "We took a corpus that had full punctuation and mixed case and preprocessed it to make three new versions: one with all upper case letters but punctuation preserved, one with original case but punctuation marks removed, and one with both case and punctuation removed. We then partitioned all four versions of the corpus into a training set and a held-out test set, using the same partition in all four versions, and measured IdentiFinder's performance.",
219
- "cite_spans": [],
220
- "ref_spans": [],
221
- "eq_spans": [],
222
- "section": "Figure 2-1: Pictorial representation of conceptual model 3 Effect of Textual Clues",
223
- "sec_num": null
224
- },
225
- {
226
- "text": "The corpus we used for this experiment was the transcriptions of the second 100 hours of the Broadcast News acoustic modelling data, comprising 114 episodes. We partitioned this data to form a training set of 98 episodes (640,000 words) and a test set of 16 episodes (130,000 words). Because the test transcriptions were created by humans, they have a 0% word error rate. The results are shown in Table 3 -1. The removal of case information has the greater effect, reducing performance by 2.3 points, while the loss of punctuation reduces performance by 1.4 points. The loss from removing both features is 3.4 points, less than the sum of the individual degradations. This suggests that there are some events where both mixed case and punctuation are required to lead IdentiFinder to the correct answer. It should be noted that because the data are transcriptions of speech, no version of the corpus contains all the textual clues that would appear in newspaper text like the MUC-7 New York Times data. In particular, numbers are written out in words as they would be spoken, not represented using digits, and abbreviations such as \"Dr.\", \"Jr.\" or \"Sept.\" are expanded out to their full spoken word. We conclude that the degradation in performance going from newspaper text to SNOR recognizer output is at least 3.4 points in the 0% WER case, and probably more due to these other missing text clues.",
227
- "cite_spans": [],
228
- "ref_spans": [
229
- {
230
- "start": 397,
231
- "end": 404,
232
- "text": "Table 3",
233
- "ref_id": "TABREF1"
234
- }
235
- ],
236
- "eq_spans": [],
237
- "section": "Figure 2-1: Pictorial representation of conceptual model 3 Effect of Textual Clues",
238
- "sec_num": null
239
- },
240
- {
241
- "text": "The OCR experiments were performed using the system described in Makhoul et al. (1998) . Recognition was performed at the character level, rather than the word level, so the vocabulary is not closed (unlike the ASR results discussed in subsequent sections). 3. OCR on degraded images (13.7% WER).",
242
- "cite_spans": [
243
- {
244
- "start": 65,
245
- "end": 86,
246
- "text": "Makhoul et al. (1998)",
247
- "ref_id": "BIBREF5"
248
- }
249
- ],
250
- "ref_spans": [],
251
- "eq_spans": [],
252
- "section": "Optical Character (OCR) Recognition",
253
- "sec_num": "4.1"
254
- },
255
- {
256
- "text": "4.\" OCR on degraded images, processed with a weak character language model (19.1% WER)",
257
- "cite_spans": [],
258
- "ref_spans": [],
259
- "eq_spans": [],
260
- "section": "Optical Character (OCR) Recognition",
261
- "sec_num": "4.1"
262
- },
263
- {
264
- "text": "For the second and third conditions, 1.3M characters of Wall Street Journal were used for OCR language model training: the fourth condition used a much weaker character language model, which accounts for the poorer performance.",
265
- "cite_spans": [],
266
- "ref_spans": [],
267
- "eq_spans": [],
268
- "section": "Optical Character (OCR) Recognition",
269
- "sec_num": "4.1"
270
- },
271
- {
272
- "text": "The interpolated line has been fit to the performance of the OCR-based systems, with a slope indicating 0.6 points of F-measure lost for each percentage point increase in word error. The line has been extrapolated to 0% WER: the actual 0% WER condition is 95.4, which only slightly exceeds the projected value. Figure 5 -1 shows IdentiFinder's performance on all speech systems in the 1998 Hub-4 evaluations (Przybocki, et al., 1999) . These experiments were run in co-operation with NIST. The interpolated line has been fit to the errorful transcripts, and then extrapolated out to 0% WER speech. As can be seen, the line fits the data extremely well, and has a slope of 0.7 points of F-measure lost for each additional 1% of word error rate. The fact that the extrapolated ' These figures do not reflect the best possible performance of the OCR system: for example, when testing on degraded data, it would be usual to include representative data in training.",
273
- "cite_spans": [
274
- {
275
- "start": 408,
276
- "end": 433,
277
- "text": "(Przybocki, et al., 1999)",
278
- "ref_id": "BIBREF8"
279
- }
280
- ],
281
- "ref_spans": [
282
- {
283
- "start": 311,
284
- "end": 319,
285
- "text": "Figure 5",
286
- "ref_id": null
287
- }
288
- ],
289
- "eq_spans": [],
290
- "section": "Optical Character (OCR) Recognition",
291
- "sec_num": "4.1"
292
- },
293
- {
294
- "text": "This was not a concern for this experiment, however, which focussed on name finding performance. line slightly overestimates the actual performance at 0% WER (given by a A) indicates that the degradation may be sub-linear in the range 0-15% WER. ",
295
- "cite_spans": [],
296
- "ref_spans": [],
297
- "eq_spans": [],
298
- "section": "Optical Character (OCR) Recognition",
299
- "sec_num": "4.1"
300
- },
301
- {
302
- "text": "It is generally agreed that out-of-vocabulary (OOV) words do not have a major impact on the word error rate achieved by large vocabulary speech recognizers doing transcription. The reason is that speech lexicons are designed to include the most frequent words, thus ensuring that OOV words will represent only a small fraction of the words in any test set. However, we have seen that the ,OOV rate for words that are part of named-entities can be as much as a factor of ten greater than the baseline OOV for non-name words. This could make OOV a major problem for NE extraction from speech. Most modem speech recognizers employ a vocabulary of roughly 60,000 words; using a larger lexicon introduces more errors from acoustic perplexity than it fixes through enlarged vocabulary. It is clear from the table that the only name category that might suffer a significant OOV problem with a 60K vocabulary is PERSONs. One might imagine that a more carefully constructed lexicon could reduce the OOV rate for PERSONs while still staying within the 60,000 word limit. However, even if a cleverly designed 60K lexicon succeeded in having the name coverage of the frequency-ordered 120K word lexicon (which contains roughly 40,000 more proper names than the 60K lexicon), it would reduce the PERSON OOV rate by only 4% absolute.",
303
- "cite_spans": [],
304
- "ref_spans": [],
305
- "eq_spans": [],
306
- "section": "Out of Vocabulary Rates for Names",
307
- "sec_num": null
308
- },
309
- {
310
- "text": "Effect of training set size",
311
- "cite_spans": [],
312
- "ref_spans": [],
313
- "eq_spans": [],
314
- "section": "6",
315
- "sec_num": null
316
- },
317
- {
318
- "text": "We have measured NE performance in the context of speech as a function of training set size and found that the performance increases logarithmically with the amount of training data for 15% WER test data as well as for 0% WER input. However the growth rate is slower for 15% WER test data. We constructed small training sets of various size by randomly selecting sets of 6, 12, 25, and 49 episodes from the second 100 hours of annotated Broadcast News training data. We also defined a training set of 98 episodes from the second 100 hours, as well as sets containing the full 98 episodes plus some or all of the first 100 hours of Broadcast News training. Our largest training set contained 1.2 million words, and our smallest a mere 30,000 words. All training data were converted to SNOR format.",
319
- "cite_spans": [],
320
- "ref_spans": [],
321
- "eq_spans": [],
322
- "section": "Automatic Speech Recognition",
323
- "sec_num": "6.1"
324
- },
325
- {
326
- "text": "Given that PERSONs account for roughly 50% of the named-entities in broadcast news, the maximum gain in F measure available for doubling the lexicon size is 2 points. Moreover, this gain would require that every PERSON name added to the vocabulary be recognized properly --an unlikely prospect, since most of these words will not appear in the acoustic training for the recognizer. For these reasons, we conclude that the OOV problem is not a major factor in determining NE performance from speech.",
327
- "cite_spans": [],
328
- "ref_spans": [],
329
- "eq_spans": [],
330
- "section": "Automatic Speech Recognition",
331
- "sec_num": "6.1"
332
- },
333
- {
334
- "text": "For each training set, we trained a separate IdentiFinder model and evaluated it on two versions of the 1998 Hub4-IE data --the 0% WER transcription created by a human, and an ASR transcript with 15%. The results are plotted in Figure 6 -1. The slopes of the interpolated lines predict that IdentiFinder's performance on 15% WER speech will increase by 1.5 points for each additional doubling of the training data, while performance goes up 1.8 points per doubling of the training for perfect speech input. Possibly, the difference in slope of the two lines is that the real value of increasing the training set lies in increasing the number of distinct rare names that appear. Once an example is in the training, IdentiFinder is able to extract it and use it in test. However, when the test data is recognizer output, the rare names are less likely to appear in the test, either because they don't appear in the speech lexicon or they are poorly trained in the speech model and misrecognized. If they don't appear in the test, IdentiFinder can't make full use of the additional training, and thus performance on errorful input increases more slowly than it does on error-free input text.",
335
- "cite_spans": [],
336
- "ref_spans": [
337
- {
338
- "start": 228,
339
- "end": 236,
340
- "text": "Figure 6",
341
- "ref_id": null
342
- }
343
- ],
344
- "eq_spans": [],
345
- "section": "Automatic Speech Recognition",
346
- "sec_num": "6.1"
347
- },
348
- {
349
- "text": "A similar relationship between training size and performance is seen for the OCR test condition.",
350
- "cite_spans": [],
351
- "ref_spans": [],
352
- "eq_spans": [],
353
- "section": "Optical Character Recognition",
354
- "sec_num": "6.2"
355
- },
356
- {
357
- "text": "The training was partitioned by documents into equal sized sets: While this graph of this data in Figure 6 -2 shows a logarithmic improvement, as with the ASR experiments, the rate of improvement is substantially less, roughly 0.9 increase in Fmeasure for doubling the training data. This may be explained by the difference in difficulty between the two tests: even with only 77.5k words of training, the 0% WER performance exceeds the ASR system trained on 1.2M words.",
358
- "cite_spans": [],
359
- "ref_spans": [
360
- {
361
- "start": 98,
362
- "end": 106,
363
- "text": "Figure 6",
364
- "ref_id": null
365
- }
366
- ],
367
- "eq_spans": [],
368
- "section": "function of training data for speech.",
369
- "sec_num": null
370
- },
371
- {
372
- "text": "full point, while on recognizer produced output, performance goes u~p by only 0.3 points. / 0% WER 15% WER Without lists 89.5 81.9 With lists 90.5 82.2 ",
373
- "cite_spans": [],
374
- "ref_spans": [],
375
- "eq_spans": [],
376
- "section": "function of training data for speech.",
377
- "sec_num": null
378
- },
379
- {
380
- "text": "Like most NE extraction systems, IdentiFinder can use lists of strings of known to be names to estimate the probability that a word will be a name, given that it appears on a particular list. We trained two models on 1.2 million words of SNOR data, one with lists and one without. We tested on the human transcription (0% WER) and the ASR (15% WER) versions of the 1998 evaluation transcripts. Table 7 -1 shows the results. We see that on human constructed transcripts, lists improve the performance by a To our knowledge, no other information extraction technology has been applied to OCR material.",
381
- "cite_spans": [],
382
- "ref_spans": [
383
- {
384
- "start": 394,
385
- "end": 401,
386
- "text": "Table 7",
387
- "ref_id": "TABREF4"
388
- }
389
- ],
390
- "eq_spans": [],
391
- "section": "Effect of Lists",
392
- "sec_num": null
393
- },
394
- {
395
- "text": "For audio materials, three related efforts were benchmarked on NE extraction from broadcast news. Palmer, et al. (1999) employs an HMM very similar to that reported for IdentifFinder (Bikel et al., 1997 (Bikel et al., ,1999 . Renals et al. (1999) reports on a rule-based system and an HMM integrated with a speech recognizer. Appelt and Martin (1999) report on the TEXTPRO system, which recognises names using manually written finite state sales.",
396
- "cite_spans": [
397
- {
398
- "start": 98,
399
- "end": 119,
400
- "text": "Palmer, et al. (1999)",
401
- "ref_id": "BIBREF7"
402
- },
403
- {
404
- "start": 183,
405
- "end": 202,
406
- "text": "(Bikel et al., 1997",
407
- "ref_id": "BIBREF1"
408
- },
409
- {
410
- "start": 203,
411
- "end": 223,
412
- "text": "(Bikel et al., ,1999",
413
- "ref_id": "BIBREF2"
414
- },
415
- {
416
- "start": 226,
417
- "end": 246,
418
- "text": "Renals et al. (1999)",
419
- "ref_id": "BIBREF9"
420
- },
421
- {
422
- "start": 326,
423
- "end": 350,
424
- "text": "Appelt and Martin (1999)",
425
- "ref_id": "BIBREF0"
426
- }
427
- ],
428
- "ref_spans": [],
429
- "eq_spans": [],
430
- "section": "Effect of Lists",
431
- "sec_num": null
432
- },
433
- {
434
- "text": "Of these, the Palmer system and TEXTPRO report results on five different word error rates. Both degrade linearly, about .7F, with each 1% increase in WER from ASR. None report the effect of training set size, capitalization, punctuation, or out-of-vocabulary items.",
435
- "cite_spans": [],
436
- "ref_spans": [],
437
- "eq_spans": [],
438
- "section": "Effect of Lists",
439
- "sec_num": null
440
- },
441
- {
442
- "text": "Of the four systems, IdentiFinder represents state-of-the-art performance. Of all the systems evaluated, those with the simple architecture of ASR followed by information extraction performed markedly better than the system where extraction was more integrated with ASR.",
443
- "cite_spans": [],
444
- "ref_spans": [],
445
- "eq_spans": [],
446
- "section": "Effect of Lists",
447
- "sec_num": null
448
- },
449
- {
450
- "text": "In general, these results compare favorably with results reported in the Message Understanding Conference (Chinchor, et al., 1998) . The highest NE score in MUC-7 was 93.39; for 0% WER, our best score was 90.5 without case and punctuation which costs about 3.4 points.",
451
- "cite_spans": [
452
- {
453
- "start": 106,
454
- "end": 130,
455
- "text": "(Chinchor, et al., 1998)",
456
- "ref_id": null
457
- }
458
- ],
459
- "ref_spans": [],
460
- "eq_spans": [],
461
- "section": "Effect of Lists",
462
- "sec_num": null
463
- },
464
- {
465
- "text": "First and foremost, the hidden Markov model is quite robust in the face of errorful input. Performance on both speech and OCR input degrades linearly as a function of word error. Even, without case information or punctuation in the input, the performance on the broadcast news task is above 90%, with only a 3.4 point degradation in performance due to missing textual clues. Performance even with 15% word error degrades by only about 8 points of F for both OCR and ASR systems.",
466
- "cite_spans": [],
467
- "ref_spans": [],
468
- "eq_spans": [],
469
- "section": "Conclusions",
470
- "sec_num": "9"
471
- },
472
- {
473
- "text": "Second, because annotation can be performed quickly and inexpensively by non-experts, training-based systems like IdentiFinder offer a powerful advantage in moving to new languages and new domains. In our experience, annotation of English typically proceeds at 5k words per hour or more.",
474
- "cite_spans": [],
475
- "ref_spans": [],
476
- "eq_spans": [],
477
- "section": "Conclusions",
478
- "sec_num": "9"
479
- },
480
- {
481
- "text": "This means interesting performance can be achieved with as little as 20 hours of student annotation (i.e., at least 100k words).",
482
- "cite_spans": [],
483
- "ref_spans": [],
484
- "eq_spans": [],
485
- "section": "Conclusions",
486
- "sec_num": "9"
487
- },
488
- {
489
- "text": "Increasing training continually improves performance, generally as the logarithm of the training set size. On transcribed speech, performance is already good (89.3 on 0% WER) with only 100 hours or 643K words of training data.",
490
- "cite_spans": [],
491
- "ref_spans": [],
492
- "eq_spans": [],
493
- "section": "Conclusions",
494
- "sec_num": "9"
495
- },
496
- {
497
- "text": "Third, though errors due to words out of the vocabulary of the speech recognizer are a problem, they represent only about 15% of the errors made by the combined speech recognition and named entity system.",
498
- "cite_spans": [],
499
- "ref_spans": [],
500
- "eq_spans": [],
501
- "section": "Conclusions",
502
- "sec_num": "9"
503
- },
504
- {
505
- "text": "Fourth, we used exactly the same training data, modeling, and search algorithm for errorful input as we do for error-free input. For OCR, we trained on correct newswire once only for both correct text input 0% (WER) and for a variety of errorful text input conditions. For speech, we simply transformed text training data into SNOR format and retrained. Using this approach, the only cost of handling errorful input from OCR or ASR was a small amount of computing time. There were no rules to rewrite, no lists to change, and no vocabulary adjustments.",
506
- "cite_spans": [],
507
- "ref_spans": [],
508
- "eq_spans": [],
509
- "section": "Conclusions",
510
- "sec_num": "9"
511
- },
512
- {
513
- "text": "Even so, the degradation in performance on errorful input is no worse than the word error rate of the OCR/ASR system.",
514
- "cite_spans": [],
515
- "ref_spans": [],
516
- "eq_spans": [],
517
- "section": "Conclusions",
518
- "sec_num": "9"
519
- }
520
- ],
521
- "back_matter": [
522
- {
523
- "text": "The work reported here was supported in part by the Defense Advanced Research Projects Agency. Technical agent for part of this work was NRaD under contract number N66001-97-D-8501. The views and conclusions contained in this document are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of the Defense Advanced Research Projects Agency or the United States Government.",
524
- "cite_spans": [],
525
- "ref_spans": [],
526
- "eq_spans": [],
527
- "section": "Acknowledgments",
528
- "sec_num": null
529
- }
530
- ],
531
- "bib_entries": {
532
- "BIBREF0": {
533
- "ref_id": "b0",
534
- "title": "Named Entity Extraction from Speech: Approach and Results Using the Text.Pro System",
535
- "authors": [
536
- {
537
- "first": "D",
538
- "middle": [
539
- "E"
540
- ],
541
- "last": "Appelt",
542
- "suffix": ""
543
- },
544
- {
545
- "first": "D",
546
- "middle": [],
547
- "last": "Martin",
548
- "suffix": ""
549
- }
550
- ],
551
- "year": 1999,
552
- "venue": "Proceedings Of The DARPA Broadcast News Workshop",
553
- "volume": "",
554
- "issue": "",
555
- "pages": "51--54",
556
- "other_ids": {},
557
- "num": null,
558
- "urls": [],
559
- "raw_text": "D. E. Appelt, D. Martin, \"Named Entity Extraction from Speech: Approach and Results Using the Text.Pro System,\" Proceedings Of The DARPA Broadcast News Workshop, February 28-March 3, Morgan Kaufmann Publishers, pp 51-54 (1999).",
560
- "links": null
561
- },
562
- "BIBREF1": {
563
- "ref_id": "b1",
564
- "title": "Nymble: a High-Performance Learning Namefinder",
565
- "authors": [
566
- {
567
- "first": "D",
568
- "middle": [],
569
- "last": "Bikel",
570
- "suffix": ""
571
- },
572
- {
573
- "first": "S",
574
- "middle": [],
575
- "last": "Miller",
576
- "suffix": ""
577
- },
578
- {
579
- "first": "R",
580
- "middle": [],
581
- "last": "Schwartz",
582
- "suffix": ""
583
- },
584
- {
585
- "first": "R",
586
- "middle": [],
587
- "last": "Weischedel",
588
- "suffix": ""
589
- }
590
- ],
591
- "year": 1997,
592
- "venue": "Fifth Conference on Applied Natural Language Processing",
593
- "volume": "",
594
- "issue": "",
595
- "pages": "194--201",
596
- "other_ids": {},
597
- "num": null,
598
- "urls": [],
599
- "raw_text": "D. Bikel, S. Miller, R. Schwartz, and R. Weischedel, 'Nymble: a High-Performance Learning Name- finder\". In Fifth Conference on Applied Natural Language Processing, (published by ACL) pp 194- 201 (1997).",
600
- "links": null
601
- },
602
- "BIBREF2": {
603
- "ref_id": "b2",
604
- "title": "An Algorithm that Learns What's in a Name",
605
- "authors": [
606
- {
607
- "first": "D",
608
- "middle": [],
609
- "last": "Bikel",
610
- "suffix": ""
611
- },
612
- {
613
- "first": "R",
614
- "middle": [],
615
- "last": "Schwartz",
616
- "suffix": ""
617
- },
618
- {
619
- "first": "R",
620
- "middle": [],
621
- "last": "Weischedel",
622
- "suffix": ""
623
- }
624
- ],
625
- "year": 1999,
626
- "venue": "Machine Learning",
627
- "volume": "34",
628
- "issue": "",
629
- "pages": "211--231",
630
- "other_ids": {},
631
- "num": null,
632
- "urls": [],
633
- "raw_text": "D. Bikel, R. Schwartz, and R. Weischedel, \"An Algorithm that Learns What's in a Name,\" Machine Learning 34, pp 211-231, (1999).",
634
- "links": null
635
- },
636
- "BIBREF3": {
637
- "ref_id": "b3",
638
- "title": "MUC-7 Named Entity Task Definition Version 3.5\". Available by ftp from ftp.muc.saic.com/pub/MUC/MUC7-guidelines",
639
- "authors": [
640
- {
641
- "first": "N",
642
- "middle": [],
643
- "last": "Chinchor",
644
- "suffix": ""
645
- }
646
- ],
647
- "year": 1997,
648
- "venue": "",
649
- "volume": "",
650
- "issue": "",
651
- "pages": "",
652
- "other_ids": {},
653
- "num": null,
654
- "urls": [],
655
- "raw_text": "N. Chinchor, \"MUC-7 Named Entity Task Definition Version 3.5\". Available by ftp from ftp.muc.saic.com/pub/MUC/MUC7-guidelines. (1997).",
656
- "links": null
657
- },
658
- "BIBREF4": {
659
- "ref_id": "b4",
660
- "title": "HUB-4 Named Entity Task Definition Version 4.8\". Available by ftp from www.nist.gov/speech/hub4_98",
661
- "authors": [
662
- {
663
- "first": "N",
664
- "middle": [],
665
- "last": "Chincor",
666
- "suffix": ""
667
- },
668
- {
669
- "first": "P",
670
- "middle": [],
671
- "last": "Robinson",
672
- "suffix": ""
673
- },
674
- {
675
- "first": "E",
676
- "middle": [],
677
- "last": "Brown",
678
- "suffix": ""
679
- }
680
- ],
681
- "year": 1998,
682
- "venue": "",
683
- "volume": "",
684
- "issue": "",
685
- "pages": "",
686
- "other_ids": {},
687
- "num": null,
688
- "urls": [],
689
- "raw_text": "N. Chincor, P. Robinson, E. Brown, \"HUB-4 Named Entity Task Definition Version 4.8\". Available by ftp from www.nist.gov/speech/hub4_98. (1998).",
690
- "links": null
691
- },
692
- "BIBREF5": {
693
- "ref_id": "b5",
694
- "title": "A Script-Independent Methodology for Optical Character Recognition",
695
- "authors": [
696
- {
697
- "first": "J",
698
- "middle": [],
699
- "last": "Makhoul",
700
- "suffix": ""
701
- },
702
- {
703
- "first": "R",
704
- "middle": [],
705
- "last": "Schwartz",
706
- "suffix": ""
707
- },
708
- {
709
- "first": "C",
710
- "middle": [],
711
- "last": "Lapre",
712
- "suffix": ""
713
- },
714
- {
715
- "first": "I",
716
- "middle": [],
717
- "last": "Bazzi",
718
- "suffix": ""
719
- }
720
- ],
721
- "year": 1998,
722
- "venue": "Pattern Recognition",
723
- "volume": "",
724
- "issue": "",
725
- "pages": "1285--1294",
726
- "other_ids": {},
727
- "num": null,
728
- "urls": [],
729
- "raw_text": "J. Makhoul, R. Schwartz, C. Lapre, and I. Bazzi, \"A Script-Independent Methodology for Optical Character Recognition,\", Pattern Recognition, pp 1285-1294 (1998).",
730
- "links": null
731
- },
732
- "BIBREF6": {
733
- "ref_id": "b6",
734
- "title": "Advances in the BBN BYBLOS OCR System",
735
- "authors": [
736
- {
737
- "first": "Z",
738
- "middle": [],
739
- "last": "Lu",
740
- "suffix": ""
741
- },
742
- {
743
- "first": "R",
744
- "middle": [],
745
- "last": "Schwartz",
746
- "suffix": ""
747
- },
748
- {
749
- "first": "P",
750
- "middle": [],
751
- "last": "Natarajan",
752
- "suffix": ""
753
- },
754
- {
755
- "first": "I",
756
- "middle": [],
757
- "last": "Bazzi",
758
- "suffix": ""
759
- },
760
- {
761
- "first": "J",
762
- "middle": [],
763
- "last": "Makhoul",
764
- "suffix": ""
765
- }
766
- ],
767
- "year": 1999,
768
- "venue": "Proceedings of the International Conference on Document Analysis and Recognition",
769
- "volume": "",
770
- "issue": "",
771
- "pages": "",
772
- "other_ids": {},
773
- "num": null,
774
- "urls": [],
775
- "raw_text": "Z. Lu, R. Schwartz, P. Natarajan, I. Bazzi, J. Makhoul, \"Advances in the BBN BYBLOS OCR System,\" Proceedings of the International Conference on Document Analysis and Recognition, (1999).",
776
- "links": null
777
- },
778
- "BIBREF7": {
779
- "ref_id": "b7",
780
- "title": "Information Extraction from Broadcast News Speech Data",
781
- "authors": [
782
- {
783
- "first": "D",
784
- "middle": [
785
- "D"
786
- ],
787
- "last": "Palmer",
788
- "suffix": ""
789
- },
790
- {
791
- "first": "J",
792
- "middle": [
793
- "D"
794
- ],
795
- "last": "Burger",
796
- "suffix": ""
797
- },
798
- {
799
- "first": "M",
800
- "middle": [],
801
- "last": "Ostendorf",
802
- "suffix": ""
803
- }
804
- ],
805
- "year": 1999,
806
- "venue": "Proceedings Of The DARPA Broadcast News Workshop",
807
- "volume": "",
808
- "issue": "",
809
- "pages": "41--46",
810
- "other_ids": {},
811
- "num": null,
812
- "urls": [],
813
- "raw_text": "D. D. Palmer, J. D. Burger, M. Ostendorf, \"Information Extraction from Broadcast News Speech Data,\" Proceedings Of The DARPA Broadcast News Workshop, February 28-March 3, Morgan Kaufmann Publishers, pp 41-46 (1999).",
814
- "links": null
815
- },
816
- "BIBREF8": {
817
- "ref_id": "b8",
818
- "title": "Hub-4 Information Extraction Evaluation",
819
- "authors": [
820
- {
821
- "first": "M",
822
- "middle": [
823
- "A"
824
- ],
825
- "last": "Przybocki",
826
- "suffix": ""
827
- },
828
- {
829
- "first": "J",
830
- "middle": [
831
- "G"
832
- ],
833
- "last": "Fiscus",
834
- "suffix": ""
835
- },
836
- {
837
- "first": "J",
838
- "middle": [
839
- "S"
840
- ],
841
- "last": "Garofolo",
842
- "suffix": ""
843
- },
844
- {
845
- "first": "D",
846
- "middle": [
847
- "S"
848
- ],
849
- "last": "Pallett",
850
- "suffix": ""
851
- }
852
- ],
853
- "year": 1998,
854
- "venue": "Proceedings Of The DARPA Broadcast News Workshop",
855
- "volume": "",
856
- "issue": "",
857
- "pages": "13--18",
858
- "other_ids": {},
859
- "num": null,
860
- "urls": [],
861
- "raw_text": "M. A. Przybocki, J. G. Fiscus, J. S. Garofolo, D. S. Pallett, \"1998 Hub-4 Information Extraction Evaluation,\" Proceedings Of The DARPA Broadcast News Workshop, February 28-March 3, Morgan Kaufmann Publishers, pp 13-18 (1999).",
862
- "links": null
863
- },
864
- "BIBREF9": {
865
- "ref_id": "b9",
866
- "title": "Baseline IE-NE Experiments Using the SPRACH/LASIE System",
867
- "authors": [
868
- {
869
- "first": "S",
870
- "middle": [],
871
- "last": "Renals",
872
- "suffix": ""
873
- },
874
- {
875
- "first": "Y",
876
- "middle": [],
877
- "last": "Gotoh",
878
- "suffix": ""
879
- },
880
- {
881
- "first": "R",
882
- "middle": [],
883
- "last": "Gaizauskas",
884
- "suffix": ""
885
- },
886
- {
887
- "first": "M",
888
- "middle": [],
889
- "last": "Stevenson",
890
- "suffix": ""
891
- }
892
- ],
893
- "year": 1999,
894
- "venue": "Proceedings Of The DARPA Broadcast News Workshop",
895
- "volume": "",
896
- "issue": "",
897
- "pages": "47--50",
898
- "other_ids": {},
899
- "num": null,
900
- "urls": [],
901
- "raw_text": "S. Renals, Y. Gotoh, R. Gaizauskas, M. Stevenson, \"Baseline IE-NE Experiments Using the SPRACH/LASIE System,\" Proceedings Of The DARPA Broadcast News Workshop, February 28- March 3, Morgan Kaufmann Publishers, pp 47-50 (1999).",
902
- "links": null
903
- }
904
- },
905
- "ref_entries": {
906
- "FIGREF0": {
907
- "type_str": "figure",
908
- "uris": null,
909
- "num": null,
910
- "text": "Figure 4-1 shows IdentiFinder's performance under 4 conditions of varying word error rate (WER):1. Original text (no OCR, 0% WER) 2. OCR from high-quality (laser-printed) text images (2.7% WER)"
911
- },
912
- "FIGREF1": {
913
- "type_str": "figure",
914
- "uris": null,
915
- "num": null,
916
- "text": "Figure 4-1: IdentiFinder Named Entity performance as a function of OCR word error rate 4.2 Automatic Speech Recognition (ASR)"
917
- },
918
- "FIGREF2": {
919
- "type_str": "figure",
920
- "uris": null,
921
- "num": null,
922
- "text": "Figure 4-2: IdentiFinder named-entity performance as a function of word error rate (in cooperation with NIST) 5 Out of Vocabulary Rates for Names"
923
- },
924
- "FIGREF3": {
925
- "type_str": "figure",
926
- "uris": null,
927
- "num": null,
928
- "text": "Figure 6-1: Performance as a"
929
- },
930
- "FIGREF4": {
931
- "type_str": "figure",
932
- "uris": null,
933
- "num": null,
934
- "text": "Performance as a function of training data for OCR."
935
- },
936
- "TABREF1": {
937
- "html": null,
938
- "type_str": "table",
939
- "content": "<table/>",
940
- "num": null,
941
- "text": ""
942
- },
943
- "TABREF4": {
944
- "html": null,
945
- "type_str": "table",
946
- "content": "<table/>",
947
- "num": null,
948
- "text": ""
949
- }
950
- }
951
- }
952
- }
Full_text_JSON/prefixA/json/A00/A00-1045.json DELETED
@@ -1,963 +0,0 @@
1
- {
2
- "paper_id": "A00-1045",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:13.220809Z"
6
- },
7
- "title": "Improving Testsuites via Instrumentation",
8
- "authors": [
9
- {
10
- "first": "Norbert",
11
- "middle": [
12
- "Brsker"
13
- ],
14
- "last": "Eschenweg",
15
- "suffix": "",
16
- "affiliation": {},
17
- "email": ""
18
- }
19
- ],
20
- "year": "",
21
- "venue": null,
22
- "identifiers": {},
23
- "abstract": "This paper explores the usefulness of a technique from software engineering, namely code instrumentation, for the development of large-scale natural language grammars. Information about the usage of grammar rules in test sentences is used to detect untested rules, redundant test sentences, and likely causes of overgeneration. Results show that less than half of a large-coverage grammar for German is actually tested by two large testsuites, and that i0-30% of testing time is redundant. The methodology applied can be seen as a re-use of grammar writing knowledge for testsuite compilation.",
24
- "pdf_parse": {
25
- "paper_id": "A00-1045",
26
- "_pdf_hash": "",
27
- "abstract": [
28
- {
29
- "text": "This paper explores the usefulness of a technique from software engineering, namely code instrumentation, for the development of large-scale natural language grammars. Information about the usage of grammar rules in test sentences is used to detect untested rules, redundant test sentences, and likely causes of overgeneration. Results show that less than half of a large-coverage grammar for German is actually tested by two large testsuites, and that i0-30% of testing time is redundant. The methodology applied can be seen as a re-use of grammar writing knowledge for testsuite compilation.",
30
- "cite_spans": [],
31
- "ref_spans": [],
32
- "eq_spans": [],
33
- "section": "Abstract",
34
- "sec_num": null
35
- }
36
- ],
37
- "body_text": [
38
- {
39
- "text": "Computational Linguistics (CL) has moved towards the marketplace: One finds programs employing CLtechniques in every software shop: Speech Recognition, Grammar and Style Checking, and even Machine Translation are available as products. While this demonstrates the applicability of the research done, it also calls for a rigorous development methodology of such CL application products.",
40
- "cite_spans": [],
41
- "ref_spans": [],
42
- "eq_spans": [],
43
- "section": "Introduction",
44
- "sec_num": "1"
45
- },
46
- {
47
- "text": "In this paper,lI describe the adaptation of a technique from Software Engineering, namely code instrumentation, to grammar development. Instrumentation is based on the simple idea of marking any piece of code used in processing, and evaluating this usage information afterwards. The application I present here is the evaluation and improvement of grammar and testsuites; other applications are possible.",
48
- "cite_spans": [],
49
- "ref_spans": [],
50
- "eq_spans": [],
51
- "section": "Introduction",
52
- "sec_num": "1"
53
- },
54
- {
55
- "text": "Both software and grammar development are similar processes: They result in a system transforming some input into some output, based on a functional specification (e.g., cf. (Ciravegna et al., 1998) for the application of a particular software design methodology to linguistic engineering). Although Grammar",
56
- "cite_spans": [
57
- {
58
- "start": 174,
59
- "end": 198,
60
- "text": "(Ciravegna et al., 1998)",
61
- "ref_id": "BIBREF2"
62
- }
63
- ],
64
- "ref_spans": [],
65
- "eq_spans": [],
66
- "section": "Software Engineering vs. Grammar Engineering",
67
- "sec_num": "1.1"
68
- },
69
- {
70
- "text": "Engineering usually is not based on concrete specifications, research from linguistics provides an informal specification. Software Engineering developed many methods to assess the quality of a program, ranging from static analysis of the program code to dynamic testing of the program's behavior. Here, we adapt dynamic testing, which means running the implemented program against a set of test cases. The test cases are designed to maximize the probability of detecting errors in the program, i.e., incorrect conditions, incompatible assumptions on subsequent branches, etc. (for overviews, cf. (Hetzel, 1988; Liggesmeyer, 1990) ).",
71
- "cite_spans": [
72
- {
73
- "start": 597,
74
- "end": 611,
75
- "text": "(Hetzel, 1988;",
76
- "ref_id": "BIBREF6"
77
- },
78
- {
79
- "start": 612,
80
- "end": 630,
81
- "text": "Liggesmeyer, 1990)",
82
- "ref_id": "BIBREF9"
83
- }
84
- ],
85
- "ref_spans": [],
86
- "eq_spans": [],
87
- "section": "Software Engineering vs. Grammar Engineering",
88
- "sec_num": "1.1"
89
- },
90
- {
91
- "text": "Engineering How can we fruitfully apply the idea of measuring the coverage of a set of test cases to grammar development? I argue that by exploring the relation between grammar and testsuite, one can improve both of them. Even the traditional usage of testsuites to indicate grammar gaps or overgeneration can profit from a precise indication of the grammar rules used to parse the sentences (cf. Sec.4). Conversely, one may use the grammar to improve the testsuite, both in terms of its coverage (cf. Sec.3.1) and its economy (cf. Sec.3.2).",
92
- "cite_spans": [],
93
- "ref_spans": [],
94
- "eq_spans": [],
95
- "section": "Instrumentation in Grammar",
96
- "sec_num": "1.2"
97
- },
98
- {
99
- "text": "Viewed this way, testsuite writing can benefit from grammar development because both describe the syntactic constructions of a natural language. Testsuites systematically list these constructions, while grammars give generative procedures to construct them. Since there are currently many more grammars than testsuites, we may re-use the work that has gone into the grammars for the improvement of testsuites.",
100
- "cite_spans": [],
101
- "ref_spans": [],
102
- "eq_spans": [],
103
- "section": "Instrumentation in Grammar",
104
- "sec_num": "1.2"
105
- },
106
- {
107
- "text": "The work reported here is situated in a large cooperative project aiming at the development of largecoverage grammars for three languages. The grammars have been developed over years by different people, which makes the existence of tools for navigation, testing, and documentation mandatory. Although the sample rules given below are in the format of LFG, nothing of the methodology relies on VP~V $=T; NP?$= (I\" OBJ); PP* {$= (T OBL); 156 ($ ADJUNCT);}. ",
108
- "cite_spans": [],
109
- "ref_spans": [],
110
- "eq_spans": [],
111
- "section": "Instrumentation in Grammar",
112
- "sec_num": "1.2"
113
- },
114
- {
115
- "text": "Measures from Software Engineering cannot be simply transferred to Grammar Engineering, because the structure of programs is different from that of unification grammars. Nevertheless, the structure of a grammar allows the derivation of suitable measures, similar to the structure of programs; this is discussed in Sec.2.1. The actual instrumentation of the grammar depends on the formalism used, and is discussed in Sec.2.2.",
116
- "cite_spans": [],
117
- "ref_spans": [],
118
- "eq_spans": [],
119
- "section": "Grammar Instrumentation",
120
- "sec_num": "2"
121
- },
122
- {
123
- "text": "Consider the LFG grammar rule in Fig. 1 . 2 On first view, one could require of a testsuite that each such rule is exercised at least once. ~rther thought will indicate that there are hidden alternatives, namely the optionality of the NP and the PP. The rule can only be said to be thoroughly tested if test cases exist which test both presence and absence of optional constituents (requiring 4 test cases for this rule). In addition to context-free rules, unification grammars contain equations of various sorts, as illustrated in Fig.1 . Since these annotations may also contain disjunctions, a testsuite with complete rule coverage is not guaranteed to exercise all equation alternatives. The phrase-structure-based criterion defined above must be refined to cover all equation alternatives in the rule (requiring two test cases for the PP annotation). Even if we assume that (as, e.g., in LFG) there is at least one equation associated with each constituent, equation coverage does not subsume rule coverage: Optional constituents introduce a rule disjunct (without the constituent) that is not characterizable by an equation. A measure might thus be defined as follows: disjunct coverage The disjunct coverage of a testsuite is the quotient number of disjuncts tested Tdis = number of disjuncts in grammar 2Notation: ?/*/+ represent optionality/iteration including/excluding zero occurrences on categories. Annotations to a category specify equality (=) or set membership (6) of feature values, or non-existence of features (-1); they are terminated by a semicolon ( ; ). Disjunctions are given in braces ({... I-.. }). $ ($) are metavariables representing the feature structure corresponding to the mother (daughter) of the rule. Comments are enclosed in quotation marks (\"... \"). Cf. (Kaplan and Bresnan, 1982) for an introduction to LFG notation.",
124
- "cite_spans": [
125
- {
126
- "start": 1791,
127
- "end": 1817,
128
- "text": "(Kaplan and Bresnan, 1982)",
129
- "ref_id": "BIBREF7"
130
- }
131
- ],
132
- "ref_spans": [
133
- {
134
- "start": 33,
135
- "end": 39,
136
- "text": "Fig. 1",
137
- "ref_id": "FIGREF0"
138
- },
139
- {
140
- "start": 532,
141
- "end": 537,
142
- "text": "Fig.1",
143
- "ref_id": "FIGREF0"
144
- }
145
- ],
146
- "eq_spans": [],
147
- "section": "Coverage Criteria",
148
- "sec_num": "2.1"
149
- },
150
- {
151
- "text": "where a disjunct is either a phrase-structure alternative, or an annotation alternative. Optional constituents (and equations, if the formalism allows them) have to be treated as a disjunction of the constituent and an empty category (cf. the instrumented rule in Fig.2 for an example).",
152
- "cite_spans": [],
153
- "ref_spans": [
154
- {
155
- "start": 264,
156
- "end": 269,
157
- "text": "Fig.2",
158
- "ref_id": null
159
- }
160
- ],
161
- "eq_spans": [],
162
- "section": "Coverage Criteria",
163
- "sec_num": "2.1"
164
- },
165
- {
166
- "text": "Instead of considering disjuncts in isolation, one might take their interaction into account. The most complete test criterion, doing this to the fullest extent possible, can be defined as follows:",
167
- "cite_spans": [],
168
- "ref_spans": [],
169
- "eq_spans": [],
170
- "section": "Coverage Criteria",
171
- "sec_num": "2.1"
172
- },
173
- {
174
- "text": "interaction coverage The interaction coverage of a testsuite is the quotient number of disjunct combinations tested Tinter = number of legal disjunct combinations There are methodological problems in this criterion, however. First, the set of legal combinations may not be easily definable, due to far-reaching dependencies between disjuncts in different rules, and second, recursion leads to infinitely many legal disjunct combinations as soon as we take the number of usages of a disjunct into account. Requiring complete interaction coverage is infeasible in practice, similar to the path coverage criterion in Software Engineering.",
175
- "cite_spans": [],
176
- "ref_spans": [],
177
- "eq_spans": [],
178
- "section": "Coverage Criteria",
179
- "sec_num": "2.1"
180
- },
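
The infeasibility of complete interaction coverage is easy to see even without recursion: the number of legal combinations grows multiplicatively with grammar size. An illustrative back-of-the-envelope computation in Python (toy numbers, assuming independence between rules):

def n_combinations(disjuncts_per_rule):
    # one disjunct chosen per rule; recursion would make this infinite
    total = 1
    for n in disjuncts_per_rule:
        total *= n
    return total

print(n_combinations([2, 3, 4]))   # 24 combinations for a 3-rule toy grammar
print(n_combinations([4] * 100))   # ~1.6e60 for 100 rules of 4 disjuncts each
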
181
- {
182
- "text": "We will say that an analysis (and the sentence receiving this analysis) relies on a grammar disjunct if this disjunct was used in constructing the analysis.",
183
- "cite_spans": [],
184
- "ref_spans": [],
185
- "eq_spans": [],
186
- "section": "Coverage Criteria",
187
- "sec_num": "2.1"
188
- },
189
- {
190
- "text": "Basically, grammar instrumentation is identical to program instrumentation: For each disjunct in a given source grammar, we add grammar code that will identify this disjunct in the solution produced, iff that disjunct has been used in constructing the solution.",
191
- "cite_spans": [],
192
- "ref_spans": [],
193
- "eq_spans": [],
194
- "section": "Instrumentation",
195
- "sec_num": "2.2"
196
- },
197
- {
198
- "text": "Assuming a unique numbering of disjuncts, an annotation of the form DISJUNCT-nn = + can be used for marking. To determine whether a certain disjunct was used in constructing a solution, one only needs to check whether the associated feature occurs (at some level of embedding) in the solution. Alternatively, if set-valued features are available, one can use a set-valued feature DISJUNCTS to collect atomic symbols representing one disjunct each:",
199
- "cite_spans": [],
200
- "ref_spans": [],
201
- "eq_spans": [],
202
- "section": "Instrumentation",
203
- "sec_num": "2.2"
204
- },
205
- {
206
- "text": "DISJUNCT-nn 6 DISJUNCTS.",
207
- "cite_spans": [],
208
- "ref_spans": [],
209
- "eq_spans": [],
210
- "section": "Instrumentation",
211
- "sec_num": "2.2"
212
- },
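
Checking whether a DISJUNCT-nn mark occurs "at some level of embedding" is a simple recursive search. A sketch that models a feature structure as a nested Python dict (a simplification; real feature structures also have reentrancies):

def relies_on(fstruct, marker):
    # does the marker feature occur anywhere in the nested structure?
    if not isinstance(fstruct, dict):
        return False
    return marker in fstruct or any(relies_on(v, marker)
                                    for v in fstruct.values())

solution = {"PRED": "sleep", "SUBJ": {"PRED": "pro", "DISJUNCT-017": "+"}}
print(relies_on(solution, "DISJUNCT-017"))  # True
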
213
- {
214
- "text": "One restriction is imposed by using the unification formalism, though: One occurrence of the mark cannot be distinguished from two occurrences, since the second application of the equation introduces no new information. The markers merely unify, and there is no way of counting. (Frank et al., 1998) ). In this way, from the root node of each solution the set of all disjuncts used can be collected, together with a usage count. Fig. 2 shows the rule from Fig.1 with such an instrumentation; equations of the form DISJUNCT-nnE o* express membership of the disjunct-specific atom DISJUNCT-nn in the sentence's multiset of disjunct markers.",
215
- "cite_spans": [
216
- {
217
- "start": 279,
218
- "end": 299,
219
- "text": "(Frank et al., 1998)",
220
- "ref_id": "BIBREF4"
221
- }
222
- ],
223
- "ref_spans": [
224
- {
225
- "start": 429,
226
- "end": 435,
227
- "text": "Fig. 2",
228
- "ref_id": null
229
- },
230
- {
231
- "start": 456,
232
- "end": 461,
233
- "text": "Fig.1",
234
- "ref_id": "FIGREF0"
235
- }
236
- ],
237
- "eq_spans": [],
238
- "section": "Instrumentation",
239
- "sec_num": "2.2"
240
- },
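
The multiset behaves like Python's collections.Counter: repeated contributions of the same marker accumulate instead of merely unifying. A sketch of the idea (an analogy, not the cited environment's mechanism):

from collections import Counter

solution_markers = Counter()        # the per-solution multiset
for marker in ["DISJUNCT-001", "DISJUNCT-004", "DISJUNCT-004"]:
    solution_markers[marker] += 1   # one increment per disjunct application

print(solution_markers)  # Counter({'DISJUNCT-004': 2, 'DISJUNCT-001': 1})
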
241
- {
242
- "text": "Tool support is mandatory for a scenario such as instrumentation: Nobody will manually add equations such as those in Fig. 2 to several hundred rules. Based on the format of the grammar rules, an algorithm instrumenting a grammar can be written down easily.",
243
- "cite_spans": [],
244
- "ref_spans": [
245
- {
246
- "start": 118,
247
- "end": 124,
248
- "text": "Fig. 2",
249
- "ref_id": null
250
- }
251
- ],
252
- "eq_spans": [],
253
- "section": "Processing Tools",
254
- "sec_num": "2.3"
255
- },
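
Such an instrumentation algorithm might look as follows, assuming (purely for illustration) that a grammar maps each left-hand side to a list of disjuncts, each a list of annotation strings:

def instrument(grammar):
    counter, out = 0, {}
    for lhs, disjuncts in grammar.items():
        out[lhs] = []
        for annotations in disjuncts:
            counter += 1
            # append a marker equation identifying this disjunct
            out[lhs].append(annotations + [f"DISJUNCT-{counter:03d} ∈ o*;"])
    return out

toy = {"ADVP": [["e;"], ["ADVadj: ↓=↑;"], ["ADVstd: ↓=↑;"]]}
print(instrument(toy))
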
256
- {
257
- "text": "Given a grammar and a testsuite or corpus to compare, first an instrumented grammar must be constructed using such an algorithm. This instrumented grammar is then used to parse the testsuite, yielding a set of solutions associated with information about usage of grammar disjuncts. Up to this point, the process is completely automatic. The following two sections discuss two possibilities to evaluate this information.",
258
- "cite_spans": [],
259
- "ref_spans": [],
260
- "eq_spans": [],
261
- "section": "Processing Tools",
262
- "sec_num": "2.3"
263
- },
264
- {
265
- "text": "This section addresses the aspects of completeness (\"does the testsuite exercise all disjuncts in the grammar?\") and economy of a testsuite (\"is it minimal?\").",
266
- "cite_spans": [],
267
- "ref_spans": [],
268
- "eq_spans": [],
269
- "section": "Quality of Testsuites",
270
- "sec_num": "3"
271
- },
272
- {
273
- "text": "Complementing other work on testsuite construction (cf. Sec.5), we will assume that a grammar is already available, and that a testsuite has to be constructed or extended. While one may argue that grammar and testsuite should be developed in parallel, such that the coding of a new grammar disjunct is accompanied by the addition of suitable test cases, and vice versa, this is seldom the case. Apart from the existence of grammars which lack a testsuite, and to which this procedure could be usefully applied, there is the more principled obstacle of the evolution of the grammar, leading to states where previously necessary rules silently loose their usefulness, because their function is taken over by some other rules, structured differently. This is detectable by instrumentation, as discussed in Sec.3.1.",
274
- "cite_spans": [],
275
- "ref_spans": [],
276
- "eq_spans": [],
277
- "section": "Quality of Testsuites",
278
- "sec_num": "3"
279
- },
280
- {
281
- "text": "On the other hand, once there is a testsuite, you want to use it in the most economic way, avoiding redundant tests. Sec.3.2 shows that there are different levels of redundancy in a testsuite, dependent on the specific grammar used. Reduction of this redundancy can speed up the test activity, and give a clearer picture of the grammar's performance.",
282
- "cite_spans": [],
283
- "ref_spans": [],
284
- "eq_spans": [],
285
- "section": "Quality of Testsuites",
286
- "sec_num": "3"
287
- },
288
- {
289
- "text": "If the disjunct coverage of a testsuite is 1 for some grammar, the testsuite is complete w.r.t, this grammar. Such a testsuite can reliably be used to monitor changes in the grammar: Any reduction in the grammar's coverage will show up in the failure of some test case (for negative test cases, cf. Sec.4).",
290
- "cite_spans": [],
291
- "ref_spans": [],
292
- "eq_spans": [],
293
- "section": "Testsuite Completeness",
294
- "sec_num": "3.1"
295
- },
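
Monitoring a grammar with a complete testsuite then reduces to comparing per-case outcomes between grammar versions. A sketch (hypothetical result dictionaries mapping test case to parse success):

def regressions(before, after):
    # test cases that parsed with the old grammar but fail with the new one
    return sorted(case for case, ok in before.items()
                  if ok and not after.get(case, False))

old_run = {"s1": True, "s2": True, "s3": False}
new_run = {"s1": True, "s2": False, "s3": False}
print(regressions(old_run, new_run))  # ['s2']
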
296
- {
297
- "text": "If there is no complete testsuite, one can -via instrumentation -identify disjuncts in the grammar for which no test case exists. There might be either (i) appropriate, but untested, disjuncts calling for the addition of a test case, or (ii) inappropriate disjuncts, for which one cannot construct a grammatical test case relying on them (e.g., left-overs from rearranging the grammar). Grammar instrumentation singles out all untested disjuncts automatically, but cases (i) and (ii) have to be distinguished manually.",
298
- "cite_spans": [],
299
- "ref_spans": [],
300
- "eq_spans": [],
301
- "section": "Testsuite Completeness",
302
- "sec_num": "3.1"
303
- },
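
Singling out the untested disjuncts is then a set difference; only the manual classification into cases (i) and (ii) remains. A sketch with stand-in identifiers:

all_disjuncts = {f"DISJUNCT-{i:04d}" for i in range(1, 3731)}   # 3730 disjuncts
tested = {f"DISJUNCT-{i:04d}" for i in range(1, 1457)}          # 1456 tested
untested = sorted(all_disjuncts - tested)
print(len(untested), untested[:2])  # 2274 ['DISJUNCT-1457', 'DISJUNCT-1458']
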
304
- {
305
- "text": "Checking completeness of our local testsuite of 1787 items, we found that only 1456 out of 3730 grammar disjuncts ir~ our German grammar were tested, yielding Tdis = 0.39 (the TSNLP testsuite containing 1093 items tests only 1081 disjuncts, yielding Tdis = 0.28). 3 Fig.3 shows an example of a gap in our testsuite (there are no examples of circumpositions), while Fig.4 shows an inapproppriate disjunct thus discovered (the category ADVadj has been eliminated in the lexicon, but not in all rules). Another error class is illustrated by Fig.5 , which shows a rule that can never be used due to an LFG coherence violation; the grammar is inconsistent here. 4 3There are, of course, unparsed but grammatical test cases in both testsuites, which have not been taken into account in these figures. This explains the difference to the overall number of 1582 items in the German TSNLP testsuite. 4Test cases using a free dative pronoun may be in the testsuite, but receive no analysis since the grammatical function FREEDAT is not defined as such in the configuration section. ",
306
- "cite_spans": [],
307
- "ref_spans": [
308
- {
309
- "start": 266,
310
- "end": 271,
311
- "text": "Fig.3",
312
- "ref_id": null
313
- },
314
- {
315
- "start": 365,
316
- "end": 370,
317
- "text": "Fig.4",
318
- "ref_id": null
319
- },
320
- {
321
- "start": 538,
322
- "end": 543,
323
- "text": "Fig.5",
324
- "ref_id": null
325
- }
326
- ],
327
- "eq_spans": [],
328
- "section": "Testsuite Completeness",
329
- "sec_num": "3.1"
330
- },
331
- {
332
- "text": "Besides being complete, a testsuite must be economical, i.e., contain as few items as possible without sacrificing its diagnostic capabilities. Instrumentation can identify redundant test cases. Three criteria can be applied in determining whether a test case is redundant: similarity There is a set of other test cases which jointly rely on all disjunct on which the test case under consideration relies.",
333
- "cite_spans": [],
334
- "ref_spans": [],
335
- "eq_spans": [],
336
- "section": "Testsuite Economy",
337
- "sec_num": "3.2"
338
- },
339
- {
340
- "text": "equivalence There is a single test case which relies on exactly the same combination(s) of disjuncts.",
341
- "cite_spans": [],
342
- "ref_spans": [],
343
- "eq_spans": [],
344
- "section": "Testsuite Economy",
345
- "sec_num": "3.2"
346
- },
347
- {
348
- "text": "strict equivalence There is a single test case which is equivalent to and, additionally, relies on the disjuncts exactly as often as, the test case under consideration.",
349
- "cite_spans": [],
350
- "ref_spans": [],
351
- "eq_spans": [],
352
- "section": "Testsuite Economy",
353
- "sec_num": "3.2"
354
- },
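
Both notions of redundancy can be computed by grouping test cases on a key derived from the disjuncts they rely on: a frozen set for equivalence, a frozen multiset for strict equivalence. A Python sketch with invented data:

from collections import Counter, defaultdict

cases = {"das kleine Haus": ["D-NP", "D-ADJ", "D-N"],
         "das kleine alte Haus": ["D-NP", "D-ADJ", "D-ADJ", "D-N"]}

equiv, strict = defaultdict(list), defaultdict(list)
for sentence, used in cases.items():
    equiv[frozenset(used)].append(sentence)                    # equivalence
    strict[frozenset(Counter(used).items())].append(sentence)  # strict equiv.

print([g for g in equiv.values() if len(g) > 1])   # both sentences: equivalent
print([g for g in strict.values() if len(g) > 1])  # [] -- usage counts differ
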
355
- {
356
- "text": "For all criteria, lexical and structural ambiguities must be taken into account. Fig.6 shows some equivalent test cases derived from our testsuite: Example 1 illustrates the distinction between equivalence and strict equivalence; the test cases contain different numbers of attributive adjectives, but are nevertheless considered equivalent. Example 2 shows that our grammar does not make any distinction between adverbial usage and secondary (subject or object) predication. Example 3 shows test cases which should not be considered equivalent, and is discussed below.",
357
- "cite_spans": [],
358
- "ref_spans": [
359
- {
360
- "start": 81,
361
- "end": 86,
362
- "text": "Fig.6",
363
- "ref_id": null
364
- }
365
- ],
366
- "eq_spans": [],
367
- "section": "Testsuite Economy",
368
- "sec_num": "3.2"
369
- },
370
- {
371
- "text": "The reduction we achieved in size and processing time is shown in 'He eats the schnitzel naked/raw/quickly.'",
372
- "cite_spans": [],
373
- "ref_spans": [],
374
- "eq_spans": [],
375
- "section": "Testsuite Economy",
376
- "sec_num": "3.2"
377
- },
378
- {
379
- "text": "3 Otto versucht oft zu lachen.",
380
- "cite_spans": [],
381
- "ref_spans": [],
382
- "eq_spans": [],
383
- "section": "Testsuite Economy",
384
- "sec_num": "3.2"
385
- },
386
- {
387
- "text": "Otto versucht zu lachen.",
388
- "cite_spans": [],
389
- "ref_spans": [],
390
- "eq_spans": [],
391
- "section": "Testsuite Economy",
392
- "sec_num": "3.2"
393
- },
394
- {
395
- "text": "'Otto (often) tries to laugh.' Figure 6 : Sets of equivalent test cases ily selected), and one without similar test cases. The last was constructed using a simple heuristic: Starting with the sentence relying on the most disjuncts, working towards sentences relying on fewer disjuncts, a sentence was selected only if it relied on a disjunct on which no previously selected sentence relied. Assuming that a disjunct working correctly once will work correctly more than once, we did not consider strict equivalence. We envisage the following use of this redundancy detection: There clearly are linguistic reasons to distinguish all test cases in example 2, so they cannot simply be deleted from the testsuite. Rather, their equivalence indicates that the grammar is not yet perfect (or never will be, if it remains purely syntactic). Such equivalences could be interpreted as The level of equivalence can be taken as a limited interaction test: These test cases represent one complete selection of grammar disjuncts, and (given the grammar) there is nothing we can gain by checking a test case if an equivalent one was tested. Thus, this level of redundancy may be used for ensuring the quality of grammar changes prior to their incorporation into the production version of the grammar. The level of similarity contains much less test cases, and does not test any (systematic) interaction between disjuncts. Thus, it may be used during development as a quick rule-of-thumb procedure detecting serious errors only. Coming back to example 3 in Fig.6 , building equivalence classes also helps in detecting grammar errors: If, according to the grammar, two cases are equivalent which actually aren't, the grammar is incorrect. Example 3 shows two test cases which are syntactically different in that the first contains the adverbial oft, while the other doesn't. The reason why they are equivalent is an incorrect rule that assigns an incorrect reading to the second test case, where the infinitival particle \"zu\" functions as an adverbial.",
396
- "cite_spans": [],
397
- "ref_spans": [
398
- {
399
- "start": 31,
400
- "end": 39,
401
- "text": "Figure 6",
402
- "ref_id": null
403
- },
404
- {
405
- "start": 1541,
406
- "end": 1546,
407
- "text": "Fig.6",
408
- "ref_id": null
409
- }
410
- ],
411
- "eq_spans": [],
412
- "section": "Testsuite Economy",
413
- "sec_num": "3.2"
414
- },
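
The selection heuristic described above is a greedy, set-cover-style pass. A compact sketch (cases maps each sentence to the set of disjuncts it relies on):

def reduce_by_similarity(cases):
    kept, covered = [], set()
    # most disjuncts first, as in the heuristic described in the text
    for sentence, used in sorted(cases.items(), key=lambda kv: -len(kv[1])):
        if used - covered:          # relies on a not-yet-covered disjunct
            kept.append(sentence)
            covered |= used
    return kept

print(reduce_by_similarity({"a": {1, 2, 3}, "b": {2, 3}, "c": {4}}))  # ['a', 'c']
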
415
- {
416
- "text": "To control overgeneration, appropriately marked ungrammatical sentences are important in every testsuite. Instrumentation as proposed here only looks at successful parses, but can still be applied in this context: If an ungrammatical test case receives an analysis, instrumentation informs us about the disjuncts used in the incorrect analysis. One (or more) of these disjuncts must be incorrect, or the sentence would not have received a solution. We exploit this information by accumulation across the entire test suite, looking for disjuncts that appear in unusually high proportion in parseable ungrammatical test cases.",
417
- "cite_spans": [],
418
- "ref_spans": [],
419
- "eq_spans": [],
420
- "section": "Negative Test Cases",
421
- "sec_num": "4"
422
- },
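
Accumulating disjunct usage over parseable ungrammatical test cases and ranking disjuncts by how disproportionately often they appear there can be sketched as follows (toy identifiers, not the paper's data):

from collections import Counter

def suspicious(ungrammatical_uses, all_uses, top=3):
    bad, total = Counter(), Counter()
    for used in ungrammatical_uses:
        bad.update(set(used))
    for used in all_uses:
        total.update(set(used))
    ratio = {d: bad[d] / total[d] for d in bad}
    return sorted(ratio, key=ratio.get, reverse=True)[:top]

print(suspicious([["D-7", "D-2"], ["D-7"]],
                 [["D-7", "D-2"], ["D-7"], ["D-2", "D-5"]]))  # ['D-7', 'D-2']
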
423
- {
424
- "text": "In this manner, six grammar disjuncts are singled out by the parseable ungrammatical test cases in the TSNLP testsuite. The most prominent disjunct appears in 26 sentences (listed in Fig.7) , of which group 1 is really grammatical and the rest fall into two groups: A partial VP with object NP, interpreted as an imperative sentence (group 2), and a weird interaction with the tokenizer incorrectly handling capitalization (group 3).",
425
- "cite_spans": [],
426
- "ref_spans": [
427
- {
428
- "start": 183,
429
- "end": 189,
430
- "text": "Fig.7)",
431
- "ref_id": null
432
- }
433
- ],
434
- "eq_spans": [],
435
- "section": "Negative Test Cases",
436
- "sec_num": "4"
437
- },
438
- {
439
- "text": "Far from being conclusive, the similarity of these sentences derived from a suspicious grammar disjunct, and the clear relation of the sentences to only two exactly specifiable grammar errors make it plausible that this approach is very promising in reducing overgeneration. Although there are a number of efforts to construct reusable large-coverage testsuites, none has to my knowledge explored how existing grammars could be used for this purpose.",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "Negative Test Cases",
444
- "sec_num": "4"
445
- },
446
- {
447
- "text": "Starting with (Flickinger et al., 1987) , testsuites have been drawn up from a linguistic viewpoint, \"in-]ormed by [the] study of linguistics and [reflecting] the grammatical issues that linguists have concerned themselves with\" (Flickinger et al., 1987, , p.4) . Although the question is not explicitly addressed in (Balkan et al., 1994) , all the testsuites reviewed there also seem to follow the same methodology. The TSNLP project (Lehmann and Oepen, 1996) and its successor DiET (Netter et al., 1998) , which built large multilingual testsuites, likewise fall into this category. The use of corpora (with various levels of annotation) has been studied, but even here the recommendations are that much manual work is required to turn corpus examples into test cases (e.g., (Balkan and Fouvry, 1995) ). The reason given is that corpus sentences neither contain linguistic phenomena in isolation, nor do they contain systematic variation. Corpora thus are used only as an inspiration. (Oepen and Flickinger, 1998) stress the interdependence between application and testsuite, but don't comment on the relation between grammar and testsuite.",
448
- "cite_spans": [
449
- {
450
- "start": 14,
451
- "end": 39,
452
- "text": "(Flickinger et al., 1987)",
453
- "ref_id": "BIBREF3"
454
- },
455
- {
456
- "start": 115,
457
- "end": 158,
458
- "text": "[the] study of linguistics and [reflecting]",
459
- "ref_id": null
460
- },
461
- {
462
- "start": 229,
463
- "end": 261,
464
- "text": "(Flickinger et al., 1987, , p.4)",
465
- "ref_id": null
466
- },
467
- {
468
- "start": 317,
469
- "end": 338,
470
- "text": "(Balkan et al., 1994)",
471
- "ref_id": "BIBREF1"
472
- },
473
- {
474
- "start": 435,
475
- "end": 460,
476
- "text": "(Lehmann and Oepen, 1996)",
477
- "ref_id": "BIBREF8"
478
- },
479
- {
480
- "start": 484,
481
- "end": 505,
482
- "text": "(Netter et al., 1998)",
483
- "ref_id": "BIBREF10"
484
- },
485
- {
486
- "start": 777,
487
- "end": 802,
488
- "text": "(Balkan and Fouvry, 1995)",
489
- "ref_id": "BIBREF0"
490
- },
491
- {
492
- "start": 987,
493
- "end": 1015,
494
- "text": "(Oepen and Flickinger, 1998)",
495
- "ref_id": "BIBREF12"
496
- }
497
- ],
498
- "ref_spans": [],
499
- "eq_spans": [],
500
- "section": "Negative Test Cases",
501
- "sec_num": "4"
502
- },
503
- {
504
- "text": "The approach presented tries to make available the linguistic knowledge that went into the grammar for development of testsuites. Grammar development and testsuite compilation are seen as complementary and interacting processes, not as isolated modules. We have seen that even large testsuites cover only a fraction of existing large-coverage grammars, and presented evidence that there is a considerable amount of redundancy within existing testsuites. To empirically validate that the procedures outlined above improve grammar and testsuite, careful grammar development is required. Based on the information derived from parsing with instrumented grammars, the changes and their effects need to be evaluated. In addition to this empirical work, instrumentation can be applied to other areas in Grammar Engineering, e.g., to detect sources of spurious ambiguities, to select sample sentences relying on a disjunct for documentation, or to assist in the construction of additional test cases. Methodological work is also required for the definition of a practical and intuitive criterion to measure limited interaction coverage.",
505
- "cite_spans": [],
506
- "ref_spans": [],
507
- "eq_spans": [],
508
- "section": "Conclusion",
509
- "sec_num": "6"
510
- },
511
- {
512
- "text": "Each existing grammar development environment undoubtely offers at least some basic tools for comparing the grammar's coverage with a testsuite. Regrettably, these tools are seldomly presented publicly (which accounts for the short list of such references). It is my belief that the thorough discussion of such infrastructure items (tools and methods) is of more immediate importance to the quality of the lingware than the discussion of open linguistic problems.",
513
- "cite_spans": [],
514
- "ref_spans": [],
515
- "eq_spans": [],
516
- "section": "Conclusion",
517
- "sec_num": "6"
518
- },
519
- {
520
- "text": "1The work reported here was conducted during my time at the Institut fiir Maschinelle Sprachverarbeitung (IMS), Stuttgart University, Germany.",
521
- "cite_spans": [],
522
- "ref_spans": [],
523
- "eq_spans": [],
524
- "section": "",
525
- "sec_num": null
526
- }
527
- ],
528
- "back_matter": [],
529
- "bib_entries": {
530
- "BIBREF0": {
531
- "ref_id": "b0",
532
- "title": "Corpus-based test suite generation. TSNLP-WP 2.2",
533
- "authors": [
534
- {
535
- "first": "L",
536
- "middle": [],
537
- "last": "Balkan",
538
- "suffix": ""
539
- },
540
- {
541
- "first": "F",
542
- "middle": [],
543
- "last": "Fouvry",
544
- "suffix": ""
545
- }
546
- ],
547
- "year": 1995,
548
- "venue": "",
549
- "volume": "",
550
- "issue": "",
551
- "pages": "",
552
- "other_ids": {},
553
- "num": null,
554
- "urls": [],
555
- "raw_text": "L. Balkan and F. Fouvry. 1995. Corpus-based test suite generation. TSNLP-WP 2.2, University of Essex.",
556
- "links": null
557
- },
558
- "BIBREF1": {
559
- "ref_id": "b1",
560
- "title": "Test Suite Design Annotation Scheme",
561
- "authors": [
562
- {
563
- "first": "L",
564
- "middle": [],
565
- "last": "Balkan",
566
- "suffix": ""
567
- },
568
- {
569
- "first": "S",
570
- "middle": [],
571
- "last": "Meijer",
572
- "suffix": ""
573
- },
574
- {
575
- "first": "D",
576
- "middle": [],
577
- "last": "Arnold",
578
- "suffix": ""
579
- },
580
- {
581
- "first": "D",
582
- "middle": [],
583
- "last": "Estival",
584
- "suffix": ""
585
- },
586
- {
587
- "first": "K",
588
- "middle": [],
589
- "last": "Falkedal",
590
- "suffix": ""
591
- }
592
- ],
593
- "year": 1994,
594
- "venue": "TSNLP-WP2",
595
- "volume": "2",
596
- "issue": "",
597
- "pages": "",
598
- "other_ids": {},
599
- "num": null,
600
- "urls": [],
601
- "raw_text": "L. Balkan, S. Meijer, D. Arnold, D. Estival, and K. Falkedal. 1994. Test Suite Design Annotation Scheme. TSNLP-WP2.2, University of Essex.",
602
- "links": null
603
- },
604
- "BIBREF2": {
605
- "ref_id": "b2",
606
- "title": "Developing language reesources and applications with geppetto",
607
- "authors": [
608
- {
609
- "first": "F",
610
- "middle": [],
611
- "last": "Ciravegna",
612
- "suffix": ""
613
- },
614
- {
615
- "first": "A",
616
- "middle": [],
617
- "last": "Lavelli",
618
- "suffix": ""
619
- },
620
- {
621
- "first": "D",
622
- "middle": [],
623
- "last": "Petrelli",
624
- "suffix": ""
625
- },
626
- {
627
- "first": "F",
628
- "middle": [],
629
- "last": "Pianesi",
630
- "suffix": ""
631
- }
632
- ],
633
- "year": 1998,
634
- "venue": "Proc. 1st Int'l Con/. on Language Resources and Evaluation",
635
- "volume": "",
636
- "issue": "",
637
- "pages": "28--30",
638
- "other_ids": {},
639
- "num": null,
640
- "urls": [],
641
- "raw_text": "F. Ciravegna, A. Lavelli, D. Petrelli, and F. Pianesi. 1998. Developing language reesources and appli- cations with geppetto. In Proc. 1st Int'l Con/. on Language Resources and Evaluation, pages 619- 625. Granada/Spain, 28-30 May 1998.",
642
- "links": null
643
- },
644
- "BIBREF3": {
645
- "ref_id": "b3",
646
- "title": "Toward Evaluation o/ NLP Systems",
647
- "authors": [
648
- {
649
- "first": "D",
650
- "middle": [],
651
- "last": "Flickinger",
652
- "suffix": ""
653
- },
654
- {
655
- "first": "J",
656
- "middle": [],
657
- "last": "Nerbonne",
658
- "suffix": ""
659
- },
660
- {
661
- "first": "I",
662
- "middle": [],
663
- "last": "Sag",
664
- "suffix": ""
665
- },
666
- {
667
- "first": "T",
668
- "middle": [],
669
- "last": "Wasow",
670
- "suffix": ""
671
- }
672
- ],
673
- "year": 1987,
674
- "venue": "",
675
- "volume": "",
676
- "issue": "",
677
- "pages": "",
678
- "other_ids": {},
679
- "num": null,
680
- "urls": [],
681
- "raw_text": "D. Flickinger, J. Nerbonne, I. Sag, and T. Wa- sow. 1987. Toward Evaluation o/ NLP Systems. Hewlett-Packard Laboratories, Palo Alto/CA.",
682
- "links": null
683
- },
684
- "BIBREF4": {
685
- "ref_id": "b4",
686
- "title": "Optimality theory style constraint ranking in large-scale lfg gramma",
687
- "authors": [
688
- {
689
- "first": "A",
690
- "middle": [],
691
- "last": "Frank",
692
- "suffix": ""
693
- },
694
- {
695
- "first": "T",
696
- "middle": [
697
- "H"
698
- ],
699
- "last": "King",
700
- "suffix": ""
701
- },
702
- {
703
- "first": "J",
704
- "middle": [],
705
- "last": "Kuhn",
706
- "suffix": ""
707
- },
708
- {
709
- "first": "J",
710
- "middle": [],
711
- "last": "Maxwell",
712
- "suffix": ""
713
- }
714
- ],
715
- "year": 1998,
716
- "venue": "Proc. of the LFG98",
717
- "volume": "",
718
- "issue": "",
719
- "pages": "",
720
- "other_ids": {},
721
- "num": null,
722
- "urls": [],
723
- "raw_text": "A. Frank, T.H. King, J. Kuhn, and J. Maxwell. 1998. Optimality theory style constraint ranking in large-scale lfg gramma. In Proc. of the LFG98",
724
- "links": null
725
- },
726
- "BIBREF6": {
727
- "ref_id": "b6",
728
- "title": "The complete guide to software testing",
729
- "authors": [
730
- {
731
- "first": "W",
732
- "middle": [
733
- "C"
734
- ],
735
- "last": "Hetzel",
736
- "suffix": ""
737
- }
738
- ],
739
- "year": 1988,
740
- "venue": "",
741
- "volume": "",
742
- "issue": "",
743
- "pages": "",
744
- "other_ids": {},
745
- "num": null,
746
- "urls": [],
747
- "raw_text": "W.C. Hetzel. 1988. The complete guide to software testing. QED Information Sciences, Inc. Welles- ley/MA 02181.",
748
- "links": null
749
- },
750
- "BIBREF7": {
751
- "ref_id": "b7",
752
- "title": "Lexicalfunctional grammar: A formal system for grammatical representation",
753
- "authors": [
754
- {
755
- "first": "R",
756
- "middle": [
757
- "M"
758
- ],
759
- "last": "Kaplan",
760
- "suffix": ""
761
- },
762
- {
763
- "first": "J",
764
- "middle": [],
765
- "last": "Bresnan",
766
- "suffix": ""
767
- }
768
- ],
769
- "year": 1982,
770
- "venue": "The Mental Representation of Grammatical Relations",
771
- "volume": "",
772
- "issue": "",
773
- "pages": "173--281",
774
- "other_ids": {},
775
- "num": null,
776
- "urls": [],
777
- "raw_text": "R.M. Kaplan and J. Bresnan. 1982. Lexical- functional grammar: A formal system for gram- matical representation. In J. Bresnan and R.M. Kaplan, editors, The Mental Representation of Grammatical Relations, pages 173-281. Cam- bridge, MA: MIT Press.",
778
- "links": null
779
- },
780
- "BIBREF8": {
781
- "ref_id": "b8",
782
- "title": "TSNLP -Test Suites for Natural Language Processing",
783
- "authors": [
784
- {
785
- "first": "S",
786
- "middle": [],
787
- "last": "Lehmann",
788
- "suffix": ""
789
- },
790
- {
791
- "first": "S",
792
- "middle": [],
793
- "last": "Oepen",
794
- "suffix": ""
795
- }
796
- ],
797
- "year": 1996,
798
- "venue": "Proc. 16th Int'l Con]. on Computational Linguistics",
799
- "volume": "",
800
- "issue": "",
801
- "pages": "711--716",
802
- "other_ids": {},
803
- "num": null,
804
- "urls": [],
805
- "raw_text": "S. Lehmann and S. Oepen. 1996. TSNLP -Test Suites for Natural Language Processing. In Proc. 16th Int'l Con]. on Computational Linguistics, pages 711-716. Copenhagen/DK.",
806
- "links": null
807
- },
808
- "BIBREF9": {
809
- "ref_id": "b9",
810
- "title": "Modultest und Modulverij~ka-tion",
811
- "authors": [
812
- {
813
- "first": "P",
814
- "middle": [],
815
- "last": "Liggesmeyer",
816
- "suffix": ""
817
- }
818
- ],
819
- "year": 1990,
820
- "venue": "Angewandte Informatik",
821
- "volume": "4",
822
- "issue": "",
823
- "pages": "",
824
- "other_ids": {},
825
- "num": null,
826
- "urls": [],
827
- "raw_text": "P. Liggesmeyer. 1990. Modultest und Modulverij~ka- tion. Angewandte Informatik 4. Mannheim: BI Wissenschaftsverlag.",
828
- "links": null
829
- },
830
- "BIBREF10": {
831
- "ref_id": "b10",
832
- "title": "Diet -diagnostic and evaluation tools for nlp applications",
833
- "authors": [
834
- {
835
- "first": "K",
836
- "middle": [],
837
- "last": "Netter",
838
- "suffix": ""
839
- },
840
- {
841
- "first": "S",
842
- "middle": [],
843
- "last": "Armstrong",
844
- "suffix": ""
845
- },
846
- {
847
- "first": "T",
848
- "middle": [],
849
- "last": "Kiss",
850
- "suffix": ""
851
- },
852
- {
853
- "first": "J",
854
- "middle": [],
855
- "last": "Klein",
856
- "suffix": ""
857
- },
858
- {
859
- "first": "S",
860
- "middle": [],
861
- "last": "Lehman",
862
- "suffix": ""
863
- }
864
- ],
865
- "year": 1998,
866
- "venue": "Proc. 1st",
867
- "volume": "",
868
- "issue": "",
869
- "pages": "",
870
- "other_ids": {},
871
- "num": null,
872
- "urls": [],
873
- "raw_text": "K. Netter, S. Armstrong, T. Kiss, J. Klein, and S. Lehman. 1998. Diet -diagnostic and eval- uation tools for nlp applications. In Proc. 1st",
874
- "links": null
875
- },
876
- "BIBREF11": {
877
- "ref_id": "b11",
878
- "title": "on Language Resources and Evaluation",
879
- "authors": [],
880
- "year": 1998,
881
- "venue": "",
882
- "volume": "",
883
- "issue": "",
884
- "pages": "28--30",
885
- "other_ids": {},
886
- "num": null,
887
- "urls": [],
888
- "raw_text": "Int'l Con/. on Language Resources and Evalua- tion, pages 573-579. Granada/Spain, 28-30 May 1998.",
889
- "links": null
890
- },
891
- "BIBREF12": {
892
- "ref_id": "b12",
893
- "title": "Towards systematic grammar profiling:test suite techn. 10 years afte",
894
- "authors": [
895
- {
896
- "first": "S",
897
- "middle": [],
898
- "last": "Oepen",
899
- "suffix": ""
900
- },
901
- {
902
- "first": "D",
903
- "middle": [
904
- "P"
905
- ],
906
- "last": "Flickinger",
907
- "suffix": ""
908
- }
909
- ],
910
- "year": 1998,
911
- "venue": "Journal of Computer Speech and Language",
912
- "volume": "12",
913
- "issue": "",
914
- "pages": "411--435",
915
- "other_ids": {},
916
- "num": null,
917
- "urls": [],
918
- "raw_text": "S. Oepen and D.P. Flickinger. 1998. Towards sys- tematic grammar profiling:test suite techn. 10 years afte. Journal of Computer Speech and Lan- guage, 12:411-435.",
919
- "links": null
920
- }
921
- },
922
- "ref_entries": {
923
- "FIGREF0": {
924
- "type_str": "figure",
925
- "uris": null,
926
- "num": null,
927
- "text": "Sample Rule the choice of linguistic or computational paradigm."
928
- },
929
- "FIGREF1": {
930
- "type_str": "figure",
931
- "uris": null,
932
- "num": null,
933
- "text": "have used a special feature of our grammar development environment: Following the LFG spirit of different representation levels associated with each solution (so-called projections), it provides for a multiset of symbols associated with the complete solution, where structural embedding plays no role (so-called optimality projection; see"
934
- },
935
- "FIGREF2": {
936
- "type_str": "figure",
937
- "uris": null,
938
- "num": null,
939
- "text": "Figure 3: Appropriate untested disjunct ADVP=~ { { e DISJUNCT-021 E o*; I ADVadj 4=1` DISJUNCT-022 E o* \"unused disjunct\" ; } ADVstd 4=1\" DISJUNCT-023 E o, \"unused disjunct\" ; } I .,. } Figure 4: Inappropriate disjunct"
940
- },
941
- "FIGREF3": {
942
- "type_str": "figure",
943
- "uris": null,
944
- "num": null,
945
- "text": "Figure 7: Sentences relying on a suspicious disjunct"
946
- },
947
- "TABREF0": {
948
- "num": null,
949
- "html": null,
950
- "content": "<table><tr><td>, which contains measure-</td></tr><tr><td>ments for a test run containing only the parseable</td></tr><tr><td>test cases, one without equivalent test cases (for ev-</td></tr><tr><td>ery set of equivalent test cases, one was arbitrar-</td></tr></table>",
951
- "type_str": "table",
952
- "text": ""
953
- },
954
- "TABREF2": {
955
- "num": null,
956
- "html": null,
957
- "content": "<table><tr><td/><td>Dieselbe schlafen .</td></tr><tr><td>Die schlafen.</td><td>Das schlafen .</td></tr><tr><td/><td>Eines schlafen.</td></tr><tr><td>3 Man schlafen .</td><td>Jede schlafen .</td></tr><tr><td>Dieser schlafen .</td><td>Dieses schlafen.</td></tr><tr><td>Ich schlafen .</td><td>Eine schlafen .</td></tr><tr><td>Der schlafen.</td><td>Meins schlafen.</td></tr><tr><td>Jeder schlafen.</td><td>Dasjenige schlafen.</td></tr><tr><td>Derjenige schlafen .</td><td>Jedes schlafen .</td></tr><tr><td>Jener schlafen .</td><td>Diejenige schlafen.</td></tr><tr><td>Keiner schlafen .</td><td>Jenes schlafen.</td></tr><tr><td>Derselbe schlafen.</td><td>Keines schlafen .</td></tr><tr><td>Er schlafen.</td><td>Dasselbe schlafen.</td></tr><tr><td>Irgendjemand schlafen .</td><td/></tr></table>",
958
- "type_str": "table",
959
- "text": "Der Test fg.llt leicht."
960
- }
961
- }
962
- }
963
- }
Full_text_JSON/prefixA/json/A00/A00-1046.json DELETED
@@ -1,1548 +0,0 @@
1
- {
2
- "paper_id": "A00-1046",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:38.204220Z"
6
- },
7
- "title": "The Efficiency of Multimodal Interaction for a Map-based Task",
8
- "authors": [
9
- {
10
- "first": "Philip",
11
- "middle": [],
12
- "last": "Cohen",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "Oregon Graduate Institute of Science & Technology",
17
- "location": {
18
- "addrLine": "20000 N.W. Walker Road Beaverton",
19
- "postCode": "97006",
20
- "country": "Oregon"
21
- }
22
- },
23
- "email": ""
24
- },
25
- {
26
- "first": "David",
27
- "middle": [],
28
- "last": "Mcgee",
29
- "suffix": "",
30
- "affiliation": {
31
- "laboratory": "",
32
- "institution": "Oregon Graduate Institute of Science & Technology",
33
- "location": {
34
- "addrLine": "20000 N.W. Walker Road Beaverton",
35
- "postCode": "97006",
36
- "country": "Oregon"
37
- }
38
- },
39
- "email": ""
40
- },
41
- {
42
- "first": "Josh",
43
- "middle": [],
44
- "last": "Clow",
45
- "suffix": "",
46
- "affiliation": {
47
- "laboratory": "",
48
- "institution": "Oregon Graduate Institute of Science & Technology",
49
- "location": {
50
- "addrLine": "20000 N.W. Walker Road Beaverton",
51
- "postCode": "97006",
52
- "country": "Oregon"
53
- }
54
- },
55
- "email": ""
56
- }
57
- ],
58
- "year": "",
59
- "venue": null,
60
- "identifiers": {},
61
- "abstract": "This paper compares the efficiency of using a standard direct-manipulation graphical user interface (GUI) with that of using the QuickSet pen/voice multimodal interface for supporting a military task. In this task, a user places military units and control measures (e.g., various types of lines, obstacles, objectives) on a map. Four military personnel designed and entered their own simulation scenarios via both interfaces. Analyses revealed that the multimodal interface led to an average 3.5-fold speed improvement in the average entity creation time, including all error handling. The mean time to repair errors also was 4.3 times faster when interacting multimodally. Finally, all subjects reported a strong preference for multimodal interaction. These results indicate a substantial efficiency advantage for multimodal over GUI-based interaction during map-based tasks.",
62
- "pdf_parse": {
63
- "paper_id": "A00-1046",
64
- "_pdf_hash": "",
65
- "abstract": [
66
- {
67
- "text": "This paper compares the efficiency of using a standard direct-manipulation graphical user interface (GUI) with that of using the QuickSet pen/voice multimodal interface for supporting a military task. In this task, a user places military units and control measures (e.g., various types of lines, obstacles, objectives) on a map. Four military personnel designed and entered their own simulation scenarios via both interfaces. Analyses revealed that the multimodal interface led to an average 3.5-fold speed improvement in the average entity creation time, including all error handling. The mean time to repair errors also was 4.3 times faster when interacting multimodally. Finally, all subjects reported a strong preference for multimodal interaction. These results indicate a substantial efficiency advantage for multimodal over GUI-based interaction during map-based tasks.",
68
- "cite_spans": [],
69
- "ref_spans": [],
70
- "eq_spans": [],
71
- "section": "Abstract",
72
- "sec_num": null
73
- }
74
- ],
75
- "body_text": [
76
- {
77
- "text": "Nearly two decades ago at ACL'80, Professor Ben Shneiderman challenged the field of natural language processing as follows:",
78
- "cite_spans": [],
79
- "ref_spans": [],
80
- "eq_spans": [],
81
- "section": "Introduction",
82
- "sec_num": null
83
- },
84
- {
85
- "text": "In constructing computer systems which mimic rather than serve people, the developer may miss opportunities for applying the unique and powerful features of a computer: extreme speed, capacity to repeat tedious operations accurately, virtually unlimited storage for data, and distinctive input/output devices. Although the slow rate of human speech makes menu selection impractical, high-speed computer displays make menu selection an appealing alternative. Joysticks, light pens or the \"mouse\" are extremely rapid and accurate ways of selecting and moving graphic symbols or text on a display screen. Taking advantage of these and other computer-specific techniques will enable designers to create powerful tools without natural language commands. [20, p. 139] He also challenged us to go beyond mere claims, but to demonstrate the benefits of natural language processing technologies empirically. Since then, not only has there been a long period of unprecedented innovation in hardware, software architectures, speech processing, and natural language processing, but NLP research has also embraced empirical methods as one of its foundations. Still, we have yet to defend claims empirically that technologies for processing natural human communication are more efficient, effective, and/or preferred, than interfaces that are best viewed as \"tools,\" especially interfaces involving a direct manipulation style of interaction. The present research attempts to take a small step in this direction.",
86
- "cite_spans": [
87
- {
88
- "start": 749,
89
- "end": 761,
90
- "text": "[20, p. 139]",
91
- "ref_id": null
92
- }
93
- ],
94
- "ref_spans": [],
95
- "eq_spans": [],
96
- "section": "Introduction",
97
- "sec_num": null
98
- },
99
- {
100
- "text": "In fact, it has often been claimed that spoken language-based human-computer interaction will not only be more natural but also more efficient than keyboard-based interaction. Many of these claims derive from early modality comparison studies [1] , which found a 2-3 fold speedup in task performance when people communicated with each other by telephone vs. by keyboard. Studies of the use of some of the initial commercial speech recognition systems have reported efficiency gains of approximately 20% -40% on a variety of interactive hands-busy tasks [10] compared with keyboard input. Although these results were promising, once the time needed for error correction was included, the speed advantage of speech often evaporated [18] ~. A recent study of speech-based dictation systems [9] reported that dictation resulted in a slower and more errorful method of text creation than typing. From such results, it is often concluded that the age of spoken human-computer interaction is not yet upon us.",
101
- "cite_spans": [
102
- {
103
- "start": 243,
104
- "end": 246,
105
- "text": "[1]",
106
- "ref_id": "BIBREF0"
107
- },
108
- {
109
- "start": 553,
110
- "end": 557,
111
- "text": "[10]",
112
- "ref_id": "BIBREF9"
113
- },
114
- {
115
- "start": 730,
116
- "end": 734,
117
- "text": "[18]",
118
- "ref_id": "BIBREF17"
119
- },
120
- {
121
- "start": 787,
122
- "end": 790,
123
- "text": "[9]",
124
- "ref_id": "BIBREF8"
125
- }
126
- ],
127
- "ref_spans": [],
128
- "eq_spans": [],
129
- "section": "Introduction",
130
- "sec_num": null
131
- },
132
- {
133
- "text": "Most of these studies have compared speech with typing, However, in order to affect mainstream computing, spoken interaction would at a minimum need to be found to be superior to graphical user interfaces (GUIs) for a variety of tasks. In an early study of one component of GUIs, Rudnicky [18] compared spoken interaction with use of a scroll bar, finding that error correction wiped out the speed advantages of speech, but users still preferred to speak. Pausch and Leatherby [17] examined the use of simple speaker-dependent discrete speech commands with a graphical editor, as compared with the standard menu-based interface. With a 19-word vocabulary, subjects were found to create drawings 21% faster using speech and mouse than with the menu-based system. They conjectured that reduction in mouse-movement was the source of the advantage. In general, more research comparing speech and spokenlanguage-based interfaces with graphical user interfaces still is needed.",
134
- "cite_spans": [
135
- {
136
- "start": 289,
137
- "end": 293,
138
- "text": "[18]",
139
- "ref_id": "BIBREF17"
140
- },
141
- {
142
- "start": 477,
143
- "end": 481,
144
- "text": "[17]",
145
- "ref_id": "BIBREF16"
146
- }
147
- ],
148
- "ref_spans": [],
149
- "eq_spans": [],
150
- "section": "Introduction",
151
- "sec_num": null
152
- },
153
- {
154
- "text": "We hypothesize that one reason for the equivocal nature of these results is that speech is often being asked to perform an unnatural act the interface design requires people to speak when other modalities of communication would be more appropriate. In the past, strengths and weaknesses of various communication modalities have been described [2, 6, 13] , and a strategy of developing multimodal user interfaces has been developed using the strengths of one mode to overcome weaknesses in another,",
155
- "cite_spans": [
156
- {
157
- "start": 343,
158
- "end": 346,
159
- "text": "[2,",
160
- "ref_id": "BIBREF1"
161
- },
162
- {
163
- "start": 347,
164
- "end": 349,
165
- "text": "6,",
166
- "ref_id": "BIBREF5"
167
- },
168
- {
169
- "start": 350,
170
- "end": 353,
171
- "text": "13]",
172
- "ref_id": "BIBREF12"
173
- }
174
- ],
175
- "ref_spans": [],
176
- "eq_spans": [],
177
- "section": "Introduction",
178
- "sec_num": null
179
- },
180
- {
181
- "text": "Interface simulation studies I See also [6, 10] for a survey of results. comparing multimodal (speech/pen) interaction with speech-only have found a 35% reduction in user errors, a 30% reduction in spoken dysfluencies (which lead to recognition errors), a 10% increase in speed, and a 100% user preference for multimodal interaction over speech-only in a map-based task [14] . These results suggest that multimodal interaction may well offer advantages over GUI's for map-based tasks, and may also offer advantages for supporting error correction during dictation [16, 19] .",
182
- "cite_spans": [
183
- {
184
- "start": 40,
185
- "end": 43,
186
- "text": "[6,",
187
- "ref_id": "BIBREF5"
188
- },
189
- {
190
- "start": 44,
191
- "end": 47,
192
- "text": "10]",
193
- "ref_id": "BIBREF9"
194
- },
195
- {
196
- "start": 370,
197
- "end": 374,
198
- "text": "[14]",
199
- "ref_id": "BIBREF13"
200
- },
201
- {
202
- "start": 564,
203
- "end": 568,
204
- "text": "[16,",
205
- "ref_id": "BIBREF15"
206
- },
207
- {
208
- "start": 569,
209
- "end": 572,
210
- "text": "19]",
211
- "ref_id": "BIBREF18"
212
- }
213
- ],
214
- "ref_spans": [],
215
- "eq_spans": [],
216
- "section": "Introduction",
217
- "sec_num": null
218
- },
219
- {
220
- "text": "In order to investigate these issues, we undertook a study comparing a multimodal and a graphical user interface that were built for the same map-based task ~.",
221
- "cite_spans": [],
222
- "ref_spans": [],
223
- "eq_spans": [],
224
- "section": "Introduction",
225
- "sec_num": null
226
- },
227
- {
228
- "text": "This study compares a graphical user interface pen/voice multimodal direct-manipulation with the QuickSet interface [4] for supporting a common military planning/simulation task. In this task, a user arrays forces on a map by placing icons representing military units (e.g., the 82 n~ Airbome Division) and \"control measures,\" (e.g., various types of lines, obstacles, and objectives). A shared backend application subsystem, called Exlnit, takes the user specifications and attempts to decompose the higher echelon units into their constituents. It then positions the constituent units on the map, subject to the control measures and features of the terrain.",
229
- "cite_spans": [
230
- {
231
- "start": 116,
232
- "end": 119,
233
- "text": "[4]",
234
- "ref_id": "BIBREF3"
235
- }
236
- ],
237
- "ref_spans": [],
238
- "eq_spans": [],
239
- "section": "S t u d y ~",
240
- "sec_num": "1"
241
- },
242
- {
243
- "text": "Exlnit provides a direct manipulation GUI (built by MRJ Corp.) based on the Microsoft Windows suite of interface tools, including a tree-browser, drop-down scrolling lists, buttons (see Figure 1 ). Many military systems incorporate similar user interface tools for accomplishing these types of tasks (e.g., ModSAF [7] ). The tree-browser is used to represent and access the collection of military units. The user employs the unit browser to explore the echelon hierarchy until the desired unit is located. The user then selects that unit, and drags it onto the map in order to position it on the terrain. The system then asks for confirmation of the unit's placement. Once confirmed, Exlnit invokes its deployment server to decompose the unit into its constituents and position them on the terrain. Because this is a time-consuming process depending on the echelon of the unit, only companies and smaller units were considered.",
244
- "cite_spans": [
245
- {
246
- "start": 314,
247
- "end": 317,
248
- "text": "[7]",
249
- "ref_id": "BIBREF6"
250
- }
251
- ],
252
- "ref_spans": [
253
- {
254
- "start": 186,
255
- "end": 194,
256
- "text": "Figure 1",
257
- "ref_id": "FIGREF0"
258
- }
259
- ],
260
- "eq_spans": [],
261
- "section": "ExInit's GUI",
262
- "sec_num": "1.2"
263
- },
264
- {
265
- "text": "To create a linear or area control measure, the user pulls down a list of all control measure 4 types, then scrolls and selects the desired type. Then the user pushes a button to start entering points, selects the desired locations, and finally clicks the button to exit the point creation mode. The user is asked to confirm that the selected points are correct, after which the system connects them and creates a control measure object of the appropriate type.",
266
- "cite_spans": [],
267
- "ref_spans": [],
268
- "eq_spans": [],
269
- "section": "ExInit's GUI",
270
- "sec_num": "1.2"
271
- },
272
- {
273
- "text": "Finally, there are many more features to this GUI, but they were not considered for the present comparison. The system and its GUI were well-received by the client, and were used to develop the largest known distributed simulation (60,000 entities) for the US Government's Synthetic Theater of War program (STOW).",
274
- "cite_spans": [],
275
- "ref_spans": [],
276
- "eq_spans": [],
277
- "section": "ExInit's GUI",
278
- "sec_num": "1.2"
279
- },
280
- {
281
- "text": "QuickSet is a multimodal (pen/voice) interface for map-based tasks. With this system, a user can create entities on a map by simultaneously speaking and drawing [4] . With pen-based, spoken, or multimodal input, the user can annotate the map, creating points, lines, and areas of various types (see Figure 2 ). In virtue of its distributed multiagent architecture, QuickSet operates in various heterogeneous hardware configurations, including wearable, handheld, desktop, and wall-sized. Moreover, it controls numerous backend applications, including 3D terrain visualization [5] military simulation, disaster management [15] and medical informatics.",
282
- "cite_spans": [
283
- {
284
- "start": 161,
285
- "end": 164,
286
- "text": "[4]",
287
- "ref_id": "BIBREF3"
288
- },
289
- {
290
- "start": 576,
291
- "end": 579,
292
- "text": "[5]",
293
- "ref_id": "BIBREF4"
294
- },
295
- {
296
- "start": 621,
297
- "end": 625,
298
- "text": "[15]",
299
- "ref_id": "BIBREF14"
300
- }
301
- ],
302
- "ref_spans": [
303
- {
304
- "start": 299,
305
- "end": 307,
306
- "text": "Figure 2",
307
- "ref_id": "FIGREF1"
308
- }
309
- ],
310
- "eq_spans": [],
311
- "section": "QuickSet's Multimodai Interface",
312
- "sec_num": "1.3"
313
- },
314
- {
315
- "text": "The system operates as follows: When the pen is placed on the screen, the speech recognizer is activated, thereby allowing users to speak and gesture simultaneously. For this task, the user either selects a spot on the map and speaks the name of a unit to be placed there (e.g, \"mechanized company\"), or draws a control measure while speaking its name (e.g., \"phase line green\"). In response, QuickSet creates the appropriate military icon on its map and asks for confirmation.",
316
- "cite_spans": [],
317
- "ref_spans": [],
318
- "eq_spans": [],
319
- "section": "QuickSet's Multimodai Interface",
320
- "sec_num": "1.3"
321
- },
322
- {
323
- "text": "Speech and gesture are recognized in parallel, with the speech interpreted by a definite-clause natural language parser. For this study, IBM's Voice Type Application Factory, a continuous, speakerindependent speech recognition system, was used with a bigram grammar and 662-word vocabulary. In general, analyses of spoken language and of gesture each produce a list of interpretations represented as typed feature structures [8] . The language supported by the system essentially consists of complex noun phrases, including attached prepositional phrases and gerunds, and a small collection of sentence forms. Utterances can be just spoken, or coupled with pen-based gestures. Multimodal integration searches among the set of interpretations for the best joint interpretation [8, 22] , which often disambiguates both speech and gesture simultaneously [15] . Typed feature structure unification provides the basic information fusion operation. Taking advantage of the system's mutual disambiguation capability, QuickSet confirms its interpretation of the user input after multimodal integration [11] , thereby allowing the system to correct recognition and interpretation errors. If the result is acceptable, the user needs only to proceed; only unacceptable results require explicit disconfirmation. Finally, the multimodal interpretation is sent directly to the Exlnit deployment server, effectively bypassing the Exlnit GUI.",
324
- "cite_spans": [
325
- {
326
- "start": 425,
327
- "end": 428,
328
- "text": "[8]",
329
- "ref_id": null
330
- },
331
- {
332
- "start": 776,
333
- "end": 779,
334
- "text": "[8,",
335
- "ref_id": null
336
- },
337
- {
338
- "start": 780,
339
- "end": 783,
340
- "text": "22]",
341
- "ref_id": "BIBREF21"
342
- },
343
- {
344
- "start": 851,
345
- "end": 855,
346
- "text": "[15]",
347
- "ref_id": "BIBREF14"
348
- },
349
- {
350
- "start": 1094,
351
- "end": 1098,
352
- "text": "[11]",
353
- "ref_id": "BIBREF10"
354
- }
355
- ],
356
- "ref_spans": [],
357
- "eq_spans": [],
358
- "section": "QuickSet's Multimodai Interface",
359
- "sec_num": "1.3"
360
- },
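
The fusion step can be pictured as unification over candidate interpretations, scored jointly. The following toy Python sketch only illustrates the idea; it is not QuickSet's implementation (which uses typed feature structures):

def unify(a, b):
    out = dict(a)
    for k, v in b.items():
        if k in out and out[k] != v:
            return None              # feature clash: no joint interpretation
        out[k] = v
    return out

speech = [({"type": "unit", "echelon": "company"}, 0.9)]
gesture = [({"type": "unit", "loc": (44.2, -121.1)}, 0.8),
           ({"type": "line"}, 0.6)]
joint = [(unify(s, g), ss * gs) for s, ss in speech for g, gs in gesture]
print(max((j for j in joint if j[0] is not None), key=lambda j: j[1]))
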
361
- {
362
- "text": "The study involved four subjects who were retired US military domain experts, including a US Army National Guard Brigadier General, a US Army Reserve Major, a US Marine Corps Captain, and a US Army communications specialist. Each of the subjects was a frequent computer user, and all had familiarity both with GUIs built around the Microsoft user interface tools as well as with pen-and-paper based drawing of unit symbology and diagrams on maps. Not having used either system before, the subjects were given 30 minutes to learn the Exlnit GUI, and the same amount of time to learn QuickSet. The subjects created scenarios of their own design, using entities common to both systems, first on paper, then with each of the two systems. The scenarios had 8-21 units, and 9-33 control measures. The order of interface styles was counterbalanced across subjects in this within-subject design. The systems were run on a Pentium Pro 200MHz computer with an Input Technologies 14\" color flat-panel display. Stylus input was used for QuickSet, and keyboard and mouse were employed with the GUI.",
363
- "cite_spans": [],
364
- "ref_spans": [],
365
- "eq_spans": [],
366
- "section": "Procedure",
367
- "sec_num": "2"
368
- },
369
- {
370
- "text": "The mean time needed for each expert subject to create and position a unit or control measure was calculated for each interface. The time to create an entity began when the mouse entered the relevant interface tool or the time when the microphone was engaged by placing the pen on the map. Mouse \"travel\" time to the desired interface tool was not included because the pen could not be tracked when it was out of the sensitivity range of the digitizer. Timing ended when the system asked for confirmation of its impending action. Separate creation time calculations were made for units and control measures because the GUI employed different user interface tools for each. Also, whereas the set of QuickSet units was a subset of the units available to the GUI, the set of control measures was identical for QuickSet and the GUI.",
371
- "cite_spans": [],
372
- "ref_spans": [],
373
- "eq_spans": [],
374
- "section": "Procedure",
375
- "sec_num": "2"
376
- },
377
- {
378
- "text": "The entity creation times reported in this study include correction of all errors needed for both QuickSet and the GUI. Error correction time was accumulated for each attempt until a user confirmation (explicit or implicit) was achieved, or until the entire entity creation attempt was aborted. Only 4 multimodal interactions (total = 20.7 secs.) and 1 GUI interaction (total = 43.2 secs.) were aborted. Errors for QuickSet included out-of-vocabulary or grammar, procedural errors (e.g., not clicking on the map), disfluencies, and recognition errors. For the GUI, errors involved failure to enter or leave drawing mode, selecting the wrong unit in the browser, disconfirming, etc. Overall, QuickSet provided an 88.5% successful understanding rate.",
379
- "cite_spans": [],
380
- "ref_spans": [],
381
- "eq_spans": [],
382
- "section": "Procedure",
383
- "sec_num": "2"
384
- },
385
- {
386
- "text": "By saying \"multiple Xs,\" the QuickSet user could enter a \"mode\" in which he was creating an entity of type X (e.g., a mechanized company). To support this process, the system stored a discourse referent that was then unified with subsequent input. The user needed only to say \"here\" and touch the screen in order to create another entity of that type at that location? In these cases, the time taken to enter the mode was amortized over the entities created. Likewise, the time taken to open the unit browser to show the desired unit was amortized over the units of that type created before the browser was again scrolled.",
387
- "cite_spans": [],
388
- "ref_spans": [],
389
- "eq_spans": [],
390
- "section": "Procedure",
391
- "sec_num": "2"
392
- },
393
- {
394
- "text": "Analyses revealed that multimodal interaction resulted in a 3.7-fold speed increase in creating units compared to the GUI, paired t-test, t (3) = 5.791, p < 0.005, one-tailed. In addition, it provided a 3.3-fold increase in creating control measures paired t-test t (3) = 8.298, p < 0.002, one-tailed (see Table I ). 6 Much of this speed differential can be traced to the need to browse the echelons of the US military, scrolling long lists of units with the GUI (e.g., 126 units are in the list of US Army companies), followed by a separate dragging operation to position the selected unit. In contrast, QuickSet users specified the type of entity directly, and supplied its location in parallel. Likewise, the speed differential for the control measures may be attributed to the user's ability to both-draw and speak in parallel, where the GUI required separate actions for going into and out of drawing mode, for selecting the type of control measure, and for selecting appropriate points on the map.",
395
- "cite_spans": [],
396
- "ref_spans": [
397
- {
398
- "start": 306,
399
- "end": 313,
400
- "text": "Table I",
401
- "ref_id": "TABREF0"
402
- }
403
- ],
404
- "eq_spans": [],
405
- "section": "Results",
406
- "sec_num": "3"
407
- },
408
- {
409
- "text": "Although there were fewer errors on average when using the direct manipulation GUI, they were not significantly fewer than when interacting multimodally. In contrast, the time needed to repair an error was significantly lower when interacting multimodally than with the GUI, paired t-test, t (3) = 4.703, p<0.009, onetailed. On balance, the same users completing the same tasks spent 26% more total time correcting errors with the GUI than with the multimodal interface.",
410
- "cite_spans": [],
411
- "ref_spans": [],
412
- "eq_spans": [],
413
- "section": "Results",
414
- "sec_num": "3"
415
- },
416
- {
417
- "text": "s In general, the user could at that point say anything that would unify with the type of entity being created, such as \"facing two two five degrees in defensive posture.\" This would add additional data to the type of entity being created. Similar data could be added via the GUI, but it required interacting with a dialogue box that was only created after the unit's constituents were loaded (a time-consuming operation). Since QuickSet users could supply the data before the constituents were loaded, it was deemed more fair to ignore this QuickSet capability even though it speeds up multimodal interaction considerably, and employs more extensive natural language processing.",
418
- "cite_spans": [],
419
- "ref_spans": [],
420
- "eq_spans": [],
421
- "section": "Results",
422
- "sec_num": "3"
423
- },
424
- {
425
- "text": "It should be pointed out that the paired t-test takes into consideration the number of subjects. Thus, these findings at these significance levels are particularly strong. A second set of nonparametric tests (Wilcox on signed ranks) were also performed, indicating that multimodal interaction was significantly faster (p < 0.034, one-tailed), in creating units and control measures, and also in correcting errors. ",
426
- "cite_spans": [],
427
- "ref_spans": [],
428
- "eq_spans": [],
429
- "section": "Results",
430
- "sec_num": "3"
431
- },
432
- {
433
- "text": "Mean time m seconds required to create various types of entities and to repair errors when interacting multimodally versus with the Exlnit GUI The expert users were interviewed after the study regarding which interface they preferred and why. Multimodal interaction was strongly preferred by all users. Reasons cited included its efficiency and its support of precise drawing of linear and area features.",
434
- "cite_spans": [],
435
- "ref_spans": [],
436
- "eq_spans": [],
437
- "section": "i' 6 I 9i6",
438
- "sec_num": null
439
- },
440
- {
441
- "text": "This study indicates that when the user knows what s/he wants, there can be substantial efficiency advantages of multimodal interaction over direct manipulation GUIs for a map-based taste. Despite having only four subjects, the results exhibited extremely strong statistical significance. These results stand in contrast to prior research [6, 9, 10, 18] in which speed advantages of spoken input were washed out by the cost of correcting recognition errors.",
442
- "cite_spans": [
443
- {
444
- "start": 339,
445
- "end": 342,
446
- "text": "[6,",
447
- "ref_id": "BIBREF5"
448
- },
449
- {
450
- "start": 343,
451
- "end": 345,
452
- "text": "9,",
453
- "ref_id": "BIBREF8"
454
- },
455
- {
456
- "start": 346,
457
- "end": 349,
458
- "text": "10,",
459
- "ref_id": "BIBREF9"
460
- },
461
- {
462
- "start": 350,
463
- "end": 353,
464
- "text": "18]",
465
- "ref_id": "BIBREF17"
466
- }
467
- ],
468
- "ref_spans": [],
469
- "eq_spans": [],
470
- "section": "Conclusions",
471
- "sec_num": null
472
- },
473
- {
474
- "text": "In the present study, not only was multimodal interaction substantially faster than GUI-based interaction, even including error correction times, error correction itself was four times more costly with a GUI than with multimodal interaction. These findings do not support those of Karat et al. [9] who found that for correcting errors in a dictation task, keyboard-mouse input led to a 2.3-fold speed increase over speech. Both sets of findings might be reconciled by noting that advantages of any type of user interface, especially spoken and multimodal interaction, may be task dependent.",
475
- "cite_spans": [
476
- {
477
- "start": 294,
478
- "end": 297,
479
- "text": "[9]",
480
- "ref_id": "BIBREF8"
481
- }
482
- ],
483
- "ref_spans": [],
484
- "eq_spans": [],
485
- "section": "Conclusions",
486
- "sec_num": null
487
- },
488
- {
489
- "text": "We attribute the findings here to the ability of multimodal interfaces to support parallel specification of complementary parts of a communicative act, as well as direct rather than hierarchical or scrolled access to types of entities.",
490
- "cite_spans": [],
491
- "ref_spans": [],
492
- "eq_spans": [],
493
- "section": "Conclusions",
494
- "sec_num": null
495
- },
496
- {
497
- "text": "Moreover, because the user can employ each mode for its strengths s/he can offload different aspects of the communication to different human cognitive systems, leading to greater efficiency [21] and fewer user errors [131.",
498
- "cite_spans": [
499
- {
500
- "start": 190,
501
- "end": 194,
502
- "text": "[21]",
503
- "ref_id": "BIBREF20"
504
- }
505
- ],
506
- "ref_spans": [],
507
- "eq_spans": [],
508
- "section": "Conclusions",
509
- "sec_num": null
510
- },
511
- {
512
- "text": "It might be claimed that these results apply only to this GUI, and that a different GUI might offer superior performance. First, it is worth noting that the same pattern of results were found for the two GUI elements (drop-down list and hierarchical browser). Thus, the results cannot simply be attributed to the misuse of a hierarchical tool. Second, we point out that this GUI was developed as a product, and that many military systems use very similar user interface tools for the same purposes (selecting units)/ Thus, these results may have substantial practical impact for users performing this task. More generally, one study cannot establish results for all possible user interfaces. There will certainly be occasions in which a menubased GUI will be superior to a multimodal interface -e.g., when the user does not in fact know what s/he wants and needs to browse. Other GUI interface tools, such as a search field with command completion, can be envisioned that would provide direct access. However, it is arguable that such an interface element belongs squarely to graphical user interfaces, but draws more on features of language. Also, it would require the user to type, even in circumstances (such as mobile usage) where typing would be infeasible. Given our philosophy of using each modality for its strengths, we believe multimodal and graphical user interfaces should be integrated, rather than cast as opposites. Finally, we would expect that these advantages of multimodal interaction may generalize to other tasks and other user interfaces in which selection among many possible options is required.",
513
- "cite_spans": [],
514
- "ref_spans": [],
515
- "eq_spans": [],
516
- "section": "Conclusions",
517
- "sec_num": null
518
- },
519
- {
520
- "text": "Obviously, a small experiment only illuminates a small space. But it should be clear that when current technologies are blended into a synergistic multimodal interface the result may provide substantial improvements on some types of tasks heretofore performed with graphical user interface technologies.",
521
- "cite_spans": [],
522
- "ref_spans": [],
523
- "eq_spans": [],
524
- "section": "Conclusions",
525
- "sec_num": null
526
- },
527
- {
528
- "text": "A high-performance spoken language system was also developed for a similar task[ 12] but to our knowledge it was not formally evaluated against the relevant GUI. 3 A case study of one user was reported in[3]. This paper reports a fuller study, with different users, statistical analyses, and an expanded set of dependent measures (including error correction).",
529
- "cite_spans": [],
530
- "ref_spans": [],
531
- "eq_spans": [],
532
- "section": "",
533
- "sec_num": null
534
- },
535
- {
536
- "text": "There were 45 entries, viewable in a window of size 9. The entries consisted of linear features (boundaries, obstacles, etc.), then areas.",
537
- "cite_spans": [],
538
- "ref_spans": [],
539
- "eq_spans": [],
540
- "section": "",
541
- "sec_num": null
542
- },
543
- {
544
- "text": "In fact, a recent experiment by the US Marines had mobile combatants using small portable computers with a similar direct manipulation interface as they participated in field exercises. The user interface was generally regarded as the weakest aspect of the experiment.",
545
- "cite_spans": [],
546
- "ref_spans": [],
547
- "eq_spans": [],
548
- "section": "",
549
- "sec_num": null
550
- }
551
- ],
552
- "back_matter": [
553
- {
554
- "text": "We conjecture that the more we can take advantage of the strengths of spoken language technology, the larger this advantage will become. Future research should be searching for more such tasks, and developing more general toolkits that support rapid adaptation of multimodal technologies to support them.",
555
- "cite_spans": [],
556
- "ref_spans": [],
557
- "eq_spans": [],
558
- "section": "acknowledgement",
559
- "sec_num": null
560
- },
561
- {
562
- "text": "This work was supported in part by the Information Technology and Information Systems offices of DARPA under multiple contract numbers DABT63-95-C-007 and N66001-99-D-8503, and in part by ONR grant N00014-95-1-1164.Many thanks to Jay Pittman for the ExInit integration, Michael Johnston for ExInit vocabulary and grammar development, Liang Chen for graphics and military symbology, Sharon Oviatt for advice in experimental analysis, and to our test subjects.",
563
- "cite_spans": [],
564
- "ref_spans": [],
565
- "eq_spans": [],
566
- "section": "Acknowledgements",
567
- "sec_num": null
568
- }
569
- ],
570
- "bib_entries": {
571
- "BIBREF0": {
572
- "ref_id": "b0",
573
- "title": "Studies in interactive communication: I. The effects of four communication modes on the behavior of teams during cooperative problem solving",
574
- "authors": [
575
- {
576
- "first": "A",
577
- "middle": [],
578
- "last": "Chapanis",
579
- "suffix": ""
580
- },
581
- {
582
- "first": "R",
583
- "middle": [
584
- "B"
585
- ],
586
- "last": "Ochsman",
587
- "suffix": ""
588
- },
589
- {
590
- "first": "R",
591
- "middle": [
592
- "N"
593
- ],
594
- "last": "Parrish",
595
- "suffix": ""
596
- },
597
- {
598
- "first": "G",
599
- "middle": [
600
- "D"
601
- ],
602
- "last": "Weeks",
603
- "suffix": ""
604
- }
605
- ],
606
- "year": 1972,
607
- "venue": "Human Factors",
608
- "volume": "14",
609
- "issue": "",
610
- "pages": "487--509",
611
- "other_ids": {},
612
- "num": null,
613
- "urls": [],
614
- "raw_text": "Chapanis, A., Ochsman, R.B., Parrish, R.N., Weeks, G. D., Studies in interactive communication: I. The effects of four communication modes on the behavior of teams during cooperative problem solving. Human Factors, 1972. 14: pp. 487-509.",
615
- "links": null
616
- },
617
- "BIBREF1": {
618
- "ref_id": "b1",
619
- "title": "Synergistic use of natural language and direct manipulation",
620
- "authors": [
621
- {
622
- "first": "P",
623
- "middle": [
624
- "R"
625
- ],
626
- "last": "Cohen",
627
- "suffix": ""
628
- },
629
- {
630
- "first": "M",
631
- "middle": [],
632
- "last": "Dalrymple",
633
- "suffix": ""
634
- },
635
- {
636
- "first": "D",
637
- "middle": [
638
- "B"
639
- ],
640
- "last": "Moran",
641
- "suffix": ""
642
- },
643
- {
644
- "first": "F",
645
- "middle": [],
646
- "last": "Pereira",
647
- "suffix": ""
648
- },
649
- {
650
- "first": "J",
651
- "middle": [],
652
- "last": "Sullivan",
653
- "suffix": ""
654
- },
655
- {
656
- "first": "R",
657
- "middle": [],
658
- "last": "Gargan",
659
- "suffix": ""
660
- },
661
- {
662
- "first": "J",
663
- "middle": [],
664
- "last": "Schlossberg",
665
- "suffix": ""
666
- },
667
- {
668
- "first": "S",
669
- "middle": [],
670
- "last": "Tyler",
671
- "suffix": ""
672
- }
673
- ],
674
- "year": 1989,
675
- "venue": "Proc. of the Human-Factors in Computing Systems Con-ference (CHI'89)",
676
- "volume": "",
677
- "issue": "",
678
- "pages": "227--234",
679
- "other_ids": {},
680
- "num": null,
681
- "urls": [],
682
- "raw_text": "Cohen, P.R., Dalrymple, M., Moran, D.B., Pereira, F,, Sullivan, J., Gargan, R., Schlossberg, J., and Tyler, S., Synergistic use of natural language and direct manipulation, in Proc. of the Human- Factors in Computing Systems Con-ference (CHI'89). 1989, ACM Press: New York, pp. 227-234.",
683
- "links": null
684
- },
685
- "BIBREF2": {
686
- "ref_id": "b2",
687
- "title": "The efficiency of multimodal interaction: A case study",
688
- "authors": [
689
- {
690
- "first": "P",
691
- "middle": [
692
- "R"
693
- ],
694
- "last": "Cohen",
695
- "suffix": ""
696
- },
697
- {
698
- "first": "M",
699
- "middle": [],
700
- "last": "Johnston",
701
- "suffix": ""
702
- },
703
- {
704
- "first": "D",
705
- "middle": [],
706
- "last": "Mcgee",
707
- "suffix": ""
708
- },
709
- {
710
- "first": "S",
711
- "middle": [],
712
- "last": "Oviatt",
713
- "suffix": ""
714
- },
715
- {
716
- "first": "J",
717
- "middle": [],
718
- "last": "Clow",
719
- "suffix": ""
720
- },
721
- {
722
- "first": "I",
723
- "middle": [],
724
- "last": "Smith",
725
- "suffix": ""
726
- }
727
- ],
728
- "year": 1998,
729
- "venue": "the Proceedings of the 5th International Conference on Spoken Language Processing",
730
- "volume": "2",
731
- "issue": "",
732
- "pages": "249--252",
733
- "other_ids": {},
734
- "num": null,
735
- "urls": [],
736
- "raw_text": "Cohen, P.R., Johnston, M., McGee, D., Oviatt, S., Clow, J., and Smith, I., The efficiency of multimodal interaction: A case study, in the Proceedings of the 5th International Conference on Spoken Language Processing, Sydney, Australia, 1998, 2: pp. 249-252.",
737
- "links": null
738
- },
739
- "BIBREF3": {
740
- "ref_id": "b3",
741
- "title": "QuickSet: Multimodal interaction for distributed applications",
742
- "authors": [
743
- {
744
- "first": "P",
745
- "middle": [
746
- "R"
747
- ],
748
- "last": "Cohen",
749
- "suffix": ""
750
- },
751
- {
752
- "first": "M",
753
- "middle": [],
754
- "last": "Johnston",
755
- "suffix": ""
756
- },
757
- {
758
- "first": "D",
759
- "middle": [],
760
- "last": "Mcgee",
761
- "suffix": ""
762
- },
763
- {
764
- "first": "S",
765
- "middle": [],
766
- "last": "Oviatt",
767
- "suffix": ""
768
- },
769
- {
770
- "first": "J",
771
- "middle": [],
772
- "last": "Pittman",
773
- "suffix": ""
774
- },
775
- {
776
- "first": "I",
777
- "middle": [],
778
- "last": "Smith",
779
- "suffix": ""
780
- },
781
- {
782
- "first": "L",
783
- "middle": [],
784
- "last": "Chen",
785
- "suffix": ""
786
- },
787
- {
788
- "first": "J",
789
- "middle": [],
790
- "last": "Clow",
791
- "suffix": ""
792
- }
793
- ],
794
- "year": 1997,
795
- "venue": "Proc. of the Fifth A CM International Multmedia Conference",
796
- "volume": "",
797
- "issue": "",
798
- "pages": "31--40",
799
- "other_ids": {},
800
- "num": null,
801
- "urls": [],
802
- "raw_text": "Cohen, P.R., Johnston, M., McGee, D., Oviatt, S., Pittman, J., Smith, I., Chen, L., Clow, J., QuickSet: Multimodal interaction for distributed applications, in Proc. of the Fifth A CM International Multmedia Conference, E. Glinert, Editor. 1997, ACM Press: New York. pp. 31-40.",
803
- "links": null
804
- },
805
- "BIBREF4": {
806
- "ref_id": "b4",
807
- "title": "Multimodal Interaction for 2D and 3D Environments",
808
- "authors": [
809
- {
810
- "first": "P",
811
- "middle": [
812
- "R"
813
- ],
814
- "last": "Cohen",
815
- "suffix": ""
816
- },
817
- {
818
- "first": "D",
819
- "middle": [],
820
- "last": "Mcgee",
821
- "suffix": ""
822
- },
823
- {
824
- "first": "S",
825
- "middle": [],
826
- "last": "Oviatt",
827
- "suffix": ""
828
- },
829
- {
830
- "first": "L",
831
- "middle": [],
832
- "last": "Wu",
833
- "suffix": ""
834
- },
835
- {
836
- "first": "J",
837
- "middle": [],
838
- "last": "Clow",
839
- "suffix": ""
840
- },
841
- {
842
- "first": "R",
843
- "middle": [],
844
- "last": "King",
845
- "suffix": ""
846
- },
847
- {
848
- "first": "S",
849
- "middle": [],
850
- "last": "Julier",
851
- "suffix": ""
852
- },
853
- {
854
- "first": "L",
855
- "middle": [],
856
- "last": "Rosenblum",
857
- "suffix": ""
858
- }
859
- ],
860
- "year": 1999,
861
- "venue": "IEEE Computer Graphics and Applications",
862
- "volume": "19",
863
- "issue": "4",
864
- "pages": "10--13",
865
- "other_ids": {},
866
- "num": null,
867
- "urls": [],
868
- "raw_text": "Cohen, P.R., McGee, D., Oviatt, S., Wu, L., Clow, J., King, R., Julier, S., Rosenblum, L., Multimodal Interaction for 2D and 3D Environments. IEEE Computer Graphics and Applications, 1999. 19(4): pp. 10-13.",
869
- "links": null
870
- },
871
- "BIBREF5": {
872
- "ref_id": "b5",
873
- "title": "The Role of Voice Input for Human-Machine Communication",
874
- "authors": [
875
- {
876
- "first": "P",
877
- "middle": [
878
- "R"
879
- ],
880
- "last": "Cohen",
881
- "suffix": ""
882
- },
883
- {
884
- "first": "S",
885
- "middle": [
886
- "L"
887
- ],
888
- "last": "Oviatt",
889
- "suffix": ""
890
- }
891
- ],
892
- "year": 1995,
893
- "venue": "Proc. of the National Academy of Sciences",
894
- "volume": "92",
895
- "issue": "",
896
- "pages": "9921--9927",
897
- "other_ids": {},
898
- "num": null,
899
- "urls": [],
900
- "raw_text": "Cohen, P.R. and Oviatt, S.L., The Role of Voice Input for Human-Machine Communication. Proc. of the National Academy of Sciences, 1995. 92: pp. 9921- 9927.",
901
- "links": null
902
- },
903
- "BIBREF6": {
904
- "ref_id": "b6",
905
- "title": "ModSAF Development Status",
906
- "authors": [
907
- {
908
- "first": "A",
909
- "middle": [
910
- "J"
911
- ],
912
- "last": "Courtemanche",
913
- "suffix": ""
914
- },
915
- {
916
- "first": "A",
917
- "middle": [],
918
- "last": "Ceranowicz",
919
- "suffix": ""
920
- }
921
- ],
922
- "year": 1995,
923
- "venue": "the Proc. of the Fifth Con-ference on Computer Generated Forces and Behavioral Representation",
924
- "volume": "",
925
- "issue": "",
926
- "pages": "3--13",
927
- "other_ids": {},
928
- "num": null,
929
- "urls": [],
930
- "raw_text": "Courtemanche, A.J., Ceranowicz, A., ModSAF Development Status., in the Proc. of the Fifth Con-ference on Computer Generated Forces and Behavioral Rep- resentation, Orlando, 1995, Univ. of Central Florida, pp. 3-13.",
931
- "links": null
932
- },
933
- "BIBREF7": {
934
- "ref_id": "b7",
935
- "title": "Unification-based multimodal integration",
936
- "authors": [
937
- {
938
- "first": "Johnston",
939
- "middle": [],
940
- "last": "",
941
- "suffix": ""
942
- },
943
- {
944
- "first": "M",
945
- "middle": [],
946
- "last": "Cohen",
947
- "suffix": ""
948
- },
949
- {
950
- "first": "P",
951
- "middle": [
952
- "R"
953
- ],
954
- "last": "Mcgee",
955
- "suffix": ""
956
- },
957
- {
958
- "first": "D",
959
- "middle": [],
960
- "last": "Oviatt",
961
- "suffix": ""
962
- },
963
- {
964
- "first": "S",
965
- "middle": [
966
- "L"
967
- ],
968
- "last": "Pittman",
969
- "suffix": ""
970
- },
971
- {
972
- "first": "J",
973
- "middle": [
974
- "A"
975
- ],
976
- "last": "Smith",
977
- "suffix": ""
978
- },
979
- {
980
- "first": "I",
981
- "middle": [],
982
- "last": "",
983
- "suffix": ""
984
- }
985
- ],
986
- "year": 1997,
987
- "venue": "the Proc. of the 35th Annual Meeting of the Association for Computational Linguistics (ACL) and 8th Conference of the European Chapter",
988
- "volume": "",
989
- "issue": "",
990
- "pages": "281--288",
991
- "other_ids": {},
992
- "num": null,
993
- "urls": [],
994
- "raw_text": "8, Johnston, M., Cohen, P. R., McGee, D., Oviatt, S. L., Pittman, J. A., Smith., I. Unification-based multimodal integration., in the Proc. of the 35th Annual Meeting of the Association for Computational Linguistics (ACL) and 8th Conference of the European Chapter of the ACL, 1997, pp. 281-288.",
995
- "links": null
996
- },
997
- "BIBREF8": {
998
- "ref_id": "b8",
999
- "title": "Patterns of entry and correction in large vocabulary continuous speech recognition systems",
1000
- "authors": [
1001
- {
1002
- "first": "C",
1003
- "middle": [],
1004
- "last": "Karat",
1005
- "suffix": ""
1006
- },
1007
- {
1008
- "first": "C",
1009
- "middle": [],
1010
- "last": "Halverson",
1011
- "suffix": ""
1012
- },
1013
- {
1014
- "first": "D",
1015
- "middle": [],
1016
- "last": "Horn",
1017
- "suffix": ""
1018
- },
1019
- {
1020
- "first": "J",
1021
- "middle": [],
1022
- "last": "Karat",
1023
- "suffix": ""
1024
- }
1025
- ],
1026
- "year": 1999,
1027
- "venue": "the Proc. of Human Factors in Com-puting Systems",
1028
- "volume": "",
1029
- "issue": "",
1030
- "pages": "568--575",
1031
- "other_ids": {},
1032
- "num": null,
1033
- "urls": [],
1034
- "raw_text": "Karat, C., Halverson, C., Horn, D., and Karat, J., Patterns of entry and correction in large vocabulary continuous speech recognition systems, in the Proc. of Human Factors in Com-puting Systems, New York, 1999, ACM Press, pp. 568-575.",
1035
- "links": null
1036
- },
1037
- "BIBREF9": {
1038
- "ref_id": "b9",
1039
- "title": "The utility of speech input in user-computer interfaces",
1040
- "authors": [
1041
- {
1042
- "first": "G",
1043
- "middle": [
1044
- "L"
1045
- ],
1046
- "last": "Martin",
1047
- "suffix": ""
1048
- }
1049
- ],
1050
- "year": 1989,
1051
- "venue": "International Journal of Man-machine Studies",
1052
- "volume": "30",
1053
- "issue": "4",
1054
- "pages": "355--375",
1055
- "other_ids": {},
1056
- "num": null,
1057
- "urls": [],
1058
- "raw_text": "Martin, G.L., The utility of speech input in user-computer interfaces. International Journal of Man-machine Studies, 1989. 30(4): pp. 355-375.",
1059
- "links": null
1060
- },
1061
- "BIBREF10": {
1062
- "ref_id": "b10",
1063
- "title": "Confirmation in Multimodal Systems",
1064
- "authors": [
1065
- {
1066
- "first": "D",
1067
- "middle": [],
1068
- "last": "Mcgee",
1069
- "suffix": ""
1070
- },
1071
- {
1072
- "first": "P",
1073
- "middle": [
1074
- "R"
1075
- ],
1076
- "last": "Cohen",
1077
- "suffix": ""
1078
- },
1079
- {
1080
- "first": "S",
1081
- "middle": [
1082
- "L"
1083
- ],
1084
- "last": "Oviatt",
1085
- "suffix": ""
1086
- }
1087
- ],
1088
- "year": 1998,
1089
- "venue": "Proc. of the 17th International Conference on Computational Linguistics (COLING 98) and 36th Annual Meeting of the Association for Computational Linguistics (ACL98)",
1090
- "volume": "",
1091
- "issue": "",
1092
- "pages": "823--829",
1093
- "other_ids": {},
1094
- "num": null,
1095
- "urls": [],
1096
- "raw_text": "McGee, D., Cohen, P.R., and Oviatt, S.L., Confirmation in Multimodal Systems, in Proc. of the 17th International Conference on Computational Linguistics (COLING 98) and 36th Annual Meeting of the Association for Computational Linguistics (ACL98). 1998: Montreal, Canada. pp. 823- 829.",
1097
- "links": null
1098
- },
1099
- "BIBREF11": {
1100
- "ref_id": "b11",
1101
- "title": "CommandTalk: A Spoken-Language Interface for Battlefield Simulations",
1102
- "authors": [
1103
- {
1104
- "first": "R",
1105
- "middle": [],
1106
- "last": "Moore",
1107
- "suffix": ""
1108
- },
1109
- {
1110
- "first": "J",
1111
- "middle": [],
1112
- "last": "Dowding",
1113
- "suffix": ""
1114
- },
1115
- {
1116
- "first": "H",
1117
- "middle": [],
1118
- "last": "Bratt",
1119
- "suffix": ""
1120
- },
1121
- {
1122
- "first": "J",
1123
- "middle": [],
1124
- "last": "Gawron",
1125
- "suffix": ""
1126
- },
1127
- {
1128
- "first": "Y",
1129
- "middle": [],
1130
- "last": "Gorfu",
1131
- "suffix": ""
1132
- },
1133
- {
1134
- "first": "A",
1135
- "middle": [],
1136
- "last": "Cheyer",
1137
- "suffix": ""
1138
- }
1139
- ],
1140
- "year": 1997,
1141
- "venue": "Proc. of the 5th Conference on Applied Natural Language Processing",
1142
- "volume": "",
1143
- "issue": "",
1144
- "pages": "1--7",
1145
- "other_ids": {},
1146
- "num": null,
1147
- "urls": [],
1148
- "raw_text": "Moore, R., Dowding, J., Bratt, H., Gawron, J., Gorfu, Y., Cheyer, A., CommandTalk: A Spoken-Language Interface for Battlefield Simulations, Proc. of the 5th Conference on Applied Natural Language Processing, Association for Computational Linguistics, 1997: Washington, DC. pp. 1-7.",
1149
- "links": null
1150
- },
1151
- "BIBREF12": {
1152
- "ref_id": "b12",
1153
- "title": "Pen/Voice: Complementary multimodal communication",
1154
- "authors": [
1155
- {
1156
- "first": "S",
1157
- "middle": [
1158
- "L"
1159
- ],
1160
- "last": "Oviatt",
1161
- "suffix": ""
1162
- }
1163
- ],
1164
- "year": null,
1165
- "venue": "Proc. of Speech Tech'92",
1166
- "volume": "",
1167
- "issue": "",
1168
- "pages": "238--241",
1169
- "other_ids": {},
1170
- "num": null,
1171
- "urls": [],
1172
- "raw_text": "Oviatt, S. L., Pen/Voice: Complementary multimodal communication, Proc. of Speech Tech'92, New York, 238-241",
1173
- "links": null
1174
- },
1175
- "BIBREF13": {
1176
- "ref_id": "b13",
1177
- "title": "Multimodal interactive maps: Designing for human performance",
1178
- "authors": [
1179
- {
1180
- "first": "S",
1181
- "middle": [
1182
- "L"
1183
- ],
1184
- "last": "Oviatt",
1185
- "suffix": ""
1186
- }
1187
- ],
1188
- "year": 1997,
1189
- "venue": "",
1190
- "volume": "12",
1191
- "issue": "",
1192
- "pages": "93--129",
1193
- "other_ids": {},
1194
- "num": null,
1195
- "urls": [],
1196
- "raw_text": "Oviatt, S.L., Multimodal interactive maps: Designing for human performance. Human Computer Interaction, 1997. 12: pp. 93- 129.",
1197
- "links": null
1198
- },
1199
- "BIBREF14": {
1200
- "ref_id": "b14",
1201
- "title": "Mutual disambiguation of recognition errors in a multimodal architecture",
1202
- "authors": [
1203
- {
1204
- "first": "S",
1205
- "middle": [
1206
- "L"
1207
- ],
1208
- "last": "Oviatt",
1209
- "suffix": ""
1210
- }
1211
- ],
1212
- "year": 1999,
1213
- "venue": "the Proc. of the Conference on Human Factors in Computing System",
1214
- "volume": "",
1215
- "issue": "",
1216
- "pages": "576--583",
1217
- "other_ids": {},
1218
- "num": null,
1219
- "urls": [],
1220
- "raw_text": "Oviatt, S.L., Mutual disambiguation of recognition errors in a multimodal architecture, in the Proc. of the Conference on Human Factors in Computing System, New York, 1999, ACM Press, pp. 576-583.",
1221
- "links": null
1222
- },
1223
- "BIBREF15": {
1224
- "ref_id": "b15",
1225
- "title": "Designing the user interface for multimodal speech and gesture applications: State-of-the-art systems and research directions for 2000 and beyond",
1226
- "authors": [
1227
- {
1228
- "first": "S",
1229
- "middle": [
1230
- "L"
1231
- ],
1232
- "last": "Oviatt",
1233
- "suffix": ""
1234
- },
1235
- {
1236
- "first": "P",
1237
- "middle": [
1238
- "R"
1239
- ],
1240
- "last": "Cohen",
1241
- "suffix": ""
1242
- },
1243
- {
1244
- "first": "L",
1245
- "middle": [],
1246
- "last": "Wu",
1247
- "suffix": ""
1248
- },
1249
- {
1250
- "first": "J",
1251
- "middle": [],
1252
- "last": "Vergo",
1253
- "suffix": ""
1254
- },
1255
- {
1256
- "first": "L",
1257
- "middle": [],
1258
- "last": "Duncan",
1259
- "suffix": ""
1260
- },
1261
- {
1262
- "first": "B",
1263
- "middle": [],
1264
- "last": "Suhm",
1265
- "suffix": ""
1266
- },
1267
- {
1268
- "first": "J",
1269
- "middle": [],
1270
- "last": "Bers",
1271
- "suffix": ""
1272
- },
1273
- {
1274
- "first": "T",
1275
- "middle": [],
1276
- "last": "Holzman",
1277
- "suffix": ""
1278
- },
1279
- {
1280
- "first": "T",
1281
- "middle": [],
1282
- "last": "Winograd",
1283
- "suffix": ""
1284
- },
1285
- {
1286
- "first": "J",
1287
- "middle": [],
1288
- "last": "Landay",
1289
- "suffix": ""
1290
- },
1291
- {
1292
- "first": "J",
1293
- "middle": [],
1294
- "last": "Larson",
1295
- "suffix": ""
1296
- },
1297
- {
1298
- "first": "D",
1299
- "middle": [],
1300
- "last": "Ferro",
1301
- "suffix": ""
1302
- }
1303
- ],
1304
- "year": null,
1305
- "venue": "",
1306
- "volume": "",
1307
- "issue": "",
1308
- "pages": "",
1309
- "other_ids": {},
1310
- "num": null,
1311
- "urls": [],
1312
- "raw_text": "Oviatt, S.L., Cohen, P. R., Wu, L., Vergo, J., Duncan, L., Suhm, B., Bers, J., Holzman, T., Winograd, T., Landay, J., Larson, J., Ferro, D., Designing the user interface for multimodal speech and gesture applications: State-of-the-art systems and research directions for 2000 and beyond. In submission.",
1313
- "links": null
1314
- },
1315
- "BIBREF16": {
1316
- "ref_id": "b16",
1317
- "title": "A study comparing mouse-only vs. mouse-plusvoice input for a graphical editor",
1318
- "authors": [
1319
- {
1320
- "first": "R",
1321
- "middle": [],
1322
- "last": "Pausch",
1323
- "suffix": ""
1324
- },
1325
- {
1326
- "first": "J",
1327
- "middle": [
1328
- "H"
1329
- ],
1330
- "last": "Leatherby",
1331
- "suffix": ""
1332
- }
1333
- ],
1334
- "year": 1991,
1335
- "venue": "Journal of the American Voice Input~Output Society",
1336
- "volume": "9",
1337
- "issue": "2",
1338
- "pages": "55--66",
1339
- "other_ids": {},
1340
- "num": null,
1341
- "urls": [],
1342
- "raw_text": "Pausch, R. and Leatherby, J. H., A study comparing mouse-only vs. mouse-plus- voice input for a graphical editor, Journal of the American Voice Input~Output Society, 9:2, July, 1991, pp 55-66",
1343
- "links": null
1344
- },
1345
- "BIBREF17": {
1346
- "ref_id": "b17",
1347
- "title": "Mode Preference in a simple data-retrieval task",
1348
- "authors": [
1349
- {
1350
- "first": "A",
1351
- "middle": [
1352
- "I"
1353
- ],
1354
- "last": "Rudnicky",
1355
- "suffix": ""
1356
- }
1357
- ],
1358
- "year": 1993,
1359
- "venue": "ARPA Human Language Technology Workshop",
1360
- "volume": "",
1361
- "issue": "",
1362
- "pages": "",
1363
- "other_ids": {},
1364
- "num": null,
1365
- "urls": [],
1366
- "raw_text": "Rudnicky, A.I., Mode Preference in a simple data-retrieval task, in ARPA Human Language Technology Workshop. March 1993: Princeton, New Jersey.",
1367
- "links": null
1368
- },
1369
- "BIBREF18": {
1370
- "ref_id": "b18",
1371
- "title": "Model-based and empirical evaluation of multimodal interactive error correction",
1372
- "authors": [
1373
- {
1374
- "first": "B",
1375
- "middle": [],
1376
- "last": "Suhm",
1377
- "suffix": ""
1378
- },
1379
- {
1380
- "first": "B",
1381
- "middle": [],
1382
- "last": "Myers",
1383
- "suffix": ""
1384
- },
1385
- {
1386
- "first": "A",
1387
- "middle": [],
1388
- "last": "Waibel",
1389
- "suffix": ""
1390
- }
1391
- ],
1392
- "year": 1999,
1393
- "venue": "the Proc. of the Conf. on Human Factors in Computing Systems",
1394
- "volume": "",
1395
- "issue": "",
1396
- "pages": "584--591",
1397
- "other_ids": {},
1398
- "num": null,
1399
- "urls": [],
1400
- "raw_text": "Suhm, B., Myers, B., and Waibel, A., Model-based and empirical evaluation of multimodal interactive error correction, in the Proc. of the Conf. on Human Factors in Computing Systems, New York, 1999, ACM Press, 584-591.",
1401
- "links": null
1402
- },
1403
- "BIBREF19": {
1404
- "ref_id": "b19",
1405
- "title": "Natural vs. precise concise languages for human operation of computers: Research issues and experimental approaches",
1406
- "authors": [
1407
- {
1408
- "first": "B",
1409
- "middle": [],
1410
- "last": "Shneiderman",
1411
- "suffix": ""
1412
- }
1413
- ],
1414
- "year": 1980,
1415
- "venue": "Proceedings of the 18 u' Annual Meeting of the Association for Computational Linguistics, and Parasession on Topics in Interactive Discourse",
1416
- "volume": "",
1417
- "issue": "",
1418
- "pages": "139--141",
1419
- "other_ids": {},
1420
- "num": null,
1421
- "urls": [],
1422
- "raw_text": "Shneiderman, B., Natural vs. precise concise languages for human operation of computers: Research issues and experimental approaches. Proceedings of the 18 u' Annual Meeting of the Association for Computational Linguistics, and Parasession on Topics in Interactive Discourse, Univ. of Pennsylvania, June, 1980, pp. 139-141.",
1423
- "links": null
1424
- },
1425
- "BIBREF20": {
1426
- "ref_id": "b20",
1427
- "title": "Compatibility and resource competition between modalities of input, central processing, and output",
1428
- "authors": [
1429
- {
1430
- "first": "C",
1431
- "middle": [],
1432
- "last": "Wickens",
1433
- "suffix": ""
1434
- },
1435
- {
1436
- "first": "D",
1437
- "middle": [],
1438
- "last": "Sandry",
1439
- "suffix": ""
1440
- },
1441
- {
1442
- "first": "M",
1443
- "middle": [],
1444
- "last": "Vidulich",
1445
- "suffix": ""
1446
- }
1447
- ],
1448
- "year": 1983,
1449
- "venue": "Human Factors",
1450
- "volume": "25",
1451
- "issue": "2",
1452
- "pages": "227--248",
1453
- "other_ids": {},
1454
- "num": null,
1455
- "urls": [],
1456
- "raw_text": "Wickens, C., Sandry, D., and Vidulich, M., Compatibility and resource competition between modalities of input, central processing, and output. Human Factors, 1983.25(2): pp. 227-248.",
1457
- "links": null
1458
- },
1459
- "BIBREF21": {
1460
- "ref_id": "b21",
1461
- "title": "Statistical multimodal integration for intelligent HCI",
1462
- "authors": [
1463
- {
1464
- "first": "L",
1465
- "middle": [],
1466
- "last": "Wu",
1467
- "suffix": ""
1468
- },
1469
- {
1470
- "first": "S",
1471
- "middle": [],
1472
- "last": "Oviatt",
1473
- "suffix": ""
1474
- },
1475
- {
1476
- "first": "L",
1477
- "middle": [],
1478
- "last": "Cohen",
1479
- "suffix": ""
1480
- },
1481
- {
1482
- "first": "P",
1483
- "middle": [
1484
- "R. ; Y H"
1485
- ],
1486
- "last": "Hu",
1487
- "suffix": ""
1488
- },
1489
- {
1490
- "first": "J",
1491
- "middle": [],
1492
- "last": "Larsen",
1493
- "suffix": ""
1494
- },
1495
- {
1496
- "first": "E",
1497
- "middle": [],
1498
- "last": "Wilson",
1499
- "suffix": ""
1500
- },
1501
- {
1502
- "first": "Douglas",
1503
- "middle": [],
1504
- "last": "",
1505
- "suffix": ""
1506
- },
1507
- {
1508
- "first": "S",
1509
- "middle": [],
1510
- "last": "",
1511
- "suffix": ""
1512
- }
1513
- ],
1514
- "year": 1999,
1515
- "venue": "Neural Networks for Signal Processing",
1516
- "volume": "",
1517
- "issue": "",
1518
- "pages": "487--496",
1519
- "other_ids": {},
1520
- "num": null,
1521
- "urls": [],
1522
- "raw_text": "Wu, L., Oviatt, S., L. and Cohen, P. R., Statistical multimodal integration for intelligent HCI, in Neural Networks for Signal Processing, Y.H. Hu, Larsen, J., Wilson, E., and Douglas, S., Editors. 1999, IEEE Press: New York. pp. 487-496.",
1523
- "links": null
1524
- }
1525
- },
1526
- "ref_entries": {
1527
- "FIGREF0": {
1528
- "type_str": "figure",
1529
- "num": null,
1530
- "uris": null,
1531
- "text": "The ExInit GUI"
1532
- },
1533
- "FIGREF1": {
1534
- "type_str": "figure",
1535
- "num": null,
1536
- "uris": null,
1537
- "text": "QuickSet"
1538
- },
1539
- "TABREF0": {
1540
- "text": "",
1541
- "html": null,
1542
- "content": "<table><tr><td colspan=\"2\">Create Units</td><td colspan=\"2\">Create Contr61 Measures</td><td>Repair Errors</td><td/></tr><tr><td>MM</td><td>GUI</td><td>MM</td><td>GUI</td><td>MM</td><td>GUI</td></tr><tr><td>8.4</td><td>25.6</td><td>6.5</td><td>27.5</td><td colspan=\"2\">12.9 49,3</td></tr><tr><td>6.0</td><td>14.4</td><td>5.2</td><td>19.0</td><td>7.7</td><td>30</td></tr><tr><td>6~3</td><td>27.2</td><td/><td/><td>II.6</td><td>56.1</td></tr><tr><td>4.0</td><td>18.5</td><td>4.0</td><td>17.7</td><td>6.3</td><td>23.0</td></tr><tr><td>:</td><td/><td/><td/><td/><td/></tr></table>",
1543
- "num": null,
1544
- "type_str": "table"
1545
- }
1546
- }
1547
- }
1548
- }
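The per-subject means preserved in Table I of this parse are enough to reproduce the paired t-tests reported in its Results passages. Below is a minimal sketch (Python with SciPy; the variable names are ours, and the four values per condition are read off the "Create Units" columns of Table I):

from scipy import stats

# Per-subject mean unit-creation times in seconds, from Table I.
mm = [8.4, 6.0, 6.3, 4.0]       # multimodal (QuickSet)
gui = [25.6, 14.4, 27.2, 18.5]  # direct-manipulation GUI

t, p_two_tailed = stats.ttest_rel(gui, mm)
print(f"t(3) = {t:.3f}, one-tailed p = {p_two_tailed / 2:.4f}")
# Prints t(3) = 5.791, one-tailed p = 0.0051, matching the reported
# "paired t-test, t (3) = 5.791, p < 0.005, one-tailed" up to the
# one-decimal rounding of the values in Table I.

The error-repair columns reproduce the reported t(3) = 4.703 the same way; the control-measure comparison cannot be checked, since one subject's values did not survive extraction.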
Full_text_JSON/prefixA/json/A00/A00-2000.json DELETED
@@ -1,578 +0,0 @@
1
- {
2
- "paper_id": "A00-2000",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:30.789869Z"
6
- },
7
- "title": "NAACL 2000-PREFACE On behalf of the Program Committee for NAACL 2000, I am pleased to present you with the papers accepted for presentation at the First Meeting of the North American Chapter of the Association for Computational Linguistics, held in Seattle, Washington, April 29-May 4, 2000. NAACL received a gratifyingly large number of papers from around the world. Submissions were received from 28 countries. Reviewing was blind to all reviewers and area chairs. It was also highly selective. Out of 166 submissions, 43 were selected for presentation at NAACL 2000. Selecting the papers was not an easy task. In total, over 110 reviewers, representing 20 different countries, reported to a senior program committee consisting of eight area chairs. The senior program committee spent an intensive day at a meeting in Virginia reaching the final decisions. The area chairs and reviewers cannot be thanked enough for the conscientious and painstaking jobs they performed. All those who contributed are named on the following page, but I would particularly like to express my thanks here to the area chairs",
8
- "authors": [
9
- {
10
- "first": "Michael",
11
- "middle": [],
12
- "last": "Collins",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": ""
16
- },
17
- {
18
- "first": "Janyce",
19
- "middle": [],
20
- "last": "Wiebe",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": ""
24
- },
25
- {
26
- "first": "Steve",
27
- "middle": [],
28
- "last": "Abney",
29
- "suffix": "",
30
- "affiliation": {},
31
- "email": ""
32
- },
33
- {
34
- "first": "Francis",
35
- "middle": [],
36
- "last": "Bond",
37
- "suffix": "",
38
- "affiliation": {},
39
- "email": ""
40
- },
41
- {
42
- "first": "Eric",
43
- "middle": [],
44
- "last": "Brili",
45
- "suffix": "",
46
- "affiliation": {},
47
- "email": ""
48
- },
49
- {
50
- "first": "Ted",
51
- "middle": [],
52
- "last": "Briscoe",
53
- "suffix": "",
54
- "affiliation": {},
55
- "email": ""
56
- },
57
- {
58
- "first": "Jean",
59
- "middle": [],
60
- "last": "Carletta",
61
- "suffix": "",
62
- "affiliation": {},
63
- "email": ""
64
- },
65
- {
66
- "first": "Eugene",
67
- "middle": [],
68
- "last": "Charniak",
69
- "suffix": "",
70
- "affiliation": {},
71
- "email": ""
72
- },
73
- {
74
- "first": "Michael",
75
- "middle": [],
76
- "last": "Elhadad",
77
- "suffix": "",
78
- "affiliation": {},
79
- "email": ""
80
- },
81
- {
82
- "first": "Joshua",
83
- "middle": [],
84
- "last": "Goodman",
85
- "suffix": "",
86
- "affiliation": {},
87
- "email": ""
88
- },
89
- {
90
- "first": "Jan",
91
- "middle": [],
92
- "last": "Haji~",
93
- "suffix": "",
94
- "affiliation": {},
95
- "email": ""
96
- },
97
- {
98
- "first": "Eva",
99
- "middle": [],
100
- "last": "Haji6ova",
101
- "suffix": "",
102
- "affiliation": {},
103
- "email": ""
104
- },
105
- {
106
- "first": "Peter",
107
- "middle": [],
108
- "last": "Heeman",
109
- "suffix": "",
110
- "affiliation": {},
111
- "email": ""
112
- },
113
- {
114
- "first": "Beth-Ann",
115
- "middle": [],
116
- "last": "Hockey",
117
- "suffix": "",
118
- "affiliation": {},
119
- "email": ""
120
- },
121
- {
122
- "first": "Nasa",
123
- "middle": [
124
- "/"
125
- ],
126
- "last": "Riacs",
127
- "suffix": "",
128
- "affiliation": {},
129
- "email": ""
130
- },
131
- {
132
- "first": "Rukmini",
133
- "middle": [],
134
- "last": "Iyer",
135
- "suffix": "",
136
- "affiliation": {},
137
- "email": ""
138
- },
139
- {
140
- "first": "Bbn",
141
- "middle": [],
142
- "last": "Technologies",
143
- "suffix": "",
144
- "affiliation": {},
145
- "email": ""
146
- },
147
- {
148
- "first": "Mark",
149
- "middle": [
150
- "T"
151
- ],
152
- "last": "Johnson",
153
- "suffix": "",
154
- "affiliation": {},
155
- "email": ""
156
- },
157
- {
158
- "first": "Doug",
159
- "middle": [],
160
- "last": "Jones",
161
- "suffix": "",
162
- "affiliation": {},
163
- "email": ""
164
- },
165
- {
166
- "first": "John",
167
- "middle": [],
168
- "last": "Lafferty",
169
- "suffix": "",
170
- "affiliation": {},
171
- "email": ""
172
- },
173
- {
174
- "first": "James",
175
- "middle": [],
176
- "last": "Lester",
177
- "suffix": "",
178
- "affiliation": {},
179
- "email": ""
180
- },
181
- {
182
- "first": "Lori",
183
- "middle": [],
184
- "last": "Levin",
185
- "suffix": "",
186
- "affiliation": {},
187
- "email": ""
188
- },
189
- {
190
- "first": "Hang",
191
- "middle": [],
192
- "last": "Li",
193
- "suffix": "",
194
- "affiliation": {},
195
- "email": ""
196
- },
197
- {
198
- "first": "Nec",
199
- "middle": [],
200
- "last": "Naaclii",
201
- "suffix": "",
202
- "affiliation": {},
203
- "email": ""
204
- },
205
- {
206
- "first": "Ken",
207
- "middle": [],
208
- "last": "Litkowski",
209
- "suffix": "",
210
- "affiliation": {},
211
- "email": ""
212
- },
213
- {
214
- "first": "Adwait",
215
- "middle": [],
216
- "last": "Rathnaparkhi",
217
- "suffix": "",
218
- "affiliation": {},
219
- "email": ""
220
- },
221
- {
222
- "first": "Manny",
223
- "middle": [],
224
- "last": "Rayner",
225
- "suffix": "",
226
- "affiliation": {},
227
- "email": ""
228
- },
229
- {
230
- "first": "Jeff",
231
- "middle": [],
232
- "last": "Reynar",
233
- "suffix": "",
234
- "affiliation": {},
235
- "email": ""
236
- },
237
- {
238
- "first": "Klaus",
239
- "middle": [],
240
- "last": "Ries",
241
- "suffix": "",
242
- "affiliation": {},
243
- "email": ""
244
- },
245
- {
246
- "first": "Suzanne",
247
- "middle": [],
248
- "last": "Stevenson",
249
- "suffix": "",
250
- "affiliation": {},
251
- "email": ""
252
- },
253
- {
254
- "first": "Colin",
255
- "middle": [],
256
- "last": "Matheson",
257
- "suffix": "",
258
- "affiliation": {},
259
- "email": ""
260
- },
261
- {
262
- "first": "Massimo",
263
- "middle": [],
264
- "last": "Poesio",
265
- "suffix": "",
266
- "affiliation": {},
267
- "email": ""
268
- },
269
- {
270
- "first": "Daniel",
271
- "middle": [],
272
- "last": "Marcu",
273
- "suffix": "",
274
- "affiliation": {},
275
- "email": ""
276
- },
277
- {
278
- "first": "Lynn",
279
- "middle": [],
280
- "last": "Carlson",
281
- "suffix": "",
282
- "affiliation": {},
283
- "email": ""
284
- },
285
- {
286
- "first": "John",
287
- "middle": [
288
- "C"
289
- ],
290
- "last": "Henderson",
291
- "suffix": "",
292
- "affiliation": {},
293
- "email": ""
294
- },
295
- {
296
- "first": "Antonietta",
297
- "middle": [],
298
- "last": "Alonge",
299
- "suffix": "",
300
- "affiliation": {},
301
- "email": ""
302
- },
303
- {
304
- "first": "Francesca",
305
- "middle": [],
306
- "last": "Bertagna",
307
- "suffix": "",
308
- "affiliation": {},
309
- "email": ""
310
- },
311
- {
312
- "first": "Nicoletta",
313
- "middle": [],
314
- "last": "Calzolari",
315
- "suffix": "",
316
- "affiliation": {},
317
- "email": ""
318
- },
319
- {
320
- "first": "Adriana",
321
- "middle": [],
322
- "last": "Roventini",
323
- "suffix": "",
324
- "affiliation": {},
325
- "email": ""
326
- },
327
- {
328
- "first": "Erik",
329
- "middle": [
330
- "F Tjong"
331
- ],
332
- "last": "Kim",
333
- "suffix": "",
334
- "affiliation": {},
335
- "email": ""
336
- },
337
- {
338
- "first": "Christopher",
339
- "middle": [],
340
- "last": "Johnson",
341
- "suffix": "",
342
- "affiliation": {},
343
- "email": ""
344
- },
345
- {
346
- "first": "M",
347
- "middle": [
348
- "P"
349
- ],
350
- "last": "Harper",
351
- "suffix": "",
352
- "affiliation": {},
353
- "email": ""
354
- },
355
- {
356
- "first": "C",
357
- "middle": [
358
- "M"
359
- ],
360
- "last": "White",
361
- "suffix": "",
362
- "affiliation": {},
363
- "email": ""
364
- },
365
- {
366
- "first": "W",
367
- "middle": [],
368
- "last": "Wang",
369
- "suffix": "",
370
- "affiliation": {},
371
- "email": ""
372
- },
373
- {
374
- "first": "Takehito",
375
- "middle": [],
376
- "last": "Utsuro",
377
- "suffix": "",
378
- "affiliation": {},
379
- "email": ""
380
- },
381
- {
382
- "first": "Shigeyuki",
383
- "middle": [],
384
- "last": "Nishiokayama",
385
- "suffix": "",
386
- "affiliation": {},
387
- "email": ""
388
- },
389
- {
390
- "first": "Masakazu",
391
- "middle": [],
392
- "last": "Fujio",
393
- "suffix": "",
394
- "affiliation": {},
395
- "email": ""
396
- },
397
- {
398
- "first": "Martin",
399
- "middle": [],
400
- "last": "Chodorow",
401
- "suffix": "",
402
- "affiliation": {},
403
- "email": ""
404
- },
405
- {
406
- "first": "Stephan",
407
- "middle": [],
408
- "last": "Oepen",
409
- "suffix": "",
410
- "affiliation": {},
411
- "email": ""
412
- },
413
- {
414
- "first": "Hongyan",
415
- "middle": [],
416
- "last": "Jing",
417
- "suffix": "",
418
- "affiliation": {},
419
- "email": ""
420
- },
421
- {
422
- "first": "Kathleen",
423
- "middle": [
424
- "R"
425
- ],
426
- "last": "Mckeown",
427
- "suffix": "",
428
- "affiliation": {},
429
- "email": ""
430
- },
431
- {
432
- "first": "Jennifer",
433
- "middle": [],
434
- "last": "Chu-Carroll",
435
- "suffix": "",
436
- "affiliation": {},
437
- "email": ""
438
- },
439
- {
440
- "first": "Marilyn",
441
- "middle": [],
442
- "last": "Walker",
443
- "suffix": "",
444
- "affiliation": {},
445
- "email": ""
446
- },
447
- {
448
- "first": "Irene",
449
- "middle": [],
450
- "last": "Langkilde",
451
- "suffix": "",
452
- "affiliation": {},
453
- "email": ""
454
- },
455
- {
456
- "first": "Jerry",
457
- "middle": [],
458
- "last": "Wright",
459
- "suffix": "",
460
- "affiliation": {},
461
- "email": ""
462
- },
463
- {
464
- "first": "Allen",
465
- "middle": [],
466
- "last": "Gorin",
467
- "suffix": "",
468
- "affiliation": {},
469
- "email": ""
470
- },
471
- {
472
- "first": "Diane",
473
- "middle": [
474
- "J"
475
- ],
476
- "last": "Litman",
477
- "suffix": "",
478
- "affiliation": {},
479
- "email": ""
480
- },
481
- {
482
- "first": "Julia",
483
- "middle": [
484
- "B"
485
- ],
486
- "last": "Hirschberg",
487
- "suffix": "",
488
- "affiliation": {},
489
- "email": ""
490
- },
491
- {
492
- "first": "Scott",
493
- "middle": [],
494
- "last": "Miller",
495
- "suffix": "",
496
- "affiliation": {},
497
- "email": ""
498
- },
499
- {
500
- "first": "Kubota",
501
- "middle": [],
502
- "last": "Rie",
503
- "suffix": "",
504
- "affiliation": {},
505
- "email": ""
506
- },
507
- {
508
- "first": "",
509
- "middle": [],
510
- "last": "Ando",
511
- "suffix": "",
512
- "affiliation": {},
513
- "email": ""
514
- },
515
- {
516
- "first": "Diana",
517
- "middle": [],
518
- "last": "Mccarthy",
519
- "suffix": "",
520
- "affiliation": {},
521
- "email": ""
522
- },
523
- {
524
- "first": "Mark-Jan",
525
- "middle": [],
526
- "last": "Nederhof",
527
- "suffix": "",
528
- "affiliation": {},
529
- "email": ""
530
- },
531
- {
532
- "first": "Claire",
533
- "middle": [],
534
- "last": "Gardent",
535
- "suffix": "",
536
- "affiliation": {},
537
- "email": ""
538
- },
539
- {
540
- "first": "",
541
- "middle": [],
542
- "last": "Karsten",
543
- "suffix": "",
544
- "affiliation": {},
545
- "email": ""
546
- },
547
- {
548
- "first": "Martin",
549
- "middle": [],
550
- "last": "Romacker",
551
- "suffix": "",
552
- "affiliation": {},
553
- "email": ""
554
- }
555
- ],
556
- "year": "",
557
- "venue": null,
558
- "identifiers": {},
559
- "abstract": "In its first year, it was advantageous to co-locate NAACL with ANLP, an established conference. To coordinate the two conferences, submissions focusing on end-applications were invited to ANLP 2000, while submissions focusing on methodology were invited to NAACL 2000. Future NAACL conferences will encourage both types of submissions.",
560
- "pdf_parse": {
561
- "paper_id": "A00-2000",
562
- "_pdf_hash": "",
563
- "abstract": [
564
- {
565
- "text": "In its first year, it was advantageous to co-locate NAACL with ANLP, an established conference. To coordinate the two conferences, submissions focusing on end-applications were invited to ANLP 2000, while submissions focusing on methodology were invited to NAACL 2000. Future NAACL conferences will encourage both types of submissions.",
566
- "cite_spans": [],
567
- "ref_spans": [],
568
- "eq_spans": [],
569
- "section": "Abstract",
570
- "sec_num": null
571
- }
572
- ],
573
- "body_text": [],
574
- "back_matter": [],
575
- "bib_entries": {},
576
- "ref_entries": {}
577
- }
578
- }
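With this commit the loose Full_text_JSON/ tree is replaced by a tar archive, so downstream code has to stream the parses out of the archive instead of globbing the directory. Below is a minimal sketch of doing so in Python (the archive name "full_text.tar.gz" and its gzip compression are assumptions, not something recorded in this commit; the field names follow the S2ORC 1.0.0 layout of the deleted files above):

import json
import tarfile

with tarfile.open("full_text.tar.gz", "r:gz") as archive:
    for member in archive.getmembers():
        if not member.name.endswith(".json"):
            continue
        parse = json.load(archive.extractfile(member))
        bib = parse["pdf_parse"]["bib_entries"]
        for passage in parse["pdf_parse"]["body_text"]:
            for span in passage["cite_spans"]:
                # start/end index into passage["text"]; ref_id is null when
                # the parser could not link the citation to a bib entry.
                marker = passage["text"][span["start"]:span["end"]]
                entry = bib.get(span["ref_id"]) if span["ref_id"] else None
                title = entry["title"] if entry else "(unresolved)"
                print(parse["paper_id"], marker, "->", title)

Files with an empty body_text list (such as A00-2000 above, whose content sits entirely in the metadata and abstract) simply contribute no rows.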
Full_text_JSON/prefixA/json/A00/A00-2001.json DELETED
@@ -1,1425 +0,0 @@
1
- {
2
- "paper_id": "A00-2001",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:49.003731Z"
6
- },
7
- "title": "Modelling Grounding",
8
- "authors": [
9
- {
10
- "first": "Colin",
11
- "middle": [],
12
- "last": "Matheson",
13
- "suffix": "",
14
- "affiliation": {
15
- "laboratory": "",
16
- "institution": "University of Edinburgh Edinburgh",
17
- "location": {
18
- "addrLine": "Scotland colin"
19
- }
20
- },
21
- "email": ""
22
- },
23
- {
24
- "first": "Massimo",
25
- "middle": [],
26
- "last": "Poesio",
27
- "suffix": "",
28
- "affiliation": {
29
- "laboratory": "",
30
- "institution": "University of Edinburgh Edinburgh",
31
- "location": {
32
- "country": "Scotland"
33
- }
34
- },
35
- "email": "[email protected]"
36
- },
37
- {
38
- "first": "David",
39
- "middle": [],
40
- "last": "Traum",
41
- "suffix": "",
42
- "affiliation": {
43
- "laboratory": "",
44
- "institution": "University of Maryland Maryland",
45
- "location": {
46
- "country": "USA"
47
- }
48
- },
49
- "email": "[email protected]"
50
- }
51
- ],
52
- "year": "",
53
- "venue": null,
54
- "identifiers": {},
55
- "abstract": "This paper describes an implementation of some key aspects of a theory of dialogue processing whose main concerns are to provide models of GROUNDING and of the role of DISCOURSE OBLIGATIONS in an agent's deliberation processes. Our system uses the TrindiKit dialogue move engine toolkit, which assumes a model of dialogue in which a participan. t's knowledge is characterised in terms of INFORMATION STATES which are subject to various kinds of updating mechanisms.",
56
- "pdf_parse": {
57
- "paper_id": "A00-2001",
58
- "_pdf_hash": "",
59
- "abstract": [
60
- {
61
- "text": "This paper describes an implementation of some key aspects of a theory of dialogue processing whose main concerns are to provide models of GROUNDING and of the role of DISCOURSE OBLIGATIONS in an agent's deliberation processes. Our system uses the TrindiKit dialogue move engine toolkit, which assumes a model of dialogue in which a participan. t's knowledge is characterised in terms of INFORMATION STATES which are subject to various kinds of updating mechanisms.",
62
- "cite_spans": [],
63
- "ref_spans": [],
64
- "eq_spans": [],
65
- "section": "Abstract",
66
- "sec_num": null
67
- }
68
- ],
69
- "body_text": [
70
- {
71
- "text": "In this paper we describe a preliminary implementation of a 'middle-level' dialogue management system. The key tasks of a dialogue manager are to update the representation of dialogue on the basis of processed input (generally, but not exclusively, language utterances), and to decide what (if anything) the system should do next. There is a wide range of opinions concerning how these tasks should be performed, and in particular, how the ongoing dialogue state should be represented: e.g., as something very specific to a particular domain, or according to some more general theory of (human or human inspired) dialogue processing. At one extreme, some systems represent only the (typically very rigid) transitions possible in a perceived dialogue for the given task, often using finite states in a transition network to represent the dialogue: examples of this are systems built using Nuance's DialogueBuilder or the CSLU's Rapid Application Prototyper. The other extreme is to build the dialogue processing theory on top of a full model of rational agency (e.g., (Bretier and Sadek, 1996) ). The approach we take here lies in between these two extremes: we use rich representations of information states, but simpler, more dialogue-specific deliberation methods, rather than a deductive reasoner working on the basis of an axiomatic theory of rational agency. We show in this paper that the theory of information states we propose can, nevertheless, be used to give a characterisation of dialogue acts such as those proposed by the Discourse Resource Initiative precise enough to formalise the deliberation process of a dialogue manager in a completely declarative fashion.",
72
- "cite_spans": [
73
- {
74
- "start": 1067,
75
- "end": 1092,
76
- "text": "(Bretier and Sadek, 1996)",
77
- "ref_id": "BIBREF3"
78
- }
79
- ],
80
- "ref_spans": [],
81
- "eq_spans": [],
82
- "section": "Introduction",
83
- "sec_num": "1"
84
- },
85
- {
86
- "text": "Our implementation is based on the approach to dialogue developed in (Traum, 1994; Poesio and Traum, 1997; Poesio and Traum, 1998; . This theory, like other action-based theories of dialogue, views dialogue participation in terms of agents performing dialogue acts, the effects of which are to update the information state of the participants in a dialogue. However, our view of dialogue act effects is closer in some respects to that of (Allwood, 1976; Allwood, 1994) and (Singh, 1998) than to the belief and intention model of (Sadek, 1991; Grosz and Sidner, 1990; Cohen and Levesque, 1990) . Particular emphasis is placed on the social commitments of the dialogue participants (obligations to act and commitments to propositions) without making explicit claims about the actual beliefs and intentions of the participants. Also, heavy emphasis is placed on how dialogue participants socially GROUND (Clark and Wilkes-Gibbs, 1986 ) the information expressed in dialogue: the information state assumed in this theory specifies which information is assumed to be already part of the common ground at a given point, and which part has been introduced, but not yet been established.",
87
- "cite_spans": [
88
- {
89
- "start": 69,
90
- "end": 82,
91
- "text": "(Traum, 1994;",
92
- "ref_id": "BIBREF23"
93
- },
94
- {
95
- "start": 83,
96
- "end": 106,
97
- "text": "Poesio and Traum, 1997;",
98
- "ref_id": null
99
- },
100
- {
101
- "start": 107,
102
- "end": 130,
103
- "text": "Poesio and Traum, 1998;",
104
- "ref_id": "BIBREF16"
105
- },
106
- {
107
- "start": 438,
108
- "end": 453,
109
- "text": "(Allwood, 1976;",
110
- "ref_id": "BIBREF0"
111
- },
112
- {
113
- "start": 454,
114
- "end": 468,
115
- "text": "Allwood, 1994)",
116
- "ref_id": "BIBREF1"
117
- },
118
- {
119
- "start": 473,
120
- "end": 486,
121
- "text": "(Singh, 1998)",
122
- "ref_id": "BIBREF18"
123
- },
124
- {
125
- "start": 529,
126
- "end": 542,
127
- "text": "(Sadek, 1991;",
128
- "ref_id": "BIBREF17"
129
- },
130
- {
131
- "start": 543,
132
- "end": 566,
133
- "text": "Grosz and Sidner, 1990;",
134
- "ref_id": "BIBREF11"
135
- },
136
- {
137
- "start": 567,
138
- "end": 592,
139
- "text": "Cohen and Levesque, 1990)",
140
- "ref_id": "BIBREF8"
141
- },
142
- {
143
- "start": 901,
144
- "end": 930,
145
- "text": "(Clark and Wilkes-Gibbs, 1986",
146
- "ref_id": "BIBREF6"
147
- }
148
- ],
149
- "ref_spans": [],
150
- "eq_spans": [],
151
- "section": "Introduction",
152
- "sec_num": "1"
153
- },
154
- {
155
- "text": "The rest of this paper is structured as follows. The theory of dialogue underlying the implementation is described in more detail in Section 2. Section 3 describes the implementation itself. Section 4 shows how the system updates its information state while participating in a fairly simple dialogue.",
156
- "cite_spans": [],
157
- "ref_spans": [],
158
- "eq_spans": [],
159
- "section": "Introduction",
160
- "sec_num": "1"
161
- },
162
- {
163
- "text": "One basic assumption underlying this work is that it is useful to analyse dialogues by describing the relevant 'information' that is available to each participant. The notion of INFORMATION STATE (IS) is therefore employed in deciding what the next action should be, and the effects of utterances are described in terms of the changes they bring about in ISs. A particular instantiation of a dialogue manager, from this point of view, consists of a definition of the contents of ISs plus a description of the update processes which map from IS to IS. Updates are typically triggered by 'full' dialogue acts such as assertions or directives, 1 of course, but the theory allows parts of utterances, including individual words and even subparts of words, to be the trigger. The update rules for dialogue acts that we assume here are a simplified version of the formalisations proposed in (Poesio and Traum, 1998; Traum et al., 1999) (henceforth, PTT) .",
164
- "cite_spans": [
165
- {
166
- "start": 885,
167
- "end": 909,
168
- "text": "(Poesio and Traum, 1998;",
169
- "ref_id": "BIBREF16"
170
- },
171
- {
172
- "start": 910,
173
- "end": 947,
174
- "text": "Traum et al., 1999) (henceforth, PTT)",
175
- "ref_id": null
176
- }
177
- ],
178
- "ref_spans": [],
179
- "eq_spans": [],
180
- "section": "Theoretical Background",
181
- "sec_num": "2"
182
- },
183
- {
184
- "text": "The main aspects of PTT which have been implemented concern the way discourse obligations are handled and the manner in which dialogue participants interact to add information to the common ground. Obligations are essentially social in nature, and directly characterise spoken dialogue; a typical example of a discourse obligation concerns the relationship between questions and answers. Poesio and Traum follow (Traum and Allen, 1994) in suggesting that the utterance of a question imposes an obligation on the hearer to address the question (e.g., by providing an answer), irrespective of intentions.",
185
- "cite_spans": [
186
- {
187
- "start": 388,
188
- "end": 435,
189
- "text": "Poesio and Traum follow (Traum and Allen, 1994)",
190
- "ref_id": null
191
- }
192
- ],
193
- "ref_spans": [],
194
- "eq_spans": [],
195
- "section": "Theoretical Background",
196
- "sec_num": "2"
197
- },
198
- {
199
- "text": "As for the process by which common ground is established, or GROUNDING (Clark and Schaefer, 1989; Traum, 1994) , the assumption in PTT is that classical speech act theory is inherently too simplistic in that it ignores the fact that co-operative interaction is essential in discourse; thus, for instance, simply asserting something does not make it become mutually 'known' (part of the common ground). It is actually necessary for the hearer to provide some kind of acknowledgement that the assertion has been received, understood or not understood, accepted or rejected, and so on. Poesio and Traum view the public information state as including both material that has already been grounded, indicated by GND here, and material that hasn't been grounded yet. These components of the information state are updated when GROUNDING ACTS such as acknowledgement are performed. Each new contribution results in a new DIS-COURSE UNIT (DU) being added to the information state (Traum, 1994) and recorded in a list of 'ungrounded discourse units' (UDUS); these DUs can then be subsequently grounded as the result, e.g., of (implicit or explicit) acknowledgements.",
200
- "cite_spans": [
201
- {
202
- "start": 71,
203
- "end": 97,
204
- "text": "(Clark and Schaefer, 1989;",
205
- "ref_id": "BIBREF5"
206
- },
207
- {
208
- "start": 98,
209
- "end": 110,
210
- "text": "Traum, 1994)",
211
- "ref_id": "BIBREF23"
212
- },
213
- {
214
- "start": 970,
215
- "end": 983,
216
- "text": "(Traum, 1994)",
217
- "ref_id": "BIBREF23"
218
- }
219
- ],
220
- "ref_spans": [],
221
- "eq_spans": [],
222
- "section": "Theoretical Background",
223
- "sec_num": "2"
224
- },
225
- {
226
- "text": "Implementing PTT In this section, we describe the details of the implementation. First, in Section 3.1, we describe the TrindiKit tool for building dialogue managers that we used to build our system. In Section 3.2, we describe the information states used in the implementation, an extension and simplification of the ideas from PTT discussed in the previous section. Then, in Section 3.3, we discuss how the information state is updated when dialogue acts are observed. Finally, 1We assume here the DRI classification of dialogue acts (Discourse Resource Initiative, 1997). in Section 3.4, we describe the rules used by the system to adopt intentions and perform its own actions. An extended example of how these mechanisms are used to track and participate in a dialogue is presented in Section 4.",
227
- "cite_spans": [],
228
- "ref_spans": [],
229
- "eq_spans": [],
230
- "section": "3",
231
- "sec_num": null
232
- },
233
- {
234
- "text": "The basis for our implementation is the TrindiKit dialogue move engine toolkit implemented as part of the TRINDI project . The toolkit provides support for developing dialogue systems, focusing on the central dialogue management components.",
235
- "cite_spans": [],
236
- "ref_spans": [],
237
- "eq_spans": [],
238
- "section": "TrindiKit",
239
- "sec_num": "3.1"
240
- },
241
- {
242
- "text": "The system architecture assumed by the TrindiKit is shown in Figure 1 . A prominent feature of this architecture is the information state, which serves as a central 'blackboard' that processing modules can examine (by means of defined CONDITIONS) or change (by means of defined OPERATIONS). The structure of the IS for a particular dialogue system is defined' by the developer who uses the TrindiKit to build that system, on the basis of his/her own theory of dialogue processing; no predefined notion of information state is provided. 2. The toolkit provides a number of abstract data-types such as lists, stacks, and records, along with associated conditions and operations, that can be used to implement the user's theory of information states; other abstract types can also be defined. In addition to this customisable notion of information state, TrindiKit provides a few system variables that can also used for intermodule communication. These include input for the raw observed (language) input, latest_moves which 2In TRINDI we are experimenting with multiple instantiations of three different theories of information state . contains the dialogue moves observed in the most recent turn, latest_speaker, and next_moves, containing the dialogue moves to be performed by the system in the next turn.",
243
- "cite_spans": [],
244
- "ref_spans": [
245
- {
246
- "start": 61,
247
- "end": 69,
248
- "text": "Figure 1",
249
- "ref_id": null
250
- }
251
- ],
252
- "eq_spans": [],
253
- "section": "TrindiKit",
254
- "sec_num": "3.1"
255
- },
256
- {
257
- "text": "A complete system is assumed to consist of several modules interacting via the IS. (See Figure 1 again.) The central component is called the DIA-LOGUE MOVE ENGINE (DME). The DME performs the processing needed to integrate the observed dialogue moves with the IS, and to select new moves for the system to perform. These two functions are encapsulated in the UPDATE and SELECTION submodules of the DME. The update and select modules are specified by means of typed rules, as well as sequencing procedures to determine when to apply the rules. We are here mainly concerned with UP-DATE RULES (urules), which consist of four parts: a name, a type, a list of conditions to check in the information state, and a list of operations to perform on the information state, urules are described in more detail below, in Section 3.3. There are also two modules outside the DME proper, but still crucial to a complete system: INTERPRETATION, which consumes the input and produces a list of dialogue acts in the latest_moves variable (potentially making reference to the current information state), and GENERATION, which produces NL output from the dialogue acts in the next_moves variable. Finally, there is a CONTROL module, that governs the sequencing (or parallel invocation) of the other modules. In this paper we focus on the IS and the DME; our current implementation only uses very simple interpretation and generation components.",
258
- "cite_spans": [],
259
- "ref_spans": [
260
- {
261
- "start": 88,
262
- "end": 96,
263
- "text": "Figure 1",
264
- "ref_id": null
265
- }
266
- ],
267
- "eq_spans": [],
268
- "section": "TrindiKit",
269
- "sec_num": "3.1"
270
- },
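As a rough sketch of the control flow just described (modules that communicate only through a shared information state and a handful of system variables such as input, latest_moves, and next_moves), the following Python fragment may be useful. It is not TrindiKit code, which is Prolog-based; every name in it is illustrative.

```python
# Hypothetical sketch of the TrindiKit-style architecture described above:
# modules communicate only through a shared information state ("blackboard")
# and a few system variables. Not actual TrindiKit code.
from dataclasses import dataclass, field

@dataclass
class SystemVars:
    input: str = ""
    latest_moves: list = field(default_factory=list)
    latest_speaker: str = ""
    next_moves: list = field(default_factory=list)

def interpret(sv, info_state):
    # Toy interpretation module: raw input -> dialogue moves in latest_moves.
    pred = "info_request" if sv.input.strip().endswith("?") else "assert"
    sv.latest_moves = [(pred, sv.latest_speaker, sv.input)]

def update(sv, info_state):
    # DME update submodule: integrate observed moves into the IS.
    info_state.setdefault("dh", []).extend(sv.latest_moves)

def select(sv, info_state):
    # DME selection submodule: choose the system's next moves.
    pred, dp, arg = info_state["dh"][-1]
    if pred == "info_request":
        sv.next_moves = [("answer", "W", arg)]

def generate(sv, info_state):
    # Toy generation module: realise next_moves as text.
    return "; ".join(f"{p}({dp}, {a!r})" for p, dp, a in sv.next_moves)

def control(sv, info_state):
    # Control module: sequence the other modules around the shared IS.
    interpret(sv, info_state)
    update(sv, info_state)
    select(sv, info_state)
    return generate(sv, info_state)

print(control(SystemVars(input="Which route do you want?", latest_speaker="C"), {}))
```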
271
- {
272
- "text": "In this section we discuss the information state used in the current implementation. The main difference between the implemented IS and the theoretical proposal in (Poesio and Traum, 1998) is that in the implementation the information state is partitioned in fields, each containing information of different types, whereas in the theoretical version the information state is a single repository of facts (a DISCOURSE REPRESENTATION STRUCTURE). Other differences are discussed below. An example IS with some fields filled is shown in Figure 2 ; this is the IS which results from the second utterance in the example dialogue discussed in Section 4, A route please. 3",
273
- "cite_spans": [
274
- {
275
- "start": 164,
276
- "end": 188,
277
- "text": "(Poesio and Traum, 1998)",
278
- "ref_id": "BIBREF16"
279
- }
280
- ],
281
- "ref_spans": [
282
- {
283
- "start": 533,
284
- "end": 541,
285
- "text": "Figure 2",
286
- "ref_id": "FIGREF1"
287
- }
288
- ],
289
- "eq_spans": [],
290
- "section": "Information States in PTT",
291
- "sec_num": "3.2"
292
- },
293
- {
294
- "text": "The IS in Figure 2 is a record with two main parts, W and C. The first of these represents the system's (Wizard) view of his own mental state and of the (semi-)public information discussed in the dialogue; the second, his view of the user's (Caller) information state. This second part is needed to 3All diagrams in this paper are automatically generated from TrindiKit system internal representations and displayed using the Thistle dialogue editor (Calder, 1998 1 /,,,,o_,..,,,.,( w.:,,.-, grounded; as we are not concerned with this problem here, we will ignore C in what follows. w contains information on the grounded material (GND), on the ungrounded information (UDUS, PDU and CDU), and on W's intentions (INT) . GND contains the information that has already been grounded; the other fields contain information about the contributions still to be grounded. As noticed above, in PTT it is assumed that for each new utterance, a new DU is created and added to the IS. The current implementation differs from the full theory in that only two DUs are retained at each point; the current DU (CDU) and the previous DU (PDU). The CDU contains the information in the latest contribution, while the PDU contains information from the penultimate contribution. Information is moved from PDU to GND as a result of an ack (acknowledgement) dialogue act (see below.)",
295
- "cite_spans": [
296
- {
297
- "start": 450,
298
- "end": 463,
299
- "text": "(Calder, 1998",
300
- "ref_id": "BIBREF4"
301
- }
308
- ],
309
- "ref_spans": [
310
- {
311
- "start": 10,
312
- "end": 18,
313
- "text": "Figure 2",
314
- "ref_id": "FIGREF1"
315
- }
322
- ],
323
- "eq_spans": [],
324
- "section": "Information States in PTT",
325
- "sec_num": "3.2"
326
- },
327
- {
328
- "text": "The DUs and the GND field contain four fields, representing obligations (OBL), the dialogue history (DH), propositions to which agents are socially committed (scP), and conditional updates (COND). The value of OBL is a list of action types: actions that agents are obliged to perform. An action type is specified by a PREDICATE, a DIALOGUE PARTICI-PANT, and a list of ARGUMENTS. The value of see is a list of a particular type of mental states, social commitments of agents to propositions. 4 These are specified by a DIALOGUE PARTICIPANT, and a PROPOSITION. Finally, the elements in DH are dia-4SCPs play much the same role in PTT as do beliefs in many BDI accounts of speech acts. logue actions, which are instances of dialogue action types. A dialogue action is specified by an action type, a dialogue act id, and a confidence level CONF (the confidence that an agent has that that dialogue act has been observed).",
329
- "cite_spans": [],
330
- "ref_spans": [],
331
- "eq_spans": [],
332
- "section": "Information States in PTT",
333
- "sec_num": "3.2"
334
- },
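The field layout just described can be pictured with a few Python dataclasses. This is a hypothetical encoding, not the system's own representation; the field names mirror the paper's labels (GND, UDUS, PDU, CDU, INT, and OBL, DH, SCP, COND inside a DU), and the later sketches in this section reuse these classes.

```python
# Hypothetical Python rendering of the information state described above.
# Later sketches in this section reuse these classes.
from dataclasses import dataclass, field
from typing import List, Optional, Tuple

@dataclass
class ActionType:
    predicate: str                      # e.g. "address", "understandingAct"
    participant: str                    # dialogue participant, "W" or "C"
    args: List[str] = field(default_factory=list)

@dataclass
class DialogueAct:
    atype: ActionType                   # instance of an action type ...
    act_id: str                         # ... with a conversational-act id ("CA2")
    conf: int = 2                       # 1 = poorly understood, 2 = sufficient

@dataclass
class DU:                               # a discourse unit, e.g. "DU3"
    du_id: str
    obl: List[ActionType] = field(default_factory=list)       # obligations
    dh: List[DialogueAct] = field(default_factory=list)       # dialogue history
    scp: List[Tuple[str, str]] = field(default_factory=list)  # (agent, proposition)
    cond: List[Tuple[ActionType, ActionType]] = field(default_factory=list)
                                        # conditional updates: antecedent -> consequent

@dataclass
class ParticipantView:                  # the W (or C) half of the IS
    gnd: DU = field(default_factory=lambda: DU("GND"))
    udus: List[str] = field(default_factory=list)   # ids of ungrounded DUs
    pdu: Optional[DU] = None
    cdu: Optional[DU] = None
    int_: List[ActionType] = field(default_factory=list)      # intentions (INT)
```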
335
- {
336
- "text": "The situation in Figure 2 is the result of updates to the IS caused by utterance [2] in the dialogue in (6), which is assumed to generate a direct act as well as an assert act and an answer act. 5 That utterance is also assumed to contain an implicit acknowledgement of the original question; this understanding act has resulted in the contents of DU2 being grounded (and subsequently merged with GND), as discussed below.",
337
- "cite_spans": [],
338
- "ref_spans": [
339
- {
340
- "start": 17,
341
- "end": 25,
342
- "text": "Figure 2",
343
- "ref_id": "FIGREF1"
344
- }
345
- ],
346
- "eq_spans": [],
347
- "section": "Information States in PTT",
348
- "sec_num": "3.2"
349
- },
350
- {
351
- "text": "GND.OBL in Figure 2 includes two obligations. The first is an obligation on W to perform an understanding act (the predicate is understandingAct, the participant is W, and there is just one argument, DU3, which identifies the DU in CDU by referring to its ID). The second obligation is an obligation on C to address conversational act CA2; this ID points to the appropriate info_request in the DH list by means of the ID number. Obligations are specified in CDU and PDU, as well. Those in PDU are simply a subset of those in GND, since at point in the update process shown in Figure 2 this field contains information that has already been grounded (note that DU2 is not in UDUS anymore); but CDU contains obligations that have not been grounded yetin particular, the obligation on W to address CA6.",
352
- "cite_spans": [],
353
- "ref_spans": [
354
- {
355
- "start": 11,
356
- "end": 19,
357
- "text": "Figure 2",
358
- "ref_id": "FIGREF1"
359
- },
360
- {
361
- "start": 576,
362
- "end": 584,
363
- "text": "Figure 2",
364
- "ref_id": "FIGREF1"
365
- }
366
- ],
367
- "eq_spans": [],
368
- "section": "Information States in PTT",
369
- "sec_num": "3.2"
370
- },
371
- {
372
- "text": "GND.DH in this IS contains a list of dialogue actions whose occurrence has already been grounded: the info_request performed by utterance 1, with argument a question, 6 and the implicit acknowledge performed by utterance 2. 7 The DH field in CDU contains dialogue acts performed by utterance 2 that do need to be grounded: a directive by C to W to perform an action of type giveroute, and an assert by C of the proposition want(C, route), by which C provides an answer to the previous info_request CA2.",
373
- "cite_spans": [],
374
- "ref_spans": [],
375
- "eq_spans": [],
376
- "section": "Information States in PTT",
377
- "sec_num": "3.2"
378
- },
379
- {
380
- "text": "The COND field in CDU contains a conditional update resulting from the directive performed by that utterance. The idea is that directives do not immediately lead to obligations to perform the mentioned action: instead (in addition to an obligation to address the action with some sort of response), their effect is to add to the common ground the information that if the directive is accepted by the addressee, SThe fact that the utterance of a route please constitutes an answer is explicitly assumed; however, it should be possible to derive this information automatically (perhaps along the lines suggested by Kreutel (Kreutel, 1998) 7We assume here, as in (Traum, 1994) and (Poesio and Traum, 1998) , that understanding acts do not have to be grounded themselves, which would result in a infinite regress. then he or she has the obligation to perform the action type requested. (In this case, to give a route to C.)",
381
- "cite_spans": [
382
- {
383
- "start": 621,
384
- "end": 636,
385
- "text": "(Kreutel, 1998)",
386
- "ref_id": "BIBREF12"
387
- },
388
- {
389
- "start": 660,
390
- "end": 673,
391
- "text": "(Traum, 1994)",
392
- "ref_id": "BIBREF23"
393
- },
394
- {
395
- "start": 678,
396
- "end": 702,
397
- "text": "(Poesio and Traum, 1998)",
398
- "ref_id": "BIBREF16"
399
- }
400
- ],
401
- "ref_spans": [],
402
- "eq_spans": [],
403
- "section": "Information States in PTT",
404
- "sec_num": "3.2"
405
- },
406
- {
407
- "text": "We are now in a position to examine the update mechanisms which are performed when new dialogue acts are recognised. When a dialogue participant takes a turn and produces an utterance, the interpretation module sets the system variable latest_moves to contain a representation of the dialogue acts performed with the utterance. The updating procedure then uses update rules to modify the IS on the basis of the contents of latest_moves and of the previous IS. The basic procedure is described in (1) below, s",
408
- "cite_spans": [],
409
- "ref_spans": [],
410
- "eq_spans": [],
411
- "section": "Update Rules in PTT",
412
- "sec_num": "3.3"
413
- },
414
- {
415
- "text": "(1) 1. Create a new DU and push it on top of UDUs.",
416
- "cite_spans": [],
417
- "ref_spans": [],
418
- "eq_spans": [],
419
- "section": "Update Rules in PTT",
420
- "sec_num": "3.3"
421
- },
422
- {
423
- "text": "2. Perform updates on the basis of backwards grounding acts.",
424
- "cite_spans": [],
425
- "ref_spans": [],
426
- "eq_spans": [],
427
- "section": "Update Rules in PTT",
428
- "sec_num": "3.3"
429
- },
430
- {
431
- "text": ". If any other type of act is observed, record it in the dialogue history in CDU and apply the update rules for this kind of act 4. Apply update rules to all parts of the IS which contain newly added acts.",
432
- "cite_spans": [],
433
- "ref_spans": [],
434
- "eq_spans": [],
435
- "section": "Update Rules in PTT",
436
- "sec_num": "3.3"
437
- },
438
- {
439
- "text": "The first step involves moving the contents of CDU to PDU (losing direct access to the former PDU contents) and putting in CDU a new empty DU with a new identifier. The second and third steps deal explicitly with the contents of latest.moves, applying one urule (of possibly a larger set) for each act in latest_moves. The relevant effects for each act are summarised in 2 SSee for different versions of this update procedure used for slightly different versions of the theory.",
440
- "cite_spans": [],
441
- "ref_spans": [],
442
- "eq_spans": [],
443
- "section": "Update Rules in PTT",
444
- "sec_num": "3.3"
445
- },
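Procedure (1) might then look as follows, assuming the DU and ParticipantView classes from the earlier sketch and a rules table mapping each act predicate to its update function (one such table is sketched after the effect summary in (2)). This is an illustration, not the system's procedure.

```python
# Illustrative sketch of update procedure (1); DU and ParticipantView are
# the hypothetical classes from the information-state sketch above.
GROUNDING_ACTS = {"ack"}

def update_cycle(view, latest_moves, rules):
    """`view` is a ParticipantView; `rules` maps act predicates to functions."""
    # 1. Create a new DU and push it on top of UDUS; the old CDU becomes
    #    the PDU (direct access to the former PDU contents is lost).
    view.pdu = view.cdu
    view.cdu = DU(f"DU{len(view.udus) + 1}")   # toy id scheme
    view.udus.insert(0, view.cdu.du_id)
    for act in latest_moves:
        pred = act.atype.predicate
        rule = rules.get(pred, lambda v, a: None)
        if pred in GROUNDING_ACTS:
            # 2. Backwards grounding acts update GND/PDU directly.
            rule(view, act)
        else:
            # 3. Other acts are recorded in CDU's dialogue history and the
            #    update rule for that kind of act is applied.
            view.cdu.dh.append(act)
            rule(view, act)
    # 4. Re-apply rules to parts of the IS containing newly added acts,
    #    e.g. firing COND rules whose antecedent has now occurred
    #    (see the propagation sketch later in this section).
```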
446
- {
447
- "text": "(2) act ID:2, accept (DP, ID2) effect accomplished via rule resolution act ID:2, ack(DP, DU1) effect peRec(w.Gnd,w.pdu.tognd) effect remove(DU1,UDUS) act ID:2, agree(DP, ID2) effect push(scP,scp(DP,P(ID2))) act ID:2, answer(DP,ID2,ID3) effect push(scP,ans(DP, Q(ID2),P(ID2))) act ID:2, assert (DP,PROP) effect push(scP,sep(DP, PROP)) effect push (COND,accept (o(DP),ID)-+ scp(o(DP),PROP)) act ID:I, assert(DP,PROP) effect push (COND,accept (o(DP),ID)-~ scp(o(DP),PROP)) act ID:2, check(DP,PROP) effect push(OSL,address(o(DP),ID)) effect push(COND,agree(o(DP),ID) --~ scp(DP, PROP)) act ID:2, direct (DP, Act) effect push(OBL,address(o(DP),ID)) effect push(CONI),accept (o(DP),ID) -~ obl(o(DP),Act)) act ID:2, info_request (DP, Q) effect push(osL,address(o(DP),ID))",
448
- "cite_spans": [
449
- {
450
- "start": 21,
451
- "end": 25,
452
- "text": "(DP,",
453
- "ref_id": null
454
- },
455
- {
456
- "start": 26,
457
- "end": 30,
458
- "text": "ID2)",
459
- "ref_id": null
460
- }
461
- ],
462
- "ref_spans": [],
463
- "eq_spans": [],
464
- "section": "Update Rules in PTT",
465
- "sec_num": "3.3"
466
- },
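A few of the effect rows in (2) can be restated as executable update functions over the hypothetical classes sketched earlier; o(DP) becomes other(dp). The conf test in on_assert reflects the confidence-level strategy discussed below, and on_ack implements the merge-and-remove behaviour of the ack rule. None of this is the system's actual code.

```python
# Hypothetical encoding of some update effects in (2); `view` is a
# ParticipantView, acts are DialogueAct instances as sketched above.
def other(dp):                          # o(DP): the other dialogue participant
    return "C" if dp == "W" else "W"

def on_info_request(view, act):
    dp = act.atype.participant
    view.cdu.obl.append(ActionType("address", other(dp), [act.act_id]))

def on_direct(view, act):
    dp = act.atype.participant
    view.cdu.obl.append(ActionType("address", other(dp), [act.act_id]))
    # accept(o(DP), ID) -> obl(o(DP), Act)
    view.cdu.cond.append((ActionType("accept", other(dp), [act.act_id]),
                          ActionType("obl", other(dp), act.atype.args)))

def on_assert(view, act):
    dp, prop = act.atype.participant, act.atype.args[0]
    if act.conf == 2:                   # only well-understood asserts commit DP now
        view.cdu.scp.append((dp, prop))
    # accept(o(DP), ID) -> scp(o(DP), PROP)
    view.cdu.cond.append((ActionType("accept", other(dp), [act.act_id]),
                          ActionType("scp", other(dp), [prop])))

def on_ack(view, act):
    # Merge the acknowledged DU (assumed to be PDU) into GND; drop it from UDUS.
    if view.pdu is not None:
        for f in ("obl", "dh", "scp", "cond"):
            getattr(view.gnd, f).extend(getattr(view.pdu, f))
        if view.pdu.du_id in view.udus:
            view.udus.remove(view.pdu.du_id)

RULES = {"inforeq": on_info_request, "direct": on_direct,
         "assert": on_assert, "ack": on_ack}
```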
467
- {
468
- "text": "The ack act is the only backward grounding act implemented at the moment. The main effect of an ack is to merge the information in the acknowledged DU (assumed to be PDU) into GND, also removing this DU from UDUS. Unlike the other acts described below, ack acts are recorded directly into GND.DH, rather than into CDU.TOGND.DH.",
469
- "cite_spans": [],
470
- "ref_spans": [],
471
- "eq_spans": [],
472
- "section": "Update Rules in PTT",
473
- "sec_num": "3.3"
474
- },
475
- {
476
- "text": "All of the other updates are performed in the third step of the procedure in (1). The only effect of accept acts is to enable the conditional rules which are part of the effect of assert and direct, leading to social commitments and obligations, respectively. agree acts also trigger conditional rules introduced by check; in addition, they result in the agent being socially committed to the proposition introduced by the act with which the agent agrees. Performing an answer to question ID2 by asserting proposition P(ID3) commits the dialogue participant to the proposition that P(ID3) is indeed an answer to Q(ID2).",
477
- "cite_spans": [],
478
- "ref_spans": [],
479
- "eq_spans": [],
480
- "section": "Update Rules in PTT",
481
- "sec_num": "3.3"
482
- },
483
- {
484
- "text": "The two rules for assert are where the confidence levels are actually used, to implement a simple verification strategy. The idea is that the system only assumes that the user is committed to the asserted proposition when a confidence level of 2 is observed, while some asserts are assumed not to have been sufficiently well understood, and are only assigned a confidence level 1. This leads the system to perform a check, as we will see shortly.",
485
- "cite_spans": [],
486
- "ref_spans": [],
487
- "eq_spans": [],
488
- "section": "Update Rules in PTT",
489
- "sec_num": "3.3"
490
- },
491
- {
492
- "text": "The next three update rules, for check, direct, and info_req, all impose an obligation on the other dialogue participant to address the dialogue act. In addition, the direct rule introduces a conditional act: acceptance of the directive will impose an obligation on the hearer to act on its contents.",
493
- "cite_spans": [],
494
- "ref_spans": [],
495
- "eq_spans": [],
496
- "section": "Update Rules in PTT",
497
- "sec_num": "3.3"
498
- },
499
- {
500
- "text": "In addition, all FORWARD ACTS 9 in the DRI scheme (Discourse Resource Initiative, 1997) impose an obligation to perform an understanding act (e.g., an acknowledgement):",
501
- "cite_spans": [],
502
- "ref_spans": [],
503
- "eq_spans": [],
504
- "section": "Update Rules in PTT",
505
- "sec_num": "3.3"
506
- },
507
- {
508
- "text": "(3) 1 act effect ID:c, forward-looking-act (DP) push (OBL,u-act (o(DP) ,CDU.id)) I",
509
- "cite_spans": [
510
- {
511
- "start": 53,
512
- "end": 70,
513
- "text": "(OBL,u-act (o(DP)",
514
- "ref_id": null
515
- }
516
- ],
517
- "ref_spans": [],
518
- "eq_spans": [],
519
- "section": "Update Rules in PTT",
520
- "sec_num": "3.3"
521
- },
522
- {
523
- "text": "The internal urules implementing the updates in (2) have the format shown in (4), which is the urule for info_request. As noted above, these rules have four parts; a name, a type, a list of conditions, and a list of effects. The conditions in (4) state that there must be a move in latest_moves whose predicate is inforeq. The effects l\u00b0 state that the move should be recorded in the dialogue history in CDU, that an obligation to address the request should be pushed into OBL in CDU, and that the requirement for an understanding act by W should be pushed directly into the list in W.GND.",
524
- "cite_spans": [],
525
- "ref_spans": [],
526
- "eq_spans": [],
527
- "section": "Update Rules in PTT",
528
- "sec_num": "3.3"
529
- },
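The four-part urule format of (4) could be mimicked in Python as a dictionary with a name, a type, a condition list, and an operation list. The type label and the lambda-based encoding here are assumptions, not the TrindiKit notation; `other`, `ActionType`, and the SystemVars/ParticipantView classes come from the earlier sketches.

```python
# Hypothetical rendering of the urule in (4): name, type, conditions, operations.
do_info_req = {
    "name": "doInfoReq",
    "type": "integrate",   # assumed type label
    "conditions": [
        lambda sv, view: any(a.atype.predicate == "inforeq" for a in sv.latest_moves),
    ],
    "operations": [
        # record the move in CDU's dialogue history ...
        lambda sv, view: view.cdu.dh.extend(
            a for a in sv.latest_moves if a.atype.predicate == "inforeq"),
        # ... push an obligation to address it into CDU.OBL ...
        lambda sv, view: view.cdu.obl.extend(
            ActionType("address", other(a.atype.participant), [a.act_id])
            for a in sv.latest_moves if a.atype.predicate == "inforeq"),
        # ... and push the understanding-act requirement straight into W.GND.OBL.
        lambda sv, view: view.gnd.obl.append(
            ActionType("understandingAct", "W", [view.cdu.du_id])),
    ],
}

def apply_urule(rule, sv, view):
    # A urule fires only when all of its conditions hold on the current IS.
    if all(cond(sv, view) for cond in rule["conditions"]):
        for op in rule["operations"]:
            op(sv, view)
```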
530
- {
531
- "text": "The fourth and final step of the algorithm cycles through the updating process in case recently added facts have further implications. For instance, when an action has been performed that matches the antecedent of a rule in COND, the consequent is established. Likewise, when an action is performed it releases any obligations to perform that action. Thus, accept, answer, and agree are all ways of releasing an obligation to address, since these are all appropriate backward looking actions. Similarly, an agent will drop intentions to perform actions it has already (successfully) performed.",
532
- "cite_spans": [],
533
- "ref_spans": [],
534
- "eq_spans": [],
535
- "section": "Update Rules in PTT",
536
- "sec_num": "3.3"
537
- },
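A crude version of this fourth step, again over the hypothetical classes above: fire COND rules whose antecedent act now appears in some dialogue history, and release address obligations once the obliged party has produced an appropriate backward act. Matching on predicate and participant only is a deliberate simplification; the real system cycles until nothing more changes.

```python
def propagate(view):
    # Step 4 (sketch): fire COND rules whose antecedent act has been
    # performed, and release 'address' obligations once an appropriate
    # backward act (accept / answer / agree) by the obliged party occurs.
    performed = [a.atype for du in (view.gnd, view.pdu, view.cdu) if du
                 for a in du.dh]
    def done(at):  # crude match on predicate + participant only
        return any(p.predicate == at.predicate and
                   p.participant == at.participant for p in performed)
    for du in (view.gnd, view.cdu):
        for ante, cons in list(du.cond):
            if done(ante):
                du.cond.remove((ante, cons))
                if cons.predicate == "scp":
                    du.scp.append((cons.participant, cons.args[0]))
                else:                   # e.g. a newly established obligation
                    du.obl.append(cons)
        for ob in list(du.obl):
            if ob.predicate == "address" and any(
                    p.participant == ob.participant and
                    p.predicate in ("accept", "answer", "agree")
                    for p in performed):
                du.obl.remove(ob)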
538
- {
539
- "text": "We assume, in common with BDI-approaches to agency (e.g., (Bratman et al., 1988) ) that intentions 9Forward acts include assert, check, direct, and info_request.",
540
- "cite_spans": [
541
- {
542
- "start": 58,
543
- "end": 80,
544
- "text": "(Bratman et al., 1988)",
545
- "ref_id": "BIBREF2"
546
- }
547
- ],
548
- "ref_spans": [],
549
- "eq_spans": [],
550
- "section": "Deliberation",
551
- "sec_num": "3.4"
552
- },
553
- {
554
- "text": "l\u00b0The ID and HID values simply contain numbers identifying the discourse units and conversational acts.",
555
- "cite_spans": [],
556
- "ref_spans": [],
557
- "eq_spans": [],
558
- "section": "Deliberation",
559
- "sec_num": "3.4"
560
- },
561
- {
562
- "text": "are the primary mental attitude leading to an agent's actions. The main issues to explain then become how such intentions are adopted given the rest of the information state, and how an agent gets from intentions to actual performance.",
563
- "cite_spans": [],
564
- "ref_spans": [],
565
- "eq_spans": [],
566
- "section": "Deliberation",
567
- "sec_num": "3.4"
568
- },
569
- {
570
- "text": "For the latter question, we take a fairly simplistic approach here: all the intentions to perform dialogue acts are simply transferred to the next_moves system variable, with the assumption that the generation module can realise all of them as a single utterance. A more sophisticated approach would be to weight the importance of (immediate) realisation of sets of intentions and compare this to the likelihood that particular utterances will achieve these effects at minimal cost, and choose accordingly. We leave this for future work (see (Traum and Dillenbourg, 1998) for some preliminary ideas along these lines), concentrating here on the first issue -how the system adopts intentions to perform dialogue acts from other aspects of the mental state.",
571
- "cite_spans": [
572
- {
573
- "start": 542,
574
- "end": 571,
575
- "text": "(Traum and Dillenbourg, 1998)",
576
- "ref_id": "BIBREF22"
577
- }
578
- ],
579
- "ref_spans": [],
580
- "eq_spans": [],
581
- "section": "Deliberation",
582
- "sec_num": "3.4"
583
- },
584
- {
585
- "text": "The current system takes the following factors into account:",
586
- "cite_spans": [],
587
- "ref_spans": [],
588
- "eq_spans": [],
589
- "section": "Deliberation",
590
- "sec_num": "3.4"
591
- },
592
- {
593
- "text": "\u2022 obligations (to perform understanding acts, to address previous dialogue acts, to perform other actions)",
594
- "cite_spans": [],
595
- "ref_spans": [],
596
- "eq_spans": [],
597
- "section": "Deliberation",
598
- "sec_num": "3.4"
599
- },
600
- {
601
- "text": "\u2022 potential obligations (that would result if another act were performed, as represented in the COND field)",
602
- "cite_spans": [],
603
- "ref_spans": [],
604
- "eq_spans": [],
605
- "section": "Deliberation",
606
- "sec_num": "3.4"
607
- },
608
- {
609
- "text": "\u2022 insufficiently understood dialogue acts (with a 1 confidence level in CDU.DH)",
610
- "cite_spans": [],
611
- "ref_spans": [],
612
- "eq_spans": [],
613
- "section": "Deliberation",
614
- "sec_num": "3.4"
615
- },
616
- {
617
- "text": "\u2022 intentions to perform complex acts",
618
- "cite_spans": [],
619
- "ref_spans": [],
620
- "eq_spans": [],
621
- "section": "Deliberation",
622
- "sec_num": "3.4"
623
- },
624
- {
625
- "text": "The current deliberation process assumes maximal cooperativity, in that the system always chooses to meet its obligations whenever possible, and also chooses to provide a maximally helpful response when possible. Thus, when obliged to address a previous dialogue act such as a question or directive, it will choose to actually return the answer or perform the action, if possible, rather than reject or negotiate such a performance, which would also be acting in accordance with the obligations (see (Kreutel, 1998) on how acts might be rejected).",
626
- "cite_spans": [],
627
- "ref_spans": [],
628
- "eq_spans": [],
629
- "section": "Deliberation",
630
- "sec_num": "3.4"
631
- },
632
- {
633
- "text": "In the current implementation, the following rules are used to adopt new intentions (i.e., to update the INT field):",
634
- "cite_spans": [],
635
- "ref_spans": [],
636
- "eq_spans": [],
637
- "section": "Deliberation",
638
- "sec_num": "3.4"
639
- },
640
- {
641
- "text": "(5) 1. add an intention to acknowledge(W,CDU), given an obligation to perform a u-act, if everything in CDU is sufficiently understood (i.e., to level 2); 2. add an intention to accept a directive or answer a question as the result of an obligation to address a dialogue act; 3. add an intention to perform an action if COND contains a conditional that will establish an obligation to perform the action, and the antecedent of this conditional is another action that is already intended. (This anticipatory planning allows the obligation to be discharged at the same time it is invoked, e.g., without giving an intermediate acceptance of an directive.)",
642
- "cite_spans": [],
643
- "ref_spans": [],
644
- "eq_spans": [],
645
- "section": "Deliberation",
646
- "sec_num": "3.4"
647
- },
648
- {
649
- "text": "4. add an intention to perform a (dialogue) action motivated by the intention to perform the current task. In the case of the Autoroute domain, we have two cases: the system may decide (a) to check any dialogue acts in CDU at confidence level 1, which contain information needed to discharge the intention to give a route; or (b) to perform a question asking about a new piece of information that has not been established (this is decided by inspecting GND.SCP and CDU.SCP). For example, it may decide to ask about the starting point, the time of departure, etc.",
650
- "cite_spans": [],
651
- "ref_spans": [],
652
- "eq_spans": [],
653
- "section": "Deliberation",
654
- "sec_num": "3.4"
655
- },
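Rules (5) can be gathered into a single deliberation pass over the sketched information state. The maximal-cooperativity assumption shows up in always choosing to acknowledge, accept, or answer when the rules permit; as everywhere in these sketches, the encoding is hypothetical rather than the system's own.

```python
def deliberate(view, me="W"):
    # Sketch of deliberation rules (5); returns intentions to add to INT.
    intentions = []
    # (1) acknowledge CDU if obliged to produce an understanding act and
    #     everything in CDU was understood at confidence level 2.
    if any(o.predicate == "understandingAct" and o.participant == me
           for o in view.gnd.obl) and view.cdu and \
       all(a.conf == 2 for a in view.cdu.dh):
        intentions.append(ActionType("ack", me, [view.cdu.du_id]))
    # (4a) check any confidence-1 act rather than grounding it silently.
    for a in (view.cdu.dh if view.cdu else []):
        if a.conf == 1:
            intentions.append(ActionType("check", me, [a.act_id]))
    # (2) meet obligations to address previous acts (accept a directive,
    #     answer a question; 'accept' stands in for both here).
    for du in (view.gnd, view.cdu):
        if not du:
            continue
        for o in du.obl:
            if o.predicate == "address" and o.participant == me:
                intentions.append(ActionType("accept", me, o.args))
    # (3) anticipatory planning: if COND says accept(me, ID) -> obl(me, Act)
    #     and accepting is already intended, intend Act itself as well.
    for du in (view.gnd, view.cdu):
        if not du:
            continue
        for ante, cons in du.cond:
            if ante.participant == me and any(
                    i.predicate == ante.predicate and i.args == ante.args
                    for i in intentions):
                intentions.append(cons)
    return intentions
```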
656
- {
657
- "text": "4 Extended Example In this section, we discuss more examples of how the information state changes as a result of processing and performing dialogue acts. It is useful to do this by looking briefly at a typical Autoroute dialogue, shown in (6). 11 Our implementation can process this sort of dialogue using very simple interpretation and generation routines that provide the dialogue acts in latest_moves from the text strings, and produce W's output text from the dialogue acts which the system places in next_moves. 6 We assume that before the dialogue starts, W has the intention to ask C what kind of help is required, liThe interchanges have been cleaned up to some extent here, mainly by removing pauses and hesitations. been assumed. Current and future work is directed towards measuring the theory against more challenging data to test its validity; cases where grounding is less automatic are an obvious source of such tests, and we have identified a few relevant problem cases in the Autoroute dialogues. We do claim, however, that the implementation as it stands validates a number of key aspects of the theory and provides a good basis for future work in dialogue modelling.",
658
- "cite_spans": [],
659
- "ref_spans": [],
660
- "eq_spans": [],
661
- "section": "Deliberation",
662
- "sec_num": "3.4"
663
- }
664
- ],
665
- "back_matter": [
666
- {
667
- "text": "The TRINDI (Task Oriented Instructional Dialogue) project is supported by the Telematics Applications Programme, Language Engineering Project LE4-8314. Massimo Poesio is supported by an EP-SRC Advanced Research Fellowship.",
668
- "cite_spans": [],
669
- "ref_spans": [],
670
- "eq_spans": [],
671
- "section": "Acknowledgments",
672
- "sec_num": null
673
- },
674
- {
675
- "text": "and that C has the intention to find a route. We also assume that W has the turn, and that the presence of the how can I help intention triggers an utterance directly. After C's agreement in [6], the deliberation routine is able to move past discussion of the starting point, and add an intention to ask about the next piece of information, the destination. This leads to producing utterance [7], which also implicitly acknowledges [6], after which C's agreement is grounded, leading to the IS shown in Figure 5 . Note that the list in W.GND.SCP in Figure 5 indicates that both C and W are committed to the proposition that the starting place is Malvern.",
676
- "cite_spans": [],
677
- "ref_spans": [
678
- {
679
- "start": 503,
680
- "end": 511,
681
- "text": "Figure 5",
682
- "ref_id": null
683
- },
684
- {
685
- "start": 549,
686
- "end": 557,
687
- "text": "Figure 5",
688
- "ref_id": null
689
- }
690
- ],
691
- "eq_spans": [],
692
- "section": "annex",
693
- "sec_num": null
694
- },
695
- {
696
- "text": "It has only been possible here to introduce the basic concerns of the PTT account of dialogue modelling and to pick out one or two illustrative examples to highlight the implementational approach which has",
697
- "cite_spans": [],
698
- "ref_spans": [],
699
- "eq_spans": [],
700
- "section": "Conclusions",
701
- "sec_num": "5"
702
- }
703
- ],
704
- "bib_entries": {
705
- "BIBREF0": {
706
- "ref_id": "b0",
707
- "title": "Linguistic Communication as Action and Cooperation",
708
- "authors": [
709
- {
710
- "first": "J",
711
- "middle": [],
712
- "last": "Allwood",
713
- "suffix": ""
714
- }
715
- ],
716
- "year": 1976,
717
- "venue": "",
718
- "volume": "",
719
- "issue": "",
720
- "pages": "",
721
- "other_ids": {},
722
- "num": null,
723
- "urls": [],
724
- "raw_text": "J. Allwood. 1976. Linguistic Communication as Action and Cooperation. Ph.D. thesis, GSteborg University, Department of Linguistics.",
725
- "links": null
726
- },
727
- "BIBREF1": {
728
- "ref_id": "b1",
729
- "title": "Obligations and options in dialogue",
730
- "authors": [
731
- {
732
- "first": "J",
733
- "middle": [],
734
- "last": "Allwood",
735
- "suffix": ""
736
- }
737
- ],
738
- "year": 1994,
739
- "venue": "Think Quarterly",
740
- "volume": "3",
741
- "issue": "",
742
- "pages": "9--18",
743
- "other_ids": {},
744
- "num": null,
745
- "urls": [],
746
- "raw_text": "J. Allwood. 1994. Obligations and options in dia- logue. Think Quarterly, 3:9-18.",
747
- "links": null
748
- },
749
- "BIBREF2": {
750
- "ref_id": "b2",
751
- "title": "Plans and Resource-Bounded Practical Reasoning",
752
- "authors": [
753
- {
754
- "first": "M",
755
- "middle": [
756
- "E"
757
- ],
758
- "last": "Bratman",
759
- "suffix": ""
760
- },
761
- {
762
- "first": "D",
763
- "middle": [
764
- "J"
765
- ],
766
- "last": "Israel",
767
- "suffix": ""
768
- },
769
- {
770
- "first": "M",
771
- "middle": [
772
- "E"
773
- ],
774
- "last": "Pollack",
775
- "suffix": ""
776
- }
777
- ],
778
- "year": 1988,
779
- "venue": "Computational Intelligence",
780
- "volume": "4",
781
- "issue": "4",
782
- "pages": "",
783
- "other_ids": {},
784
- "num": null,
785
- "urls": [],
786
- "raw_text": "M. E. Bratman, D. J. Israel and M. E. Pollack. 1988. Plans and Resource-Bounded Practical Reason- ing. Computational Intelligence, 4(4).",
787
- "links": null
788
- },
789
- "BIBREF3": {
790
- "ref_id": "b3",
791
- "title": "A rational agent as the kernel of a cooperative spoken dialogue system: Implementing a logical theory of interaction",
792
- "authors": [
793
- {
794
- "first": "P",
795
- "middle": [],
796
- "last": "Bretier",
797
- "suffix": ""
798
- },
799
- {
800
- "first": "M",
801
- "middle": [
802
- "D"
803
- ],
804
- "last": "Sadek",
805
- "suffix": ""
806
- }
807
- ],
808
- "year": 1996,
809
- "venue": "Intelligent Agents III --Proceedings of the Third International Workshop on Agent Theories, Architectures, and Languages (ATAL-96)",
810
- "volume": "",
811
- "issue": "",
812
- "pages": "",
813
- "other_ids": {},
814
- "num": null,
815
- "urls": [],
816
- "raw_text": "P. Bretier and M. D. Sadek. 1996. A rational agent as the kernel of a cooperative spoken dialogue system: Implementing a logical theory of inter- action. In J. P. Miiller, M. J. Wooldridge, and N. R. Jennings, editors, Intelligent Agents III -- Proceedings of the Third International Workshop on Agent Theories, Architectures, and Languages (ATAL-96), Lecture Notes in Artificial Intelli- gence. Springer-Verlag, Heidelberg.",
817
- "links": null
818
- },
819
- "BIBREF4": {
820
- "ref_id": "b4",
821
- "title": "Thistle: diagram display engines and editors",
822
- "authors": [
823
- {
824
- "first": "J",
825
- "middle": [],
826
- "last": "Calder",
827
- "suffix": ""
828
- }
829
- ],
830
- "year": 1998,
831
- "venue": "",
832
- "volume": "",
833
- "issue": "",
834
- "pages": "",
835
- "other_ids": {},
836
- "num": null,
837
- "urls": [],
838
- "raw_text": "J. Calder. 1998. Thistle: diagram display en- gines and editors. Technical Report HCRC/TR- 97, HCRC, University of Edinburgh, Edinburgh.",
839
- "links": null
840
- },
841
- "BIBREF5": {
842
- "ref_id": "b5",
843
- "title": "Contributing to discourse",
844
- "authors": [
845
- {
846
- "first": "H",
847
- "middle": [
848
- "H"
849
- ],
850
- "last": "Clark",
851
- "suffix": ""
852
- },
853
- {
854
- "first": "E",
855
- "middle": [
856
- "F"
857
- ],
858
- "last": "Schaefer",
859
- "suffix": ""
860
- }
861
- ],
862
- "year": 1989,
863
- "venue": "Cognitive Science",
864
- "volume": "13",
865
- "issue": "",
866
- "pages": "259--294",
867
- "other_ids": {},
868
- "num": null,
869
- "urls": [],
870
- "raw_text": "H. H. Clark and E. F. Schaefer. 1989. Contributing to discourse. Cognitive Science, 13:259-294.",
871
- "links": null
872
- },
873
- "BIBREF6": {
874
- "ref_id": "b6",
875
- "title": "Referring as a collaborative process",
876
- "authors": [
877
- {
878
- "first": "H",
879
- "middle": [
880
- "H"
881
- ],
882
- "last": "Clark",
883
- "suffix": ""
884
- },
885
- {
886
- "first": "D",
887
- "middle": [],
888
- "last": "Wilkes-Gibbs",
889
- "suffix": ""
890
- }
891
- ],
892
- "year": 1986,
893
- "venue": "Also appears as Chapter",
894
- "volume": "22",
895
- "issue": "",
896
- "pages": "1--39",
897
- "other_ids": {},
898
- "num": null,
899
- "urls": [],
900
- "raw_text": "H. H. Clark and D. Wilkes-Gibbs. 1986. Referring as a collaborative process. Cognition, 22:1-39. Also appears as Chapter 4 in (Clark, 1992).",
901
- "links": null
902
- },
903
- "BIBREF7": {
904
- "ref_id": "b7",
905
- "title": "Arenas of Language Use",
906
- "authors": [
907
- {
908
- "first": "H",
909
- "middle": [
910
- "H"
911
- ],
912
- "last": "Clark",
913
- "suffix": ""
914
- }
915
- ],
916
- "year": 1992,
917
- "venue": "",
918
- "volume": "",
919
- "issue": "",
920
- "pages": "",
921
- "other_ids": {},
922
- "num": null,
923
- "urls": [],
924
- "raw_text": "H. H. Clark. 1992. Arenas of Language Use. Uni- versity of Chicago Press.",
925
- "links": null
926
- },
927
- "BIBREF8": {
928
- "ref_id": "b8",
929
- "title": "Rational interaction as the basis for communication",
930
- "authors": [
931
- {
932
- "first": "P",
933
- "middle": [
934
- "R"
935
- ],
936
- "last": "Cohen",
937
- "suffix": ""
938
- },
939
- {
940
- "first": "H",
941
- "middle": [
942
- "J"
943
- ],
944
- "last": "Levesque",
945
- "suffix": ""
946
- }
947
- ],
948
- "year": 1990,
949
- "venue": "",
950
- "volume": "",
951
- "issue": "",
952
- "pages": "",
953
- "other_ids": {},
954
- "num": null,
955
- "urls": [],
956
- "raw_text": "P. R. Cohen and H. J. Levesque. 1990. Rational in- teraction as the basis for communication. In P. R.",
957
- "links": null
958
- },
959
- "BIBREF9": {
960
- "ref_id": "b9",
961
- "title": "tentions in Communication",
962
- "authors": [
963
- {
964
- "first": "J",
965
- "middle": [],
966
- "last": "Cohen",
967
- "suffix": ""
968
- },
969
- {
970
- "first": "M",
971
- "middle": [
972
- "E"
973
- ],
974
- "last": "Morgan",
975
- "suffix": ""
976
- },
977
- {
978
- "first": "",
979
- "middle": [],
980
- "last": "Pollack",
981
- "suffix": ""
982
- }
983
- ],
984
- "year": null,
985
- "venue": "",
986
- "volume": "",
987
- "issue": "",
988
- "pages": "",
989
- "other_ids": {},
990
- "num": null,
991
- "urls": [],
992
- "raw_text": "Cohen, J. Morgan, and M. E. Pollack, editors, In- tentions in Communication. MIT Press.",
993
- "links": null
994
- },
995
- "BIBREF10": {
996
- "ref_id": "b10",
997
- "title": "Standards for dialogue coding in natural language processing",
998
- "authors": [],
999
- "year": 1997,
1000
- "venue": "",
1001
- "volume": "",
1002
- "issue": "",
1003
- "pages": "",
1004
- "other_ids": {},
1005
- "num": null,
1006
- "urls": [],
1007
- "raw_text": "Discourse Resource Initiative. 1997. Standards for dialogue coding in natural language processing. Report no. 167, Dagstuhl-Seminar.",
1008
- "links": null
1009
- },
1010
- "BIBREF11": {
1011
- "ref_id": "b11",
1012
- "title": "Plans for discourse",
1013
- "authors": [
1014
- {
1015
- "first": "B",
1016
- "middle": [
1017
- "J"
1018
- ],
1019
- "last": "Grosz",
1020
- "suffix": ""
1021
- },
1022
- {
1023
- "first": "C",
1024
- "middle": [
1025
- "L"
1026
- ],
1027
- "last": "Sidner",
1028
- "suffix": ""
1029
- }
1030
- ],
1031
- "year": 1990,
1032
- "venue": "Intentions in Communication",
1033
- "volume": "",
1034
- "issue": "",
1035
- "pages": "",
1036
- "other_ids": {},
1037
- "num": null,
1038
- "urls": [],
1039
- "raw_text": "B. J. Grosz and C. L. Sidner. 1990. Plans for dis- course. In P. R. Cohen, J. Morgan, and M. E. Pol- lack, editors, Intentions in Communication. MIT Press.",
1040
- "links": null
1041
- },
1042
- "BIBREF12": {
1043
- "ref_id": "b12",
1044
- "title": "An obligation-driven computational model for questions and assertions in dialogue",
1045
- "authors": [
1046
- {
1047
- "first": "J",
1048
- "middle": [],
1049
- "last": "Kreutel",
1050
- "suffix": ""
1051
- }
1052
- ],
1053
- "year": 1998,
1054
- "venue": "",
1055
- "volume": "",
1056
- "issue": "",
1057
- "pages": "",
1058
- "other_ids": {},
1059
- "num": null,
1060
- "urls": [],
1061
- "raw_text": "J. Kreutel. 1998. An obligation-driven computa- tional model for questions and assertions in dia- logue. Master's thesis, Department of Linguistics, University of Edinburgh, Edinburgh.",
1062
- "links": null
1063
- },
1064
- "BIBREF14": {
1065
- "ref_id": "b14",
1066
- "title": "Annotating conversations for information state update",
1067
- "authors": [
1068
- {
1069
- "first": "M",
1070
- "middle": [],
1071
- "last": "Poesio",
1072
- "suffix": ""
1073
- },
1074
- {
1075
- "first": "R",
1076
- "middle": [],
1077
- "last": "Cooper",
1078
- "suffix": ""
1079
- },
1080
- {
1081
- "first": "S",
1082
- "middle": [],
1083
- "last": "Larsson",
1084
- "suffix": ""
1085
- },
1086
- {
1087
- "first": "D",
1088
- "middle": [],
1089
- "last": "Traum",
1090
- "suffix": ""
1091
- },
1092
- {
1093
- "first": "C",
1094
- "middle": [],
1095
- "last": "Matheson",
1096
- "suffix": ""
1097
- }
1098
- ],
1099
- "year": 1999,
1100
- "venue": "Proceedings of Amstelogue 99",
1101
- "volume": "",
1102
- "issue": "",
1103
- "pages": "",
1104
- "other_ids": {},
1105
- "num": null,
1106
- "urls": [],
1107
- "raw_text": "M. Poesio, R. Cooper, S. Larsson, D. Traum, and C. Matheson. 1999. Annotating conversations for information state update. In Proceedings of Am- stelogue 99, 3rd Workshop on the Semantics and Pragmatics of Dialogues.",
1108
- "links": null
1109
- },
1110
- "BIBREF15": {
1111
- "ref_id": "b15",
1112
- "title": "Conversational actions and discourse situations",
1113
- "authors": [
1114
- {
1115
- "first": "M",
1116
- "middle": [],
1117
- "last": "Poesio",
1118
- "suffix": ""
1119
- },
1120
- {
1121
- "first": "D",
1122
- "middle": [
1123
- "R"
1124
- ],
1125
- "last": "Tranm",
1126
- "suffix": ""
1127
- }
1128
- ],
1129
- "year": 1997,
1130
- "venue": "Computational Intelligence",
1131
- "volume": "13",
1132
- "issue": "3",
1133
- "pages": "",
1134
- "other_ids": {},
1135
- "num": null,
1136
- "urls": [],
1137
- "raw_text": "M. Poesio and D. R. Tranm. 1997. Conversational actions and discourse situations. Computational Intelligence, 13(3).",
1138
- "links": null
1139
- },
1140
- "BIBREF16": {
1141
- "ref_id": "b16",
1142
- "title": "Towards an axiomatization of dialogue acts",
1143
- "authors": [
1144
- {
1145
- "first": "M",
1146
- "middle": [],
1147
- "last": "Poesio",
1148
- "suffix": ""
1149
- },
1150
- {
1151
- "first": "D",
1152
- "middle": [
1153
- "R"
1154
- ],
1155
- "last": "Traum",
1156
- "suffix": ""
1157
- }
1158
- ],
1159
- "year": 1998,
1160
- "venue": "Proceedings of Twendial'98, 13th Twente Workshop on Language Technology",
1161
- "volume": "",
1162
- "issue": "",
1163
- "pages": "207--222",
1164
- "other_ids": {},
1165
- "num": null,
1166
- "urls": [],
1167
- "raw_text": "M. Poesio and D. R. Traum. 1998. Towards an ax- iomatization of dialogue acts. In Proceedings of Twendial'98, 13th Twente Workshop on Language Technology, pages 207-222.",
1168
- "links": null
1169
- },
1170
- "BIBREF17": {
1171
- "ref_id": "b17",
1172
- "title": "Dialogue acts are rational plans",
1173
- "authors": [
1174
- {
1175
- "first": "M",
1176
- "middle": [
1177
- "D"
1178
- ],
1179
- "last": "Sadek",
1180
- "suffix": ""
1181
- }
1182
- ],
1183
- "year": 1991,
1184
- "venue": "Proceedings o] the ESCA/ETR workshop on multi-modal dialogue",
1185
- "volume": "",
1186
- "issue": "",
1187
- "pages": "",
1188
- "other_ids": {},
1189
- "num": null,
1190
- "urls": [],
1191
- "raw_text": "M. D. Sadek. 1991. Dialogue acts are rational plans. In Proceedings o] the ESCA/ETR workshop on multi-modal dialogue.",
1192
- "links": null
1193
- },
1194
- "BIBREF18": {
1195
- "ref_id": "b18",
1196
- "title": "Agent communication languages: Rethinking the principles",
1197
- "authors": [
1198
- {
1199
- "first": "M",
1200
- "middle": [
1201
- "P"
1202
- ],
1203
- "last": "Singh",
1204
- "suffix": ""
1205
- }
1206
- ],
1207
- "year": 1998,
1208
- "venue": "IEEE Computer",
1209
- "volume": "31",
1210
- "issue": "12",
1211
- "pages": "40--47",
1212
- "other_ids": {},
1213
- "num": null,
1214
- "urls": [],
1215
- "raw_text": "M. P. Singh. 1998. Agent communication lan- guages: Rethinking the principles. IEEE Com- puter, 31(12):40-47.",
1216
- "links": null
1217
- },
1218
- "BIBREF19": {
1219
- "ref_id": "b19",
1220
- "title": "A speech acts approach to grounding in conversation",
1221
- "authors": [
1222
- {
1223
- "first": "D",
1224
- "middle": [
1225
- "R"
1226
- ],
1227
- "last": "Traum",
1228
- "suffix": ""
1229
- },
1230
- {
1231
- "first": "J",
1232
- "middle": [
1233
- "F"
1234
- ],
1235
- "last": "Allen",
1236
- "suffix": ""
1237
- }
1238
- ],
1239
- "year": 1992,
1240
- "venue": "Proceedings 2nd International Conference on Spoken Language Processing (ICSLP-92)",
1241
- "volume": "",
1242
- "issue": "",
1243
- "pages": "137--177",
1244
- "other_ids": {},
1245
- "num": null,
1246
- "urls": [],
1247
- "raw_text": "D. R. Traum and J. F. Allen. 1992. A speech acts approach to grounding in conversation. In Pro- ceedings 2nd International Conference on Spoken Language Processing (ICSLP-92), pages 137-40, October.",
1248
- "links": null
1249
- },
1250
- "BIBREF20": {
1251
- "ref_id": "b20",
1252
- "title": "Discourse obligations in dialogue processing",
1253
- "authors": [
1254
- {
1255
- "first": "D",
1256
- "middle": [
1257
- "R"
1258
- ],
1259
- "last": "Traum",
1260
- "suffix": ""
1261
- },
1262
- {
1263
- "first": "J",
1264
- "middle": [
1265
- "F"
1266
- ],
1267
- "last": "Allen",
1268
- "suffix": ""
1269
- }
1270
- ],
1271
- "year": 1994,
1272
- "venue": "Proceedings of the 32nd Annual meeting of the Association for Computational Linguistics",
1273
- "volume": "",
1274
- "issue": "",
1275
- "pages": "1--8",
1276
- "other_ids": {},
1277
- "num": null,
1278
- "urls": [],
1279
- "raw_text": "D. R. Traum and J. F. Allen. 1994. Discourse obli- gations in dialogue processing. In Proceedings of the 32nd Annual meeting of the Association for Computational Linguistics, pages 1-8, June.",
1280
- "links": null
1281
- },
1282
- "BIBREF21": {
1283
- "ref_id": "b21",
1284
- "title": "A model of dialogue moves and information state revision",
1285
- "authors": [
1286
- {
1287
- "first": "D",
1288
- "middle": [
1289
- "R"
1290
- ],
1291
- "last": "Traum",
1292
- "suffix": ""
1293
- },
1294
- {
1295
- "first": "J",
1296
- "middle": [],
1297
- "last": "Bos",
1298
- "suffix": ""
1299
- },
1300
- {
1301
- "first": "R",
1302
- "middle": [],
1303
- "last": "Cooper",
1304
- "suffix": ""
1305
- },
1306
- {
1307
- "first": "S",
1308
- "middle": [],
1309
- "last": "Larsson",
1310
- "suffix": ""
1311
- },
1312
- {
1313
- "first": "I",
1314
- "middle": [],
1315
- "last": "Lewin",
1316
- "suffix": ""
1317
- },
1318
- {
1319
- "first": "C",
1320
- "middle": [],
1321
- "last": "Matheson",
1322
- "suffix": ""
1323
- },
1324
- {
1325
- "first": "M",
1326
- "middle": [],
1327
- "last": "Poesio",
1328
- "suffix": ""
1329
- }
1330
- ],
1331
- "year": 1999,
1332
- "venue": "",
1333
- "volume": "",
1334
- "issue": "",
1335
- "pages": "",
1336
- "other_ids": {},
1337
- "num": null,
1338
- "urls": [],
1339
- "raw_text": "D. R. Traum, J. Bos, R. Cooper, S. Larsson, I. Lewin, C. Matheson, and M. Poesio. 1999. A model of dialogue moves and information state re- vision. Technical Report Deliverable D2.1, Trindi.",
1340
- "links": null
1341
- },
1342
- "BIBREF22": {
1343
- "ref_id": "b22",
1344
- "title": "Towards a Normative Model of Grounding in Collaboration",
1345
- "authors": [
1346
- {
1347
- "first": "D",
1348
- "middle": [
1349
- "R"
1350
- ],
1351
- "last": "Traum",
1352
- "suffix": ""
1353
- },
1354
- {
1355
- "first": "P",
1356
- "middle": [],
1357
- "last": "Dillenbourg",
1358
- "suffix": ""
1359
- }
1360
- ],
1361
- "year": 1998,
1362
- "venue": "Proceedings of the ESSLLI98 workshop on Mutual Knowledge, Common Ground and Public Information",
1363
- "volume": "",
1364
- "issue": "",
1365
- "pages": "",
1366
- "other_ids": {},
1367
- "num": null,
1368
- "urls": [],
1369
- "raw_text": "D. R. Traum and P. Dillenbourg. 1998. Towards a Normative Model of Grounding in Collaboration. In Proceedings of the ESSLLI98 workshop on Mu- tual Knowledge, Common Ground and Public In- formation.",
1370
- "links": null
1371
- },
1372
- "BIBREF23": {
1373
- "ref_id": "b23",
1374
- "title": "A computational theory of grounding in natural language conversation",
1375
- "authors": [
1376
- {
1377
- "first": "D",
1378
- "middle": [
1379
- "R"
1380
- ],
1381
- "last": "Traum",
1382
- "suffix": ""
1383
- }
1384
- ],
1385
- "year": 1994,
1386
- "venue": "",
1387
- "volume": "",
1388
- "issue": "",
1389
- "pages": "",
1390
- "other_ids": {},
1391
- "num": null,
1392
- "urls": [],
1393
- "raw_text": "D. R. Traum. 1994. A computational theory of grounding in natural language conversation.",
1394
- "links": null
1395
- }
1396
- },
1397
- "ref_entries": {
1398
- "FIGREF0": {
1399
- "uris": null,
1400
- "text": "Figure 1: TrindiKit Architecture",
1401
- "num": null,
1402
- "type_str": "figure"
1403
- },
1404
- "FIGREF1": {
1405
- "uris": null,
1406
- "text": "*~,~,d~(W.bU3)/ JI lINT: <letrome(C)>] J Structure of Information States model misunderstandings arising from the dialogue participants having differing views on what has been",
1407
- "num": null,
1408
- "type_str": "figure"
1409
- },
1410
- "FIGREF2": {
1411
- "uris": null,
1412
- "text": "=uxe( doZnfoR, q. rulet~.S, [ hearer(DP), latest_moves: in(Hove}, Move:valEec(pred,inforeq) ], [ incr_set(update_cycles,_), incr_set (next.dh_id, HID), next _du_name (ID), pushRec (w'cdu'tognd'dh, record ( [atype=Move, c level=2, id=HID ])), pushRec (e'cdu~tosnd'obl, record ( [pred~address, dp=DP,",
1413
- "num": null,
1414
- "type_str": "figure"
1415
- },
1416
- "TABREF0": {
1417
- "content": "<table><tr><td/><td colspan=\"3\">r [OBL: ~lddre~(C,CA2 ) ] understandingAct( W,DU3 )\\ ]</td><td>1 /</td><td>~1 //</td></tr><tr><td colspan=\"2\">~.. /..</td><td colspan=\"2\">/CAS:C2 ,~..,~g.(C.DU2)</td><td>\\/</td><td>II</td></tr><tr><td colspan=\"3\">..... / .... \\c^:: / sc : &lt; \" [COND: &lt; &gt;</td><td/><td>'// ! J</td><td>II H H</td></tr><tr><td colspan=\"2\">UDUS: &lt;DU3&gt;</td><td/><td/><td/><td>H</td></tr><tr><td/><td>[</td><td colspan=\"2\">[OBL: &lt;,ddri~z(C.CA2 ) &gt;</td><td>]]</td><td>H</td></tr><tr><td/><td/><td>DH:</td><td colspan=\"2\">&lt;CA2: C2. into requi~t( W.?help fore1 ) :&gt;</td></tr><tr><td>/ : |</td><td>/ LID:</td><td colspan=\"2\">LCOND: &lt; &gt; DU2</td><td colspan=\"2\">J/ If J II</td></tr><tr><td>/ /</td><td>[ /</td><td>/</td><td colspan=\"2\">/CA5: C2. dil c,(C ivemule(W) ) \\</td><td>lilt //H</td></tr><tr><td>/ ko,, I /</td><td>/ / [</td><td colspan=\"4\">/DH: LCOND: &lt;IICIpt(W.CA6)-&gt; obl(W~iveroutc(W))&gt;J III (CAS: C2. a,swer( C.CA2.CA4 ) ) //11 //11 IIII</td></tr><tr><td>/</td><td>LID:</td><td>DU3</td><td/><td/><td>JII</td></tr></table>",
1418
- "text": "). Some have been subsequently edited for brevity and clarity.",
1419
- "html": null,
1420
- "num": null,
1421
- "type_str": "table"
1422
- }
1423
- }
1424
- }
1425
- }
Full_text_JSON/prefixA/json/A00/A00-2002.json DELETED
@@ -1,1299 +0,0 @@
1
- {
2
- "paper_id": "A00-2002",
3
- "header": {
4
- "generated_with": "S2ORC 1.0.0",
5
- "date_generated": "2023-01-19T01:12:27.859937Z"
6
- },
7
- "title": "The Automatic Translation of Discourse Structures",
8
- "authors": [
9
- {
10
- "first": "Daniel",
11
- "middle": [],
12
- "last": "Marcu",
13
- "suffix": "",
14
- "affiliation": {},
15
- "email": "[email protected]"
16
- },
17
- {
18
- "first": "Lynn",
19
- "middle": [],
20
- "last": "Carlson",
21
- "suffix": "",
22
- "affiliation": {},
23
- "email": ""
24
- },
33
- {
34
- "first": "Maki",
35
- "middle": [],
36
- "last": "Watanabe",
37
- "suffix": "",
38
- "affiliation": {},
39
- "email": "[email protected]"
40
- }
41
- ],
42
- "year": "",
43
- "venue": null,
44
- "identifiers": {},
45
- "abstract": "We empirically show that there are significant differences between the discourse structure of Japanese texts and the discourse structure of their corresponding English translations. To improve translation quality, we propose a computational model for rewriting discourse structures. When we train our model on a parallel corpus of manually built Japanese and English discourse structure trees, we learn to rewrite Japanese trees as trees that are closer to the natural English rendering than the original ones. 1 Motivation Almost all current MT systems process text one sentence at a time. Because of this limited focus, MT systems cannot regroup and reorder the clauses and sentences of an input text to achieve the most natural rendering in a target language. Yet, even between languages as close as English and French, there is a 10% mismatch in number of sentences-what is said in two sentences in one language is said in only one, or in three, in the other (Gale and Church, 1993). For distant language pairs, such as Japanese and English, the differences are more significant. Consider, for example, Japanese sentence (1), a word-byword \"gloss\" of it (2), and a two-sentence translation of it that was produced by a professional translator (3).",
46
- "pdf_parse": {
47
- "paper_id": "A00-2002",
48
- "_pdf_hash": "",
49
- "abstract": [
50
- {
51
- "text": "We empirically show that there are significant differences between the discourse structure of Japanese texts and the discourse structure of their corresponding English translations. To improve translation quality, we propose a computational model for rewriting discourse structures. When we train our model on a parallel corpus of manually built Japanese and English discourse structure trees, we learn to rewrite Japanese trees as trees that are closer to the natural English rendering than the original ones. 1 Motivation Almost all current MT systems process text one sentence at a time. Because of this limited focus, MT systems cannot regroup and reorder the clauses and sentences of an input text to achieve the most natural rendering in a target language. Yet, even between languages as close as English and French, there is a 10% mismatch in number of sentences-what is said in two sentences in one language is said in only one, or in three, in the other (Gale and Church, 1993). For distant language pairs, such as Japanese and English, the differences are more significant. Consider, for example, Japanese sentence (1), a word-byword \"gloss\" of it (2), and a two-sentence translation of it that was produced by a professional translator (3).",
52
- "cite_spans": [],
53
- "ref_spans": [],
54
- "eq_spans": [],
55
- "section": "Abstract",
56
- "sec_num": null
57
- }
58
- ],
59
- "body_text": [
60
- {
61
- "text": "(1) [ The labeled spans of text represent elementary discourse units (edus), i.e., minimal text spans that have an unambiguous discourse function (Mann and Thompson, 1988 ). If we analyze the text fragments closely, we will notice that in translating sentence (1), a professional translator chose to realize the information in Japanese unit 2 first (unit 2 in text (1) corresponds roughly to unit 1 in text (3)); to realize then some of the information in Japanese unit 1 (part of unit 1 in text (1) corresponds to unit 2 in text (3)); to fuse then information given in units 1, 3, and 5 in text (1) and realize it in English as unit 3; and so on. Also, the translator chose to repackage the information in the original Japanese sentence into two English sentences.",
62
- "cite_spans": [
63
- {
64
- "start": 4,
65
- "end": 5,
66
- "text": "[",
67
- "ref_id": null
68
- },
69
- {
70
- "start": 146,
71
- "end": 170,
72
- "text": "(Mann and Thompson, 1988",
73
- "ref_id": "BIBREF4"
74
- }
75
- ],
76
- "ref_spans": [],
77
- "eq_spans": [],
78
- "section": "",
79
- "sec_num": null
80
- },
81
- {
82
- "text": "At the elementary unit level, the correspondence between Japanese sentence (1) and its English translation (3) can be represented as in (4}, where j C e denotes the fact that the semantic content of unit j is realized fully in unit e; j D e denotes the fact that the semantic content of unit e is realized fully in unit j; j = e denotes the fact that units j and e are semantically equivalent; and j ~ e denotes the fact that there is a semantic overlap between units j and e, but neither proper inclusion nor proper equivalence.",
83
- "cite_spans": [],
84
- "ref_spans": [],
85
- "eq_spans": [],
86
- "section": "",
87
- "sec_num": null
88
- },
89
- {
90
- "text": ".!1 D e2;jt -~ e3;",
91
- "cite_spans": [],
92
- "ref_spans": [],
93
- "eq_spans": [],
94
- "section": "",
95
- "sec_num": null
96
- },
97
- {
98
- "text": ".12 ----el; 33 C e3;",
99
- "cite_spans": [],
100
- "ref_spans": [],
101
- "eq_spans": [],
102
- "section": "",
103
- "sec_num": null
104
- },
105
- {
106
- "text": ".14 ~ e4;j4 ~ es; .15 ~ e3;",
107
- "cite_spans": [],
108
- "ref_spans": [],
109
- "eq_spans": [],
110
- "section": "",
111
- "sec_num": null
112
- },
113
- {
114
- "text": ".16 C e6;",
115
- "cite_spans": [],
116
- "ref_spans": [],
117
- "eq_spans": [],
118
- "section": "",
119
- "sec_num": null
120
- },
121
- {
122
- "text": ".17 C e6 Hence. the mappings in (4) provide all explicit representation of the way information is re-ordered and re-packaged when translated from Japanese into English. However, when translating text, it is also the case that t he rhetorical rendering changes. What is realized ill Japanese using an CONTRAST relation can be realized in English using, for example, a COXl- PARISON or a CONCESSION relation.",
123
- "cite_spans": [
124
- {
125
- "start": 373,
126
- "end": 380,
127
- "text": "PARISON",
128
- "ref_id": null
129
- }
130
- ],
131
- "ref_spans": [],
132
- "eq_spans": [],
133
- "section": "",
134
- "sec_num": null
135
- },
136
- {
137
- "text": "Figure I presents in the style of Mann and Thompson (1988) the discourse structures of text fragments (1) and (3), Each discourse structure is a tree whose leaves correspond to the edus and whose internal nodes correspond to contiguous text spans. Each node is characterized by a status (NUCLEUS or SATELLITE) and a rhetorical relation, which is a relation that holds between two non-overlapping text spans. The distinction between nuclei and satellites comes from the empirical observation that the nucleus expresses what is more essential to the writer's intention than the satellite: and that the nucleus of a rhetorical relation is comprehensible independent of tile satellite, but not vice versa. When spans are equally important, the relation is nmltinuclear: for example, the CONTRAST relation that holds between unit [3] and span [4.5] in the rhetorical structure of the English text in figure 1 is nmhinuclear. Rhetorical relations that end in the suffix \"'-e'\" denote relations that correspond to embedded syntactic con-stituents. For example, the ELABORATION-OBJECT-ATTRIBUTE-E relation that holds between units 2 and 1 in the English discourse structure corresponds to a restrictive relative.",
138
- "cite_spans": [
139
- {
140
- "start": 838,
141
- "end": 843,
142
- "text": "[4.5]",
143
- "ref_id": null
144
- }
145
- ],
146
- "ref_spans": [],
147
- "eq_spans": [],
148
- "section": "",
149
- "sec_num": null
150
- },
151
- {
152
- "text": "If one knows the mappings at the edu level, one can determine the mappings at the span (discourse constituent) level as well. For example, using the elementary mappings in (4), one call determine that Japanese span [1,2] corresponds to English span [I,2] , Japanese unit [4] to English span [4, 5] , Japanese span [6.7] to English unit [6], Japanese span [1.5] to English span [1.5], and so on. As Figure 1 shows, the CONCESSION relation that holds between spans [1, 5] and [6, 7] in the Japanese tree corresponds to a similar relation that. holds between span [1,5] and unit [6] in the English tree (modulo the fact that, in Japanese, the relation holds between sent ence fragments, while in English it holds between full sentences). However, the TEMPORAL-AFTER relation that holds between units [:3] and [4] ill the Japanese tree is realized as a CONTRAST relation between unit [3] and span [4.5] in MT systems at the syntactic level. For example, the re-ordering of units 1 and 2, can be dealt with using only syntactic models. However, as we will see in Section 2, there are significant differences between Japanese and English with respect to the way information is packaged and organized rhetorically not only at the sentence level, but also, at the paragraph and text levels. More specifically, as humans translate Japanese into English, they re-order the clauses, sentences, and paragraphs of Japanese texts, they re-package the information into clauses, sentences, and paragraphs that are not a one-to-one mapping of the original Japanese units, and they rhetorically re-organize the structure of the translated text so as to reflect rhetorical constraints specific to English. If a translation system is to produce text that is not only grammatical but also coherent, it will have to ensure that the discourse structure of the target text reflects the natural renderings of the target language, and not that of the source language. In Section 2, we empirically show that there are significant differences between the rhetorical structure of Japanese texts and their corresponding English translations. These differences justify our investigation into developing computational models for discourse structure rewriting. In Section 3, we present such a rewriting model, which re-orders the edus of the original text, determines English-specific clause, sentence, and paragraph boundaries, and rebuilds the Japanese discourse structure of a text using English-specific rhetorical renderings. In Section 4, we evaluate the performance of an implementation of this model. We end with a discussion.",
153
- "cite_spans": [
154
- {
155
- "start": 249,
156
- "end": 254,
157
- "text": "[I,2]",
158
- "ref_id": null
159
- },
160
- {
161
- "start": 291,
162
- "end": 294,
163
- "text": "[4,",
164
- "ref_id": null
165
- },
166
- {
167
- "start": 295,
168
- "end": 297,
169
- "text": "5]",
170
- "ref_id": null
171
- },
172
- {
173
- "start": 463,
174
- "end": 466,
175
- "text": "[1,",
176
- "ref_id": null
177
- },
178
- {
179
- "start": 467,
180
- "end": 469,
181
- "text": "5]",
182
- "ref_id": null
183
- },
184
- {
185
- "start": 474,
186
- "end": 477,
187
- "text": "[6,",
188
- "ref_id": null
189
- },
190
- {
191
- "start": 478,
192
- "end": 480,
193
- "text": "7]",
194
- "ref_id": null
195
- },
196
- {
197
- "start": 576,
198
- "end": 579,
199
- "text": "[6]",
200
- "ref_id": null
201
- },
202
- {
203
- "start": 893,
204
- "end": 898,
205
- "text": "[4.5]",
206
- "ref_id": null
207
- }
208
- ],
209
- "ref_spans": [
210
- {
211
- "start": 395,
212
- "end": 406,
213
- "text": "As Figure 1",
214
- "ref_id": "FIGREF1"
215
- }
216
- ],
217
- "eq_spans": [],
218
- "section": "",
219
- "sec_num": null
220
- },
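Deriving span-level correspondences from the edu-level mapping (4) is mechanical, so it can be illustrated with a short sketch. The snippet below is our own illustrative code, not the authors' implementation: the EDU_MAP dictionary and the english_span_for helper are assumptions made for the example. Overlap (~) links are folded into a simple set union here, which can over-extend a span; the derivation in the paper treats them more carefully.

```python
# Minimal sketch (not the paper's implementation) of deriving span-level
# correspondences from edu-level mappings such as (4).

# Mapping (4), reduced to "which English edus does each Japanese edu touch".
EDU_MAP = {1: {2, 3}, 2: {1}, 3: {3}, 4: {4, 5}, 5: {3}, 6: {6}, 7: {6}}

def english_span_for(jap_span):
    """Return the minimal contiguous English span covered by a Japanese span.

    jap_span is an inclusive pair (i, j) of Japanese edu indices. Note that
    naive set union over overlap (~) links can over-extend some spans.
    """
    covered = set()
    for unit in range(jap_span[0], jap_span[1] + 1):
        covered |= EDU_MAP.get(unit, set())
    return (min(covered), max(covered)) if covered else None

print(english_span_for((1, 5)))  # (1, 5): Japanese [1,5] -> English [1,5]
print(english_span_for((6, 7)))  # (6, 6): Japanese [6,7] -> English unit [6]
```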
221
- {
222
- "text": "In order to assess the role of discourse structure in MT, we built manually a corpus of discourse trees for 40 Japanese texts and their corresponding translations. The texts were selected randomly from the ARPA corpus (White and O'Connell, 1994) . On average, each text had about 460 words. The Japanese texts had a total of 335 paragraphs and 773 sentences. The English texts had a total of 337 paragraphs and 827 sentences.",
223
- "cite_spans": [
224
- {
225
- "start": 218,
226
- "end": 245,
227
- "text": "(White and O'Connell, 1994)",
228
- "ref_id": "BIBREF12"
229
- }
230
- ],
231
- "ref_spans": [],
232
- "eq_spans": [],
233
- "section": "Experiment",
234
- "sec_num": "2"
235
- },
236
- {
237
- "text": "We developed a discourse annotation protocol for Japanese and English along the lines followed by . We used Marcu's discourse annotation tool (1999) in order to manually construct the discourse structure of all Japanese and English texts in the corpus. 10% of the Japanese and English texts were rhetorically labeled by two of us.",
238
- "cite_spans": [],
239
- "ref_spans": [],
240
- "eq_spans": [],
241
- "section": "Experiment",
242
- "sec_num": "2"
243
- },
244
- {
245
- "text": "The tool and the annotation protocol are available at http://www.isi.edu/~marcu/software/. The annotation procedure yielded over the entire corpus 2641 Japanese edus and 2363 English edus.",
246
- "cite_spans": [],
247
- "ref_spans": [],
248
- "eq_spans": [],
249
- "section": "Experiment",
250
- "sec_num": "2"
251
- },
252
- {
253
- "text": "We computed the reliability of the annotation using 's method for computing kappa statistics (Siegel and Castellan, 1988) over hierarchical structures. Table 1 displays average kappa statistics that reflect the reliability of the annotation of elementary discourse units, k~,, hierarchical discourse spans, ks, hierarchical nuclearity assignments, k,~, and hierarchical rhetorical relation assignments, k~. Kappa figures higher than 0.8 correspond to good agreement; kappa figures higher than 0.6 correspond to acceptable agreement. All kappa statistics were statistically significant at levels higher than a = 0.01. In addition to the kappa statistics, table 1 also displays in parentheses the average number of data points per document, over which the kappa statistics were computed.",
254
- "cite_spans": [
255
- {
256
- "start": 93,
257
- "end": 121,
258
- "text": "(Siegel and Castellan, 1988)",
259
- "ref_id": "BIBREF9"
260
- }
261
- ],
262
- "ref_spans": [
263
- {
264
- "start": 152,
265
- "end": 159,
266
- "text": "Table 1",
267
- "ref_id": null
268
- }
269
- ],
270
- "eq_spans": [],
271
- "section": "Experiment",
272
- "sec_num": "2"
273
- },
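For readers unfamiliar with the metric, the kappa statistic corrects raw inter-annotator agreement for the agreement expected by chance. The snippet below computes two-annotator kappa over flat categorial judgments; this is a simplification for illustration only, since the paper applies kappa to hierarchical discourse structures following Marcu et al. (1999).

```python
from collections import Counter

def kappa(labels_a, labels_b):
    """Two-annotator kappa, k = (P(A) - P(E)) / (1 - P(E)).

    A flat simplification: the paper computes kappa over hierarchical
    structures, which adds bookkeeping but uses the same formula.
    """
    n = len(labels_a)
    observed = sum(a == b for a, b in zip(labels_a, labels_b)) / n
    freq_a, freq_b = Counter(labels_a), Counter(labels_b)
    expected = sum(freq_a[c] * freq_b[c] for c in freq_a) / (n * n)
    return (observed - expected) / (1 - expected)

# Two annotators labeling the nuclearity of six spans:
print(kappa(list("NSNNSN"), list("NSNSSN")))  # ~0.67
```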
274
- {
275
- "text": "For each pair of Japanese-English discourse structures, we also built manually an alignment file, which specified in the notation discussed on page 1 the correspondence between the edus of the Japanese text and the edus of its English translation.",
276
- "cite_spans": [],
277
- "ref_spans": [],
278
- "eq_spans": [],
279
- "section": "Experiment",
280
- "sec_num": "2"
281
- },
282
- {
283
- "text": "We computed the similarity between English and Japanese discourse trees using labeled recall and precision figures that reflected the resemblance of tile Japanese and English discourse structures with respect to their assignment of edu boundaries, hierarchical spans, nuclearity, and rhetorical relations.",
284
- "cite_spans": [],
285
- "ref_spans": [],
286
- "eq_spans": [],
287
- "section": "Experiment",
288
- "sec_num": "2"
289
- },
290
- {
291
- "text": "Because the trees we compared differ from one language to the other in the number of elementary units, the order of these units, and the way the units are grouped recursively into discourse spans, we computed two types of recall and precision figures. In computing Position-Dependent (P-D) recall and precision figures, a Japanese span was considered to match an English span when the Japanese span contained all the Japanese edus that corresponded to tile edus in the English span, and when the Japanese and English spans appeared in tile same position with respect to the overall structure. For example, the English tree in figure 1 is characterized by 10 subsentential spans: [3, 5] , and [1, 5] . (Span [1,6] subsumes 2 sentences, so it is not sub-sentential.) The Japanese discourse tree has only 4 spans that could be matched in the same positions with English spans, namely spans [1,2], [4], [5] , and [1, 5] . Hence the similarity between the Japanese tree and the English tree with respect Level Units Spans Status/Nuclearity Relations P-D P P-D R P-D P P-DR P-DP P-DR P-I) P P-DR Table 2 : Similarity of the Japanese to their discourse structure below the sentence level has a recall of 4/10 and a precision of 4/11 (in Figure 1, there are 11 sub-sentential Japanese spans).",
292
- "cite_spans": [
293
- {
294
- "start": 679,
295
- "end": 682,
296
- "text": "[3,",
297
- "ref_id": null
298
- },
299
- {
300
- "start": 683,
301
- "end": 685,
302
- "text": "5]",
303
- "ref_id": null
304
- },
305
- {
306
- "start": 692,
307
- "end": 695,
308
- "text": "[1,",
309
- "ref_id": null
310
- },
311
- {
312
- "start": 696,
313
- "end": 698,
314
- "text": "5]",
315
- "ref_id": null
316
- },
317
- {
318
- "start": 899,
319
- "end": 902,
320
- "text": "[5]",
321
- "ref_id": null
322
- },
323
- {
324
- "start": 909,
325
- "end": 912,
326
- "text": "[1,",
327
- "ref_id": null
328
- },
329
- {
330
- "start": 913,
331
- "end": 915,
332
- "text": "5]",
333
- "ref_id": null
334
- }
335
- ],
336
- "ref_spans": [
337
- {
338
- "start": 1090,
339
- "end": 1097,
340
- "text": "Table 2",
341
- "ref_id": null
342
- },
343
- {
344
- "start": 1230,
345
- "end": 1236,
346
- "text": "Figure",
347
- "ref_id": null
348
- }
349
- ],
350
- "eq_spans": [],
351
- "section": "Experiment",
352
- "sec_num": "2"
353
- },
362
- {
363
- "text": "In computing Position-Independent (P-I) recall and precision figures, even when a Japanese span \"floated\" during the translation to a position in the English tree that was different from the position in the initial tree, the P-I recall and precision figures were not affected. The Position-Independent figures reflect the intuition that if two trees tl and t2 both have a subtree t, tl and t2 are more similar than if they were if they didn't share any tree. At the sentence level, we hence assume that if, for example, the syntactic structure of a relative clause is translated appropriately (even though it is not appropriately attached), this is better than translating wrongly that clause. The Position-Independent figures offer a more optimistic metric for comparing discourse trees. They span a wider range of values than the Position-Dependent figures, which enable a finer grained comparison, which in turn enables a better characterization of the differences between Japanese and English discourse structures. When one takes an optimistic stance, for the spans at the sub-sentential level in the trees in Table 1 In order to provide a better estimate of how close two discourse trees were, we computed Position-Dependent and -Independent recall and precision figures for the sentential level (where units are given by edus and spans are given by sets of edus or single sentences); paragraph level (where units are given by sentences and spans are given by sets of sentences or single paragraphs); and text level (where units are given by paragraphs and spans are given by sets of paragraphs). These figures offer a detailed picture of how discourse structures and relations are mapped from one language to the other across all and English discourse structures discourse levels, from sentence to text. The differences at the sentence level can be explained by differences between the syntactic structures of Japanese and English. The differences at the paragraph and text levels have a purely rhetorical explanation.",
364
- "cite_spans": [],
365
- "ref_spans": [
366
- {
367
- "start": 1114,
368
- "end": 1121,
369
- "text": "Table 1",
370
- "ref_id": null
371
- }
372
- ],
373
- "eq_spans": [],
374
- "section": "Experiment",
375
- "sec_num": "2"
376
- },
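The two regimes differ only in how a candidate span is matched: Position-Dependent matching also requires the mapped span to occupy the same position in the overall structure, while Position-Independent matching accepts it anywhere in the tree. The sketch below is our schematic reading of the metric (the span representation, the function name, and the toy data are ours, not the authors' code).

```python
def recall_precision(jap_spans, eng_spans, position_dependent=True):
    """Labeled recall/precision over discourse spans.

    Spans are (edu_set, tree_address) pairs, where edu_set is a frozenset
    of English edu indices (Japanese spans are assumed already mapped via
    the edu-level correspondences). Schematic: duplicate spans are ignored.
    """
    if position_dependent:
        matches = len(set(jap_spans) & set(eng_spans))
    else:
        # Ignore positions: a span counts if its edu set occurs anywhere.
        matches = len({e for e, _ in jap_spans} & {e for e, _ in eng_spans})
    return matches / len(eng_spans), matches / len(jap_spans)

jap = [(frozenset({1, 2}), "0.0"), (frozenset({4}), "1.0"), (frozenset({5}), "1.1")]
eng = [(frozenset({1, 2}), "0.1"), (frozenset({4}), "1.0")]
print(recall_precision(jap, eng, True))   # (0.5, 0.333...): positions must agree
print(recall_precision(jap, eng, False))  # (1.0, 0.666...): positions ignored
```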
377
- {
378
- "text": "As expected, when we computed the recall and precision figures with respect to the nuclearity and relation assignments, we also factored in the statuses and the rhetorical relations that labeled each pair of spans. Table 2 smnmarizes the results (P-D and P-I (R)ecall and (P)recision figures) for each level (Sentence, Paragraph, and Text). The numbers in the \"Weighted Average\" line report averages of the Sentence-, Paragraph-, and Text-specific figures, weighted according to the number of units at each level. The numbers in the \"All\" line reflect recall and precision figures computed across the entire trees, with no attention paid to sentence and paragraph boundaries.",
379
- "cite_spans": [],
380
- "ref_spans": [
381
- {
382
- "start": 215,
383
- "end": 222,
384
- "text": "Table 2",
385
- "ref_id": null
386
- }
387
- ],
388
- "eq_spans": [],
389
- "section": "Experiment",
390
- "sec_num": "2"
391
- },
392
- {
393
- "text": "Given the significantly different syntactic structures of Japanese and English, we were not surprised by the low recall and precision results that reflect the similarity between discourse trees built below the sentence level. However, as Table 2 shows, there are significant differences between discourse trees at the paragraph and text levels as well. For exampie, the Position-Independent figures show that only about 62% of the sentences and only about 53% of the hierarchical spans built across sentences could be matched between the two corpora. When one looks at the status and rhetorical relations associated with the spans built across sentences at the paragraph level, the P-I recall and precision figures drop to about 43% and 35% respectively.",
394
- "cite_spans": [],
395
- "ref_spans": [
396
- {
397
- "start": 238,
398
- "end": 245,
399
- "text": "Table 2",
400
- "ref_id": null
401
- }
402
- ],
403
- "eq_spans": [],
404
- "section": "Experiment",
405
- "sec_num": "2"
406
- },
407
- {
408
- "text": "The differences in recall and precision are explained both by differences in the way information is packaged into paragraphs in the two languages and the way it is structured rhetorically both within and above the paragraph level.",
409
- "cite_spans": [],
410
- "ref_spans": [],
411
- "eq_spans": [],
412
- "section": "Experiment",
413
- "sec_num": "2"
414
- },
415
- {
416
- "text": "These results strongly suggest that if one attempts to translate Japanese into English on a sentence-bysentence basis, it is likely that the resulting text will be unnatural from a discourse perspective. For example, if some information rendered using a CON-TRAST relation in Japanese is rendered using an ELABORATION relation in English, it would be inappropriate to use a discourse marker like \"but\" in the English translation, although that would be consistent with the Japanese discourse structure. An inspection of the rhetorical mappings between Japanese and English revealed that some Japanese rhetorical renderings are consistently mapped into one or a few preferred renderings in English. For example, 34 of 115 CONTRAST relations in the Japanese texts are mapped into CONTRAST relations in English; 27 become nuclei of relations such as ANTITHE-SIS and CONCESSION, 14 are translated as COMPAR-ISON relations, 6 as satellites of CONCESSION relations, 5 as LIST relations, etc. Our goal is to learn these systematic discourse mapping rules and exploit them in a machine translation context.",
417
- "cite_spans": [],
418
- "ref_spans": [],
419
- "eq_spans": [],
420
- "section": "Experiment",
421
- "sec_num": "2"
422
- },
423
- {
424
- "text": "Towards a discourse-based machine translation system",
425
- "cite_spans": [],
426
- "ref_spans": [],
427
- "eq_spans": [],
428
- "section": "3",
429
- "sec_num": null
430
- },
431
- {
432
- "text": "We are currently working towards building the modules of a Discourse-Based Machine Translation system that works along the following lines.",
433
- "cite_spans": [],
434
- "ref_spans": [],
435
- "eq_spans": [],
436
- "section": "Overall architecture",
437
- "sec_num": "3.1"
438
- },
439
- {
440
- "text": "1. A discourse parser, such as those described by Sumita et al. (1992) , Kurohashi (1994) , and MarcH (1999), initially derives the discourse structure of the text given as input.",
441
- "cite_spans": [
442
- {
443
- "start": 50,
444
- "end": 70,
445
- "text": "Sumita et al. (1992)",
446
- "ref_id": "BIBREF11"
447
- },
448
- {
449
- "start": 73,
450
- "end": 89,
451
- "text": "Kurohashi (1994)",
452
- "ref_id": "BIBREF2"
453
- }
454
- ],
455
- "ref_spans": [],
456
- "eq_spans": [],
457
- "section": "Overall architecture",
458
- "sec_num": "3.1"
459
- },
460
- {
461
- "text": "A discourse-structure transfer module rewrites the discourse structure of the input text so as to reflect a discourse rendering that is natural to the target language.",
462
- "cite_spans": [],
463
- "ref_spans": [],
464
- "eq_spans": [],
465
- "section": "2.",
466
- "sec_num": null
467
- },
468
- {
469
- "text": "into the target language using translation and language models that incorporate discoursespecific features, which are extracted from the outputs of the discourse parser and discourse transfer modules.",
470
- "cite_spans": [],
471
- "ref_spans": [],
472
- "eq_spans": [],
473
- "section": "A statistical module maps the input text",
474
- "sec_num": "3."
475
- },
476
- {
477
- "text": "In this paper, we focus only on the discoursestructure transfer module. That is, we investigate the feasibility of building such a module.",
478
- "cite_spans": [],
479
- "ref_spans": [],
480
- "eq_spans": [],
481
- "section": "A statistical module maps the input text",
482
- "sec_num": "3."
483
- },
484
- {
485
- "text": "In order to learn to rewrite discourse structure trees, we first address a related problem, which we define below:",
486
- "cite_spans": [],
487
- "ref_spans": [],
488
- "eq_spans": [],
489
- "section": "The discourse-based transfer model",
490
- "sec_num": "3.2"
491
- },
492
- {
493
- "text": "Definition 3.1 Given two trees Ts and Tt and a correspondence Table C defined between Ts and Tt at the leaf level in terms of-----, C, D, and ~ relations, find a sequence of actions that rewrites the tree T~ into Tt.",
494
- "cite_spans": [],
495
- "ref_spans": [
496
- {
497
- "start": 62,
498
- "end": 69,
499
- "text": "Table C",
500
- "ref_id": null
501
- }
502
- ],
503
- "eq_spans": [],
504
- "section": "The discourse-based transfer model",
505
- "sec_num": "3.2"
506
- },
507
- {
508
- "text": "If for any tuple (Ts, Tt, C> such a sequence of actions can be derived, it is then possible to use a corpus of (Ts, Tt, C) tuples in order to automatically learn to derive from an unseen tree Ts,, which has the same structural properties as the trees Ts, a tree Ttj, which has structural properties similar to those of the trees Tt. In order to solve the problem in definition 3.1, we extend the shift-reduce parsing paradigm applied by Magerman (1995) , Hermjakob and Mooney (1997), and MarcH (1999) . In this extended paradigm, the transfer process starts with an empty Stack and an Input List that contains a sequence of elementary discourse trees edts, one edt for each edu in the tree Ts given as input. The status and rhetorical relation associated with each edt is undefined. At each step, the transfer module applies an operation that is aimed at building from the units in T, the discourse tree Tt. In the context of our discourse-transfer module, we need 7 types of operations:",
509
- "cite_spans": [
510
- {
511
- "start": 437,
512
- "end": 452,
513
- "text": "Magerman (1995)",
514
- "ref_id": "BIBREF3"
515
- },
516
- {
517
- "start": 455,
518
- "end": 468,
519
- "text": "Hermjakob and",
520
- "ref_id": "BIBREF1"
521
- },
522
- {
523
- "start": 469,
524
- "end": 487,
525
- "text": "Mooney (1997), and",
526
- "ref_id": "BIBREF1"
527
- },
528
- {
529
- "start": 488,
530
- "end": 500,
531
- "text": "MarcH (1999)",
532
- "ref_id": null
533
- }
534
- ],
535
- "ref_spans": [],
536
- "eq_spans": [],
537
- "section": "The discourse-based transfer model",
538
- "sec_num": "3.2"
539
- },
540
- {
541
- "text": "\u2022 SHIFT operations transfer the first edt from the input list into the stack;",
542
- "cite_spans": [],
543
- "ref_spans": [],
544
- "eq_spans": [],
545
- "section": "The discourse-based transfer model",
546
- "sec_num": "3.2"
547
- },
548
- {
549
- "text": "\u2022 REDUCE operations pop the two discourse trees located at the top of the stack; combine them into a new tree updating the statuses and rhetorical relation names of the trees involved in the operation; and push the new tree on the top of the stack. These operations are used to build the structure of the discourse tree in the target language.",
550
- "cite_spans": [],
551
- "ref_spans": [],
552
- "eq_spans": [],
553
- "section": "The discourse-based transfer model",
554
- "sec_num": "3.2"
555
- },
556
- {
557
- "text": "\u2022 BREAK operations are used in order to break the edt at the beginning of the input list into a predetermined number of units. These operations are used to ensure that the resulting tree has the same number of edts as Tt.",
558
- "cite_spans": [],
559
- "ref_spans": [],
560
- "eq_spans": [],
561
- "section": "The discourse-based transfer model",
562
- "sec_num": "3.2"
563
- },
564
- {
565
- "text": "A BREAK operation is necessary whenever a Japanese edu is mapped into nmltiple English units.",
566
- "cite_spans": [],
567
- "ref_spans": [],
568
- "eq_spans": [],
569
- "section": "The discourse-based transfer model",
570
- "sec_num": "3.2"
571
- },
572
- {
573
- "text": "\u2022 CREATE-NEXT operations are used in order to create English discourse constituents that have no correspondent in the Japanese tree.",
574
- "cite_spans": [],
575
- "ref_spans": [],
576
- "eq_spans": [],
577
- "section": "The discourse-based transfer model",
578
- "sec_num": "3.2"
579
- },
580
- {
581
- "text": "\u2022 FUSE operations are used in order to fuse the edt at the top of the stack into the tree that immediately precedes it. These operations are used whenever multiple Japanese edus are mapped into one English edu.",
582
- "cite_spans": [],
583
- "ref_spans": [],
584
- "eq_spans": [],
585
- "section": "The discourse-based transfer model",
586
- "sec_num": "3.2"
587
- },
588
- {
589
- "text": "\u2022 SWAP operations swap the edt at the beginning of the input list with an edt found one or more positions to the right. These operations are necessary for re-ordering discourse constituents.",
590
- "cite_spans": [],
591
- "ref_spans": [],
592
- "eq_spans": [],
593
- "section": "The discourse-based transfer model",
594
- "sec_num": "3.2"
595
- },
596
- {
597
- "text": "\u2022 ASSIGNTYPE operations assign one or more of the following types to the tree at the top of the stack: Unit, MultiUnit, Sentence, Paragraph, MultiParagraph, and Text. These op-erations are necessary in order to ensure sentence and paragraph boundaries that are specific to the target language.",
598
- "cite_spans": [],
599
- "ref_spans": [],
600
- "eq_spans": [],
601
- "section": "The discourse-based transfer model",
602
- "sec_num": "3.2"
603
- },
604
- {
605
- "text": "For example, the first sentence of the English tree in Figure 1 can be obtained from the original Japanese sequence by following the sequence of actions (5), whose effects are shown in Figure 2 . For the purpose of compactness, the figure does not illustrate the effect of ASSIGNTYPE actions. For the same purpose, some lines correspond to more than one action, For our corpus, in order to enable a discoursebased transfer module to derive any English discourse tree starting from any Japanese discourse tree, it is sufficient to implement: * one SHIFT operation;",
606
- "cite_spans": [],
607
- "ref_spans": [
608
- {
609
- "start": 55,
610
- "end": 63,
611
- "text": "Figure 1",
612
- "ref_id": "FIGREF1"
613
- },
614
- {
615
- "start": 185,
616
- "end": 193,
617
- "text": "Figure 2",
618
- "ref_id": "FIGREF3"
619
- }
620
- ],
621
- "eq_spans": [],
622
- "section": "The discourse-based transfer model",
623
- "sec_num": "3.2"
624
- },
625
- {
626
- "text": "\u2022 3 x 2 \u00d7 85 REDUCE operations; (For each of the three possible pairs of nuclearity assignments NUCLEUS-SATELLITE (NS), SATELLITE-NUCLEUS (SN), AND NUCLEUS-NUCLEUS (NN), there are two possible ways to reduce two adjacent trees (one results in a binary tree, the other in a non-binary tree (Marcu, 1999) ), and 85 relation names.)",
627
- "cite_spans": [
628
- {
629
- "start": 289,
630
- "end": 302,
631
- "text": "(Marcu, 1999)",
632
- "ref_id": "BIBREF5"
633
- }
634
- ],
635
- "ref_spans": [],
636
- "eq_spans": [],
637
- "section": "The discourse-based transfer model",
638
- "sec_num": "3.2"
639
- },
640
- {
641
- "text": "\u2022 three types of BREAK operations; (In our corpus, a Japanese unit is broken into two, three, or at most four units.)",
642
- "cite_spans": [],
643
- "ref_spans": [],
644
- "eq_spans": [],
645
- "section": "The discourse-based transfer model",
646
- "sec_num": "3.2"
647
- },
648
- {
649
- "text": "\u2022 one type of CREATE-NEXT operation;",
650
- "cite_spans": [],
651
- "ref_spans": [],
652
- "eq_spans": [],
653
- "section": "The discourse-based transfer model",
654
- "sec_num": "3.2"
655
- },
656
- {
657
- "text": "\u2022 one type of FUSE operation;",
658
- "cite_spans": [],
659
- "ref_spans": [],
660
- "eq_spans": [],
661
- "section": "The discourse-based transfer model",
662
- "sec_num": "3.2"
663
- },
664
- {
665
- "text": "\u2022 eleven types of SWAP operations; (In our corpus, Japanese units are at most l l positions away from their location in an Englishspecific rendering.)",
666
- "cite_spans": [],
667
- "ref_spans": [],
668
- "eq_spans": [],
669
- "section": "The discourse-based transfer model",
670
- "sec_num": "3.2"
671
- },
672
- {
673
- "text": "\u2022 seven types of ASSIGN~]~YPE operations: Unit, MultiUnit, Sentence, MultiSentence, Paragraph, MultiParagraph, and Text.",
674
- "cite_spans": [],
675
- "ref_spans": [],
676
- "eq_spans": [],
677
- "section": "The discourse-based transfer model",
678
- "sec_num": "3.2"
679
- },
680
- {
681
- "text": "These actions are sufficient for rewriting any tree Ts into any tree Tt, where Tt may have a different number of edus, where the edus of Tt may have a different ordering than the edus of Ts, and where the hierarchical structures of the two trees may be different as well.",
682
- "cite_spans": [],
683
- "ref_spans": [],
684
- "eq_spans": [],
685
- "section": "The discourse-based transfer model",
686
- "sec_num": "3.2"
687
- },
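To make the extended shift-reduce mechanics concrete, here is a toy replay of a few of the operations above (SWAP, SHIFT, and a binary REDUCE) on a three-edu input list. The class and the simplified tree representation are ours, for illustration only; they are not the paper's implementation, and BREAK, CREATE-NEXT, and ASSIGNTYPE are omitted.

```python
# Toy replay of the extended shift-reduce transfer mechanics (illustrative
# re-implementation; names and tree representation are ours, not the paper's).

class Tree:
    def __init__(self, relation=None, nuclearity=None, children=(), edu=None):
        self.relation = relation      # e.g. "ELABORATION-OBJECT-ATTRIBUTE-E"
        self.nuclearity = nuclearity  # "NS", "SN", or "NN"
        self.children = list(children)
        self.edu = edu                # text of a leaf (an edt), else None

class Transfer:
    def __init__(self, edus):
        self.stack = []                            # partial target trees
        self.input = [Tree(edu=e) for e in edus]   # one edt per input edu

    def shift(self):
        self.stack.append(self.input.pop(0))

    def reduce(self, relation, nuclearity):
        # Pop the two topmost trees and combine them into a binary tree.
        right, left = self.stack.pop(), self.stack.pop()
        self.stack.append(Tree(relation, nuclearity, [left, right]))

    def fuse(self):
        # Merge the top edt into the tree below it (many-to-one edu mappings).
        top = self.stack.pop()
        self.stack[-1].edu = f"{self.stack[-1].edu} {top.edu}"

    def swap(self, k):
        # Re-order: exchange the first edt with the one k positions away.
        self.input[0], self.input[k] = self.input[k], self.input[0]

t = Transfer(["j1", "j2", "j3"])
t.swap(1)                 # realize the second Japanese edu first
t.shift(); t.shift()      # the stack now holds the re-ordered edts j2, j1
t.reduce("ELABORATION-OBJECT-ATTRIBUTE-E", "NS")
print(len(t.stack), t.stack[0].relation)  # 1 ELABORATION-OBJECT-ATTRIBUTE-E
```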
688
- {
689
- "text": "3.3 Learning the parameters of the discourse-transfer model We associate with each configuration of our transfer model a learning case. The cases were generated by a program that automatically derived the sequence of actions that mapped the Japanese trees in our corpus into the sibling English trees, using the correspondences at the elementary unit level that were constructed manually. Overall, the 40 pairs of Japanese and English discourse trees yielded 14108 cases.",
690
- "cite_spans": [],
691
- "ref_spans": [],
692
- "eq_spans": [],
693
- "section": "The discourse-based transfer model",
694
- "sec_num": "3.2"
695
- },
696
- {
697
- "text": "To each learning example, we associated a set of features from the following classes:",
698
- "cite_spans": [],
699
- "ref_spans": [],
700
- "eq_spans": [],
701
- "section": "The discourse-based transfer model",
702
- "sec_num": "3.2"
703
- },
704
- {
705
- "text": "Operational and discourse features reflect the number of trees in the stack, the input list, and the types of the last five operations. They encode information pertaining to the types of the partial trees built up to a certain time and the rhetorical relations that hold between these trees.",
706
- "cite_spans": [],
707
- "ref_spans": [],
708
- "eq_spans": [],
709
- "section": "The discourse-based transfer model",
710
- "sec_num": "3.2"
711
- },
712
- {
713
- "text": "Correspondence-based features reflect the nuclearity, rhetorical relations, and types of the Japanese trees that correspond to the English-like partial trees derived up to a given time.",
714
- "cite_spans": [],
715
- "ref_spans": [],
716
- "eq_spans": [],
717
- "section": "The discourse-based transfer model",
718
- "sec_num": "3.2"
719
- },
720
- {
721
- "text": "Lexicalfeatures specify whether the Japanese spans that correspond to the structures derived up to a given time use potential discourse markers, such as dakara (because) and no ni (although).",
722
- "cite_spans": [],
723
- "ref_spans": [],
724
- "eq_spans": [],
725
- "section": "The discourse-based transfer model",
726
- "sec_num": "3.2"
727
- },
728
- {
729
- "text": "The discourse transfer module uses the C4.5 program (Quinlan, 1993) in order to learn decision trees and rules that specify how Japanese discourse trees should be mapped into English-like trees. A ten-fold cross-validation evaluation of the classifier yielded an accuracy of 70.2% (+ 0.21).",
730
- "cite_spans": [
731
- {
732
- "start": 52,
733
- "end": 67,
734
- "text": "(Quinlan, 1993)",
735
- "ref_id": "BIBREF8"
736
- }
737
- ],
738
- "ref_spans": [],
739
- "eq_spans": [],
740
- "section": "The discourse-based transfer model",
741
- "sec_num": "3.2"
742
- },
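At transfer time, applying the learned model amounts to testing feature predicates against the current configuration and emitting an action. The fragment below hand-codes two of the learned rules shown in Table 4 as an illustration; the config dictionary and the rewrite_relation function are our own scaffolding, and only the feature names come from the paper.

```python
def rewrite_relation(config):
    """Apply two of the learned rules from Table 4 to a configuration.

    `config` is illustrative scaffolding: a dict of feature values
    extracted from the stack and the corresponding Japanese tree.
    """
    if config.get("rhetRelOfStack-1InJapTree") == "ATTRIBUTION":
        return "ATTRIBUTION"
    if (config.get("rhetRelOfTopStackInJapTree") == "EXAMPLE"
            and not config.get("isSentenceTheLastUnitInJapTreeOfTopStack")):
        return "EVIDENCE"
    # Default: keep the Japanese relation unchanged.
    return config.get("rhetRelOfTopStackInJapTree")

print(rewrite_relation({"rhetRelOfTopStackInJapTree": "EXAMPLE",
                        "isSentenceTheLastUnitInJapTreeOfTopStack": False}))
# -> EVIDENCE
```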
743
- {
744
- "text": "In order to better understand the strengths and weaknesses of the classifier, we also attempted to break the problem into smaller components. Hence, instead of learning all actions at once, we attempted to learn first whether the rewriting procedure should choose a SHIFT, REDUCE, BREAK, FUSE, SWAP, or ASSIGNTYPE operation (the \"Main Action Type\" classifier in table 3), and only then to refine this decision by determining what type of reduce operation to perform, how many units to break a Japanese units into, how big the distance to the SWAP-ed unit should be, and what type of ASSIGNTYPE operation one should perform. Table 3 shows the sizes of each STACK 2 2 1\"",
745
- "cite_spans": [],
746
- "ref_spans": [
747
- {
748
- "start": 624,
749
- "end": 670,
750
- "text": "Table 3 shows the sizes of each STACK 2 2",
751
- "ref_id": "TABREF5"
752
- }
753
- ],
754
- "eq_spans": [],
755
- "section": "The discourse-based transfer model",
756
- "sec_num": "3.2"
757
- },
758
- {
759
- "text": "[~A BORATION_(IB~TI- data set and the performance of each of these classitiers, as determined using a ten-fold cross-validation procedure. For the purpose of comparison, each classifier is paired with a majority baseline.",
760
- "cite_spans": [],
761
- "ref_spans": [],
762
- "eq_spans": [],
763
- "section": "The discourse-based transfer model",
764
- "sec_num": "3.2"
765
- },
766
- {
767
- "text": "The results in Table 3 show that the most difficult subtasks to learn are that of determining the number of units a Japanese unit should be broken into and that of determining the distance to the unit that is to be swapped. The features we used are not able to refine the baseline classifiers for these action types. The confusion matrix for the \"Main Action Type\" classifier (see Table 5 ) shows that the system has trouble mostly identifying BREAK and CREATE-NEXT actions. The system has difficulty learning what type of nuclearity ordering to prefer (the \"Nuclearity-Reduce\" classifier) and what re-lation to choose for the English-like structure (the \"Relation-Reduce\" classifier). Figure 3 shows a typical learning curve, the one that corresponds to the \"Reduce Relation\" classifier. Our learning curves suggest that more training data may improve performance. However, they also suggest that better features may be needed in order to improve performance significantly. Table 4 displays some learned rules. The first rule accounts for rhetorical mappings in which the order of the nucleus and satellite of an ATTRIBUTION relation is changed when translated from Japanese into English. The second rule was learned in order to map EXAMPLE Japanese satellites into EVIDENCE English satellites. ",
768
- "cite_spans": [],
769
- "ref_spans": [
770
- {
771
- "start": 15,
772
- "end": 22,
773
- "text": "Table 3",
774
- "ref_id": "TABREF5"
775
- },
776
- {
777
- "start": 381,
778
- "end": 388,
779
- "text": "Table 5",
780
- "ref_id": "TABREF8"
781
- },
782
- {
783
- "start": 686,
784
- "end": 694,
785
- "text": "Figure 3",
786
- "ref_id": "FIGREF4"
787
- },
788
- {
789
- "start": 975,
790
- "end": 982,
791
- "text": "Table 4",
792
- "ref_id": "TABREF6"
793
- }
794
- ],
795
- "eq_spans": [],
796
- "section": "The discourse-based transfer model",
797
- "sec_num": "3.2"
798
- },
799
- {
800
- "text": "By applying the General classifier or the other six classifiers successively, one can map any Japanese discourse tree into a tree whose structure comes closer to the natural rendering of English. To evaluate the discourse-based transfer module, we carried out a ten-fold cross-validation experiment. That is, we trained the classifiers on 36 pairs of manually built and aligned discourse structures, and we then used the learned classifiers in order to map 4 unseen Japanese discourse trees into English-like trees. We measured the similarity of the derived trees with the English trees built manually, using the metrics discussed in Section 2. We repeated the procedure ten times, each time training and testing on different subsets of tree pairs. We take the results reported in Table 2 as a baseline for our model. The baseline corresponds to applying no knowledge of discourse. Table 6 displays the absolute improvement (in percentage points) in recall and precision figures obtained when the General classifier was used to map Japanese trees into English-looking trees. The General classifier yielded the best results. The results in Table 6 are averaged over a ten-fold cross-validation experiment.",
801
- "cite_spans": [],
802
- "ref_spans": [
803
- {
804
- "start": 781,
805
- "end": 788,
806
- "text": "Table 2",
807
- "ref_id": null
808
- },
809
- {
810
- "start": 882,
811
- "end": 889,
812
- "text": "Table 6",
813
- "ref_id": null
814
- },
815
- {
816
- "start": 1139,
817
- "end": 1146,
818
- "text": "Table 6",
819
- "ref_id": null
820
- }
821
- ],
822
- "eq_spans": [],
823
- "section": "Evaluation of the discourse-based transfer module",
824
- "sec_num": "4"
825
- },
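The cross-validation protocol itself is standard; a minimal sketch of it (our own helper, assuming the 40 aligned tree pairs are available as a list) is:

```python
import random

def ten_fold_splits(tree_pairs, seed=0):
    """Yield (train, test) splits for ten-fold cross-validation.

    With 40 aligned (Japanese, English) tree pairs this trains on 36 pairs
    and tests on the remaining 4 in each fold, as in the evaluation above.
    """
    pairs = list(tree_pairs)
    random.Random(seed).shuffle(pairs)
    fold = len(pairs) // 10
    for i in range(10):
        test = pairs[i * fold:(i + 1) * fold]
        train = pairs[:i * fold] + pairs[(i + 1) * fold:]
        yield train, test
```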
826
- {
827
- "text": "The results in Table 6 show that our model outperforms the baseline with respect to building English-like discourse structures for sentences, but it under-performs the baseline with respect to building English-like structures at the paragraph and text levels. The main shortcoming of our model seems to come from its low performance in assigning paragraph boundaries. Because our classifier does not learn correctly which spans to consider paragraphs and which spans not, the recall and precision results at the paragraph and text levels are negatively affected. The poorer results at the paragraph and text levels can be also explained by errors whose effect cumulates during the step-by-step tree-reconstruction procedure; and by the fact that, for these levels, there is less data to learn from.",
828
- "cite_spans": [],
829
- "ref_spans": [
830
- {
831
- "start": 15,
832
- "end": 22,
833
- "text": "Table 6",
834
- "ref_id": null
835
- }
836
- ],
837
- "eq_spans": [],
838
- "section": "Evaluation of the discourse-based transfer module",
839
- "sec_num": "4"
840
- },
841
- {
842
- "text": "However, if one ignores the sentence and paragraph boundaries and evaluates the discourse structures overall, one can see that our model outperforms the baseline on all accounts according to the Position-Dependent evaluation; outperforms the baseline with respect to the assignment of elementary units, hierarchical spans, and nuclearity statuses according to the Position-Independent evaluation and under-performs the baseline only slightly +9.1 +25.5 +2.0 +19.9 +0.4 +13.4 -0.01 +8.4 Paragraph -14.7 +1.4 -12.5 -1.7 -11.0 -2.4 -9.9 -3.3 Text -9.6 -13.5 -7.1 -11.1 -6.3 -10.0 -5.2 -8.8 Weighted Average +1.5 +14.1 -2.1 +9.9 -3.1 +6.4 -3.0 +3.9 All -1.2 +2.5 -0.1 +2.9 +0.6 +3.5 +0.7 +2.6 P-I R P-I P P-I R P-I P P-I R P-I P Table 6 : Relative evaluation of the discourse-based transfer module with respect to the figures in Table 2. with respect to the rhetorical relation assignment according to the Position-Independent evaluation. More sophisticated discourse features, such as those discussed by Maynard (1998), for example, and a tighter integration with the lexicogrammar of the two languages may yield better cues for learning discourse-based translation models.",
843
- "cite_spans": [],
844
- "ref_spans": [
845
- {
846
- "start": 725,
847
- "end": 732,
848
- "text": "Table 6",
849
- "ref_id": null
850
- },
851
- {
852
- "start": 825,
853
- "end": 833,
854
- "text": "Table 2.",
855
- "ref_id": null
856
- }
857
- ],
858
- "eq_spans": [],
859
- "section": "Evaluation of the discourse-based transfer module",
860
- "sec_num": "4"
861
- },
862
- {
863
- "text": "We presented a systematic empirical study of the role of discourse structure in MT. Our study strongly supports the need for enriching MT systems with a discourse module, capable of re-ordering and repackaging the information in a source text in a way that is consistent with the discourse rendering of a target language. We presented an extended shiftreduce parsing model that can be used to map discourse trees specific to a source language into discourse trees specific to a target language. Our model outperforms a baseline with respect to its ability to predict the discourse structure of sentences. Our model also outperforms the baseline with respect to its ability to derive discourse structures that are closer to the natural, rhetorical rendering in a target language than the original discourse structures in the source language. Our model is still unable to determine correctly how to re-package sentences into paragraphs; a better understanding of the notion of \"paragraph\" is required in order to improve this.",
864
- "cite_spans": [],
865
- "ref_spans": [],
866
- "eq_spans": [],
867
- "section": "Conclusion",
868
- "sec_num": "5"
869
- }
870
- ],
871
- "back_matter": [],
872
- "bib_entries": {
873
- "BIBREF0": {
874
- "ref_id": "b0",
875
- "title": "A program for aligning sentences in bilingual corpora",
876
- "authors": [
877
- {
878
- "first": "A",
879
- "middle": [],
880
- "last": "William",
881
- "suffix": ""
882
- },
883
- {
884
- "first": "Kenneth",
885
- "middle": [
886
- "W"
887
- ],
888
- "last": "Gale",
889
- "suffix": ""
890
- },
891
- {
892
- "first": "",
893
- "middle": [],
894
- "last": "Church",
895
- "suffix": ""
896
- }
897
- ],
898
- "year": 1993,
899
- "venue": "Computational Linguistics",
900
- "volume": "19",
901
- "issue": "1",
902
- "pages": "75--102",
903
- "other_ids": {},
904
- "num": null,
905
- "urls": [],
906
- "raw_text": "William A. Gale and Kenneth W. Church. 1993. A program for aligning sentences in bilingual cor- pora. Computational Linguistics, 19(1):75-102.",
907
- "links": null
908
- },
909
- "BIBREF1": {
910
- "ref_id": "b1",
911
- "title": "Learning parse and translation decisions from examples with rich context",
912
- "authors": [
913
- {
914
- "first": "Ulf",
915
- "middle": [],
916
- "last": "Hermjakob",
917
- "suffix": ""
918
- },
919
- {
920
- "first": "Raymond",
921
- "middle": [
922
- "J"
923
- ],
924
- "last": "Mooney",
925
- "suffix": ""
926
- }
927
- ],
928
- "year": 1997,
929
- "venue": "Proc. of ACL'97",
930
- "volume": "",
931
- "issue": "",
932
- "pages": "482--489",
933
- "other_ids": {},
934
- "num": null,
935
- "urls": [],
936
- "raw_text": "Ulf Hermjakob and Raymond J. Mooney. 1997. Learning parse and translation decisions from ex- amples with rich context. In Proc. of ACL'97, pages 482-489, Madrid, Spain..",
937
- "links": null
938
- },
939
- "BIBREF2": {
940
- "ref_id": "b2",
941
- "title": "Automatic detection of discourse structure by checking surface information in sentences",
942
- "authors": [
943
- {
944
- "first": "Sadao",
945
- "middle": [],
946
- "last": "Kurohashi",
947
- "suffix": ""
948
- },
949
- {
950
- "first": "Makoto",
951
- "middle": [],
952
- "last": "Nagao",
953
- "suffix": ""
954
- }
955
- ],
956
- "year": 1994,
957
- "venue": "Proc. of COLING'94",
958
- "volume": "2",
959
- "issue": "",
960
- "pages": "1123--1127",
961
- "other_ids": {},
962
- "num": null,
963
- "urls": [],
964
- "raw_text": "Sadao Kurohashi and Makoto Nagao. 1994. Auto- matic detection of discourse structure by check- ing surface information in sentences. In Proc. of COLING'94, volume 2, pages 1123-1127, Kyoto, Japan.",
965
- "links": null
966
- },
967
- "BIBREF3": {
968
- "ref_id": "b3",
969
- "title": "Statistical decision-tree models for parsing",
970
- "authors": [
971
- {
972
- "first": "David",
973
- "middle": [
974
- "M"
975
- ],
976
- "last": "Magerman",
977
- "suffix": ""
978
- }
979
- ],
980
- "year": 1995,
981
- "venue": "Proc. of A CL '95",
982
- "volume": "",
983
- "issue": "",
984
- "pages": "276--283",
985
- "other_ids": {},
986
- "num": null,
987
- "urls": [],
988
- "raw_text": "David M. Magerman. 1995. Statistical decision-tree models for parsing. In Proc. of A CL '95, pages 276-283, Cambridge, Massachusetts.",
989
- "links": null
990
- },
991
- "BIBREF4": {
992
- "ref_id": "b4",
993
- "title": "Rhetorical structure theory: Toward a functional theory of text organization",
994
- "authors": [
995
- {
996
- "first": "C",
997
- "middle": [],
998
- "last": "William",
999
- "suffix": ""
1000
- },
1001
- {
1002
- "first": "Sandra",
1003
- "middle": [
1004
- "A"
1005
- ],
1006
- "last": "Mann",
1007
- "suffix": ""
1008
- },
1009
- {
1010
- "first": "",
1011
- "middle": [],
1012
- "last": "Thompson",
1013
- "suffix": ""
1014
- }
1015
- ],
1016
- "year": 1988,
1017
- "venue": "Text",
1018
- "volume": "8",
1019
- "issue": "3",
1020
- "pages": "243--281",
1021
- "other_ids": {},
1022
- "num": null,
1023
- "urls": [],
1024
- "raw_text": "William C. Mann and Sandra A. Thompson. 1988. Rhetorical structure theory: Toward a functional theory of text organization. Text, 8(3):243-281.",
1025
- "links": null
1026
- },
1027
- "BIBREF5": {
1028
- "ref_id": "b5",
1029
- "title": "A decision-based approach to rhetorical parsing",
1030
- "authors": [
1031
- {
1032
- "first": "Daniel",
1033
- "middle": [],
1034
- "last": "Marcu",
1035
- "suffix": ""
1036
- }
1037
- ],
1038
- "year": 1999,
1039
- "venue": "Proc. of A CL'99",
1040
- "volume": "",
1041
- "issue": "",
1042
- "pages": "365--372",
1043
- "other_ids": {},
1044
- "num": null,
1045
- "urls": [],
1046
- "raw_text": "Daniel Marcu. 1999. A decision-based approach to rhetorical parsing. In Proc. of A CL'99, pages 365- 372, Maryland.",
1047
- "links": null
1048
- },
1049
- "BIBREF6": {
1050
- "ref_id": "b6",
1051
- "title": "Experiments in constructing a corpus of discourse trees",
1052
- "authors": [
1053
- {
1054
- "first": "Daniel",
1055
- "middle": [],
1056
- "last": "Marcu",
1057
- "suffix": ""
1058
- },
1059
- {
1060
- "first": "Estibaliz",
1061
- "middle": [],
1062
- "last": "Amorrortu",
1063
- "suffix": ""
1064
- },
1065
- {
1066
- "first": "Magdalena",
1067
- "middle": [],
1068
- "last": "Romera",
1069
- "suffix": ""
1070
- }
1071
- ],
1072
- "year": 1999,
1073
- "venue": "Proc. of the A CL'99 Workshop on Standards and Tools for Discourse Tagging",
1074
- "volume": "",
1075
- "issue": "",
1076
- "pages": "48--57",
1077
- "other_ids": {},
1078
- "num": null,
1079
- "urls": [],
1080
- "raw_text": "Daniel Marcu, Estibaliz Amorrortu, and Magdalena Romera. 1999. Experiments in constructing a cor- pus of discourse trees. In Proc. of the A CL'99 Workshop on Standards and Tools for Discourse Tagging, pages 48-57, Maryland.",
1081
- "links": null
1082
- },
1083
- "BIBREF7": {
1084
- "ref_id": "b7",
1085
- "title": "Principles of Japanese Discourse: A Handbook",
1086
- "authors": [
1087
- {
1088
- "first": "K",
1089
- "middle": [],
1090
- "last": "Senko",
1091
- "suffix": ""
1092
- },
1093
- {
1094
- "first": "",
1095
- "middle": [],
1096
- "last": "Maynard",
1097
- "suffix": ""
1098
- }
1099
- ],
1100
- "year": 1998,
1101
- "venue": "",
1102
- "volume": "",
1103
- "issue": "",
1104
- "pages": "",
1105
- "other_ids": {},
1106
- "num": null,
1107
- "urls": [],
1108
- "raw_text": "Senko K. Maynard. 1998. Principles of Japanese Discourse: A Handbook. Cambridge Univ. Press.",
1109
- "links": null
1110
- },
1111
- "BIBREF8": {
1112
- "ref_id": "b8",
1113
- "title": "C4.5: Programs for Machine Learning",
1114
- "authors": [
1115
- {
1116
- "first": "J",
1117
- "middle": [],
1118
- "last": "",
1119
- "suffix": ""
1120
- },
1121
- {
1122
- "first": "Ross",
1123
- "middle": [],
1124
- "last": "Quinlan",
1125
- "suffix": ""
1126
- }
1127
- ],
1128
- "year": 1993,
1129
- "venue": "",
1130
- "volume": "",
1131
- "issue": "",
1132
- "pages": "",
1133
- "other_ids": {},
1134
- "num": null,
1135
- "urls": [],
1136
- "raw_text": "J. Ross Quinlan. 1993. C4.5: Programs for Machine Learning. Morgan Kaufmann Publishers.",
1137
- "links": null
1138
- },
1139
- "BIBREF9": {
1140
- "ref_id": "b9",
1141
- "title": "Nonparametric Statistics for the Behavioral Sciences",
1142
- "authors": [
1143
- {
1144
- "first": "Sidney",
1145
- "middle": [],
1146
- "last": "Siegel",
1147
- "suffix": ""
1148
- },
1149
- {
1150
- "first": "N",
1151
- "middle": [
1152
- "J"
1153
- ],
1154
- "last": "Castellan",
1155
- "suffix": ""
1156
- }
1157
- ],
1158
- "year": 1988,
1159
- "venue": "",
1160
- "volume": "",
1161
- "issue": "",
1162
- "pages": "",
1163
- "other_ids": {},
1164
- "num": null,
1165
- "urls": [],
1166
- "raw_text": "Sidney Siegel and N.J. Castellan. 1988. Non- parametric Statistics for the Behavioral Sciences.",
1167
- "links": null
1168
- },
1169
- "BIBREF11": {
1170
- "ref_id": "b11",
1171
- "title": "A discourse structure analyzer for Japanese text",
1172
- "authors": [
1173
- {
1174
- "first": "Kazuo",
1175
- "middle": [],
1176
- "last": "Sumita",
1177
- "suffix": ""
1178
- },
1179
- {
1180
- "first": "Kenji",
1181
- "middle": [],
1182
- "last": "Ono",
1183
- "suffix": ""
1184
- },
1185
- {
1186
- "first": "T",
1187
- "middle": [],
1188
- "last": "Chino",
1189
- "suffix": ""
1190
- },
1191
- {
1192
- "first": "Teruhiko",
1193
- "middle": [],
1194
- "last": "Ukita",
1195
- "suffix": ""
1196
- },
1197
- {
1198
- "first": "Shin'ya",
1199
- "middle": [],
1200
- "last": "Amano",
1201
- "suffix": ""
1202
- }
1203
- ],
1204
- "year": 1992,
1205
- "venue": "Proceedings of the International Conference on Fifth Generation Computer Systems",
1206
- "volume": "",
1207
- "issue": "",
1208
- "pages": "1133--1140",
1209
- "other_ids": {},
1210
- "num": null,
1211
- "urls": [],
1212
- "raw_text": "Kazuo Sumita, Kenji Ono, T. Chino, Teruhiko Ukita, and Shin'ya Amano. 1992. A discourse structure analyzer for Japanese text. In Proceed- ings of the International Conference on Fifth Gen- eration Computer Systems, v 2, pages 1133-1140.",
1213
- "links": null
1214
- },
1215
- "BIBREF12": {
1216
- "ref_id": "b12",
1217
- "title": "Evaluation in the ARPA machine-translation program: 1993 methodology",
1218
- "authors": [
1219
- {
1220
- "first": "J",
1221
- "middle": [],
1222
- "last": "White",
1223
- "suffix": ""
1224
- },
1225
- {
1226
- "first": "T",
1227
- "middle": [],
1228
- "last": "O'connell",
1229
- "suffix": ""
1230
- }
1231
- ],
1232
- "year": 1994,
1233
- "venue": "Proceedings of the ARPA Human Language Technology Workshop",
1234
- "volume": "",
1235
- "issue": "",
1236
- "pages": "135--140",
1237
- "other_ids": {},
1238
- "num": null,
1239
- "urls": [],
1240
- "raw_text": "J. White and T. O'Connell. 1994. Evaluation in the ARPA machine-translation program: 1993 methodology. In Proceedings of the ARPA Human Language Technology Workshop, pages 135-140, Washington, D.C.",
1241
- "links": null
1242
- }
1243
- },
1244
- "ref_entries": {
1245
- "FIGREF0": {
1246
- "text": "................................................................................................................. ==:~::::\u00b02'.~2~?:::-~.:.,~a .................",
1247
- "type_str": "figure",
1248
- "uris": null,
1249
- "num": null
1250
- },
1251
- "FIGREF1": {
1252
- "text": "The discourse structures of texts (1) and (3).",
1253
- "type_str": "figure",
1254
- "uris": null,
1255
- "num": null
1256
- },
1257
- "FIGREF3": {
1258
- "text": "Example of incremental tree reconstruction.",
1259
- "type_str": "figure",
1260
- "uris": null,
1261
- "num": null
1262
- },
1263
- "FIGREF4": {
1264
- "text": "Learning curve for the Relation-Reduce classifier. if rhetRelOfStack-llnJapTree = ATTRIBUTION then rhetRelOffFopStacklnEngTree ~ ATTRIBUTION if rhetRelOffFopStacklnJapTree ----EXAMPLE A isSentenceTheLastUnitlnJapTreeOfropStack = false then rhetRelOfI'opStackInEngTree ~ EVIDENCE",
1265
- "type_str": "figure",
1266
- "uris": null,
1267
- "num": null
1268
- },
1269
- "TABREF0": {
1270
- "html": null,
1271
- "type_str": "table",
1272
- "num": null,
1273
- "content": "<table><tr><td>[In its future population estimates'] [made</td><td>(3)</td></tr><tr><td>public last year, 2</td><td/></tr><tr><td>(2)</td><td/></tr></table>",
1274
- "text": "The Ministry of Health and Welfare last year revealed I ] [population of future estimate according to 2] [in future 1.499 persons as the lowest s] [that after *SAB* rising to turn that 4] [*they* estimated but s ] [already the estimate misses a point ~] [prediction became. 7] ] [the Ministry of Health and Welfare predicted that the SAB would drop to a new low of 1.499 in the future, s) [but would make a comeback after that, 4] [increasing once again, s ] [However, it looks as if that prediction will be quickly shattered. 6]"
1275
- },
1276
- "TABREF5": {
1277
- "html": null,
1278
- "type_str": "table",
1279
- "num": null,
1280
- "content": "<table><tr><td/><td/><td/><td/><td>RtlauoaRcaa\u00a2 e</td></tr><tr><td>~oo</td><td/><td/><td/><td/></tr><tr><td>440o</td><td/><td/><td/><td/></tr><tr><td>~oo</td><td/><td/><td/><td/></tr><tr><td>38 oo</td><td/><td/><td/><td/></tr><tr><td>~oa</td><td/><td/><td/><td/></tr><tr><td>I</td><td>I</td><td>I</td><td>I</td><td>tC~ xlO 3</td></tr></table>",
1281
- "text": "Performance of the classifiers"
1282
- },
1283
- "TABREF6": {
1284
- "html": null,
1285
- "type_str": "table",
1286
- "num": null,
1287
- "content": "<table/>",
1288
- "text": "Rule examples for the Relation-Reduce classifier."
1289
- },
1290
- "TABREF8": {
1291
- "html": null,
1292
- "type_str": "table",
1293
- "num": null,
1294
- "content": "<table/>",
1295
- "text": "Confusion matrix for the Main Action Type classifier."
1296
- }
1297
- }
1298
- }
1299
- }