text
stringlengths
22
301k
# Rule-file tokenizing pass: reduce "@@" end-of-rule markers, "@KEYWORD"
# region openers (_soRULES etc.) and "@@KEYWORD" closers (_eoRULES etc.)
# for RULES/POST/CHECK/PRE/NODES/MULTI/PATH/CODE/SELECT/RECURSE, plus
# "_" nonliterals, "<-" arrows, alphabetic literals, and numbers.
@RULES _ENDRULE [base] <- \@ \@ _xWHITE @@ #@POST # noop() #@RULES #_xNIL <- _xWILD [min=1 max=1 fail=(\@)] @@ @RULES _soRULES [base] <- \@ RULES [t] @@ _soPOST [base] <- \@ POST [t] @@ _soCHECK [base] <- \@ CHECK [t] @@ _soPRE [base] <- \@ PRE [t] @@ _soNODES [base] <- \@ NODES [t] @@ _soMULTI [base] <- \@ MULTI [t] @@ _soPATH [base] <- \@ PATH [t] @@ _soCODE [base] <- \@ CODE [t] @@ _soSELECT [base] <- \@ SELECT [t] @@ _soRECURSE [base] <- \@ RECURSE [t] @@ _eoPOST [base] <- \@ \@ POST [t] @@ _eoCHECK [base] <- \@ \@ CHECK [t] @@ _eoPRE [base] <- \@ \@ PRE [t] @@ _eoRULES [base] <- \@ \@ RULES [t] @@ _eoRECURSE [base] <- \@ \@ RECURSE [t] @@ _eoSELECT [base] <- \@ \@ SELECT [t] @@ _eoNODES [base] <- \@ \@ NODES [t] @@ _eoMULTI [base] <- \@ \@ MULTI [t] @@ _eoPATH [base] <- \@ \@ PATH [t] @@ _eoCODE [base] <- \@ \@ CODE [t] @@ _ENDRULE [base] <- \@ \@ @@ @POST rfanonlit(2) single() @RULES _NONLIT [base] <- \_ _xALPHA @@ @RULES _ARROW [base] <- \< \- @@ @POST rfaname(1) single() @RULES _LIT [base] <- _xALPHA @@ @POST rfanum(1) single() @RULES _NUM [base] <- _xNUM @@
# Fetch index entry for dictionary concept. Gets index concept that str would be added under. L("return_con") = wordindex(L("str"));
# Spaces: write L("spaces") to the L("output") stream L("num") times.
# L("i") is never initialized and so starts at 0 (NLP++ default).
# NOTE(review): the "moose" write to debug.txt looks like leftover debug
# tracing — confirm before removing.
@DECL Spaces(L("output"), L("num"), L("spaces")) { "debug.txt" << "moose\n"; while (L("i") < L("num")) { L("output") << L("spaces"); L("i")++; } } @@DECL
# Parse-tree XML dump helpers:
#   VerticeXMLTree       - emit <vertice>/<edge> records (graph form) for
#                          each nonliteral node, recursing into children;
#                          edges link a node to its parent's id.
#   VerticeXMLRecurseAll - emit the subtree as nested XML tags; leaves
#                          emit their $text and return 1.
#   VerticeGetAttributes / VerticeAddAttribute - emit stem/voice/tense/
#                          aspect node variables as XML attributes when set.
#   VerticeSpacing       - build an indentation string of L("num") spaces.
# NOTE(review): VerticeXMLTree derives the tag via strpiece(name,0,...)
# (keeps the leading "_") while VerticeXMLRecurseAll uses
# strpiece(name,1,...) (drops it) — confirm the inconsistency is intended.
@DECL VerticeXMLTree( L("out"), L("n"), L("parent id") ) { L("childs") = pndown(L("n")); L("leaf") = 0; while (L("childs")) { L("name") = pnname(L("childs")); if (strlength(L("name")) > 1) { L("id") = G("id")++; L("tag") = strtrim(strpiece(L("name"),0,strlength(L("name"))-1)); L("out") << "<vertice"; VerticeGetAttributes(L("out"),L("childs")); L("out") << ">\n"; L("out") << " <id>" << L("id") << "</id>\n"; L("out") << " <label>" << L("tag") << "</label>\n"; L("out") << "</vertice>\n"; if (L("parent id")) { L("out") << "<edge>\n"; L("out") << " <source>" << L("parent id") << "</source>\n"; L("out") << " <target>" << L("id") << "</target>\n"; L("out") << "</edge>\n"; } if (pndown(L("childs"))) { VerticeXMLTree(L("out"),L("childs"),L("id")); } } L("childs") = pnnext(L("childs")); } return L("leaf"); } VerticeXMLRecurseAll( L("out"), L("n"), # Current node. L("level") # level of recursion ) { L("childs") = pndown(L("n")); L("leaf") = 0; while (L("childs")) { L("name") = pnname(L("childs")); if (strlength(L("name")) > 1) { L("tag") = strpiece(L("name"),1,strlength(L("name"))-1); if (pndown(L("childs"))) { G("id")++; L("out") << "\n" << VerticeSpacing(L("level")) << "<" << L("tag"); VerticeGetAttributes(L("out"),L("childs")); L("out") << ">"; if (!VerticeXMLRecurseAll(L("out"),L("childs"),L("level")+1)) L("out") << "\n" << VerticeSpacing(L("level")); L("out") << "</" << L("tag") << ">"; } else { L("out") << pnvar(L("childs"),"$text"); L("leaf") = 1; } } L("childs") = pnnext(L("childs")); } return L("leaf"); } VerticeGetAttributes(L("out"),L("n")) { VerticeAddAttribute(L("out"),L("n"),"stem"); VerticeAddAttribute(L("out"),L("n"),"voice"); VerticeAddAttribute(L("out"),L("n"),"tense"); VerticeAddAttribute(L("out"),L("n"),"aspect"); } VerticeAddAttribute(L("out"),L("n"),L("name")) { L("value") = pnvar(L("n"),L("name")); if (L("value")) L("out") << " " << L("name") << "=\"" << L("value") << "\""; } VerticeSpacing(L("num")) { L("i") = 1; L("VerticeSpacing") = " "; while 
(L("i")++ < L("num")) { L("VerticeSpacing") = L("VerticeSpacing") + " "; } return L("VerticeSpacing"); }
# POS-tagging pass: look each alphabetic token up in the dictionary, copy
# every "pos" value onto boolean node flags (noun/verb/adjective/...), and
# count them in N("pos num"). Tokens with multiple POS regroup as _AMBIG;
# single-POS tokens regroup as _<POS> (uppercased, spaces -> "_").
@NODES _ROOT @POST N("wordcon") = dictfindword(strtolower(N("$text"))); if (N("wordcon")) { G("vals") = findvals(N("wordcon"),"pos"); G("first val") = "_" + strtoupper(getstrval(G("vals"))); N("pos num") = 0; while (G("vals")) { G("val") = getstrval(G("vals")); if (G("val") == "noun") N("noun") = 1; else if (G("val") == "verb") N("verb") = 1; else if (G("val") == "definite article") N("definite article") = 1; else if (G("val") == "indefinite article") N("indefinite article") = 1; else if (G("val") == "adjective") N("adjective") = 1; else if (G("val") == "adverb") N("adverb") = 1; else if (G("val") == "conj") N("conj") = 1; else if (G("val") == "pronoun") N("pronoun") = 1; else if (G("val") == "prep" || G("val") == "preposition") N("prep") = 1; else if (G("val") == "interj" || G("val") == "interjection") N("interj") = 1; else if (G("val") == "aux") N("aux") = 1; else if (G("val") == "pastpart") N("pastpart") = 1; G("vals") = nextval(G("vals")); N("pos num")++; } if (N("pos num") > 1) group(1,1,"_AMBIG"); else group(1,1,strsubst(G("first val")," ","_")); } @RULES _xNIL <- _xALPHA [s] ### (1) @@
# Fetch the last node in phrase. L("return_con") = lastnode(L("phrase"));
# Find attribute and value pair in the subhierarchy of a concept. attrexists(L("hier"), L("attr_s"), L("val_s"));
# No-op pass: matches any node run under _ROOT with no @POST action
# (placeholder; useful as a pass skeleton or for match debugging).
@NODES _ROOT @RULES _xNIL <- _xWILD ### (1) @@
# Record a state abbreviation: store its letters under the state concept's
# "abbrev" child, and emit a dictionary line (word + attributes) to the
# G("abbrev") output stream.
@NODES _ROOT @POST L("abbrev") = getconcept(G("state"),"abbrev"); addstrval(L("abbrev"),"letters",N("text")); G("abbrev") << strtolower(N("text")) << " s=state abbrev=1 state=" << QuoteIfNeeded(G("state name")) << "\n"; @RULES _xNIL <- _abbrev ### (1) @@
# Extract fields inside an _item zone: capture the category id into
# X("catid"), register the category name via addCategory (also logged to
# names.txt) into X("name"), and log tooltip text (only 2-100 chars long,
# per @PRE lengthr) to tooltip.txt.
@NODES _item @POST X("catid") = N("$text",2); @RULES _xNIL <- _categoryidOpen [s] ### (1) _xWILD [s fails=(_categoryidClose)] ### (2) _categoryidClose ### (3) @@ @POST "names.txt" << N("$text",2) << "\n"; addCategory(N("$text",2),X("catid")); X("name") = N("$text",2); @RULES _xNIL <- _nameOpen [s] ### (1) _xWILD [s fails=(_nameClose)] ### (2) _nameClose ### (3) @@ @PRE <2,2> lengthr(2,100); @POST "tooltip.txt" << N("$text",2) << "\n"; @RULES _xNIL <- _tooltipOpen [s] ### (1) _xWILD [s fails=(_tooltipClose)] ### (2) _tooltipClose ### (3) @@
# Remove _EOS markers inside sentences (splice them into the parent), but
# only when the sentence-breaking pass set G("eos"); otherwise skip this
# pass entirely via exitpass().
@CODE # If found new sentence breaks, do this pass. if (!G("eos")) exitpass(); @@CODE #@PATH _ROOT _TEXTZONE _sent @NODES _sent @POST splice(1,1); @RULES _xNIL <- _EOS @@
# Close the output file handle previously opened into G("file").
@CODE closefile(G("file")); @@CODE
# Propagate X("ResumeOf") confidence from the enclosing human-candidate
# zone down onto the matched _humanName node; noop() leaves the parse
# unchanged (attribute transfer only).
# Transfer confidence information from human candidate node to # a human name that was found inside it. @NODES _ROOT _contactZone _LINE _humanNameCandidate @CHECK if (X("ResumeOf") <= 0) fail(); @POST N("ResumeOf") = X("ResumeOf"); # 12/25/99 AM. # ninc(1, "ResumeOf") noop(); @RULES _xNIL <- _humanName [s] @@
# Phone-number recognition: a battery of rules covering common layouts —
# "(+1) NNN NNN NNNN", "N-NNN-NNN-NNNN", "(NNN) NNN-NNNN", "NNN.NNN.NNNN",
# "NNN NNN-NNNN", plus two hard-coded literal numbers — layering the parts
# as _countryCode/_areaCode/_prefix/_suffix. Each @PRE length() check pins
# the digit count of the component at that rule position.
@NODES _LINE @PRE <3,3> length(1); <6,6> length(3); <8,8> length(3); <10,10> length(4); @RULES # Ex: (+1)\_ _phoneNumber <- _xWILD [min=1 max=1 s match=("_openPunct" "\(")] \+ [s] _xWILD [min=1 max=1 trig s layer=("_countryCode") match=("_monthNum" "1")] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] _xWHITE [star s] _xNUM [s layer=("_areaCode")] _xWHITE [star s] _xNUM [s layer=("_prefix")] _xWHITE [star s] _xNUM [s layer=("_suffix")] @@ @PRE <7,7> length(4); @RULES _phoneNumber <- _xNUM [s layer=("_countryCode")] \- [trig s] _xNUM [s layer=("_areaCode")] \- [s] _xNUM [s layer=("_prefix")] \- [s] _xNUM [s layer=("_suffix")] @@ @PRE <2,2> length(3); <5,5> length(3); <9,9> length(4); @RULES _phoneNumber <- _xWILD [min=1 max=1 s match=("_openPunct" "\(")] _xNUM [s layer=("_areaCode")] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] _xWHITE [star s] _xNUM [s layer=("_prefix")] _xWHITE [star s] \- [s] _xWHITE [star s] _xNUM [s layer=("_suffix")] @@ @PRE <1,1> length(3); <3,3> length(3); <5,5> length(4); @RULES _phoneNumber <- _xNUM [s layer=("_areaCode")] _xPUNCT [trig s] _xNUM [s layer=("_prefix")] _xPUNCT [s] _xNUM [s layer=("_suffix")] @@ @PRE <1,1> length(2); <3,3> length(3); <5,5> length(6); @RULES _phoneNumber <- _xWILD [min=1 max=1 s layer=("_countryCode") match=("_year" "61")] \- [s] 411 [trig s layer=("_areaCode")] \- [s] 348700 [s layer=("_suffix")] @@ @PRE <1,1> length(3); <3,3> length(3); <5,5> length(4); @RULES _phoneNumber <- _xNUM [s layer=("_areaCode")] _xWHITE [star s] _xNUM [s layer=("_prefix")] \- [trig s] _xNUM [s layer=("_suffix")] @@ _phoneNumber <- _xNUM [s layer=("_areaCode")] _xWHITE [star s] _xNUM [s layer=("_prefix")] _xWHITE [star s] _xNUM [s layer=("_suffix")] @@ @PRE <2,2> length(3); <5,5> length(3); <7,7> length(4); @RULES _phoneNumber <- _xWILD [min=1 max=1 s match=("_openPunct" "\(")] _xNUM [s layer=("_areaCode")] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] _xWHITE [star s] _xNUM [s layer=("_prefix")] _xWHITE [star s] _xNUM [s 
layer=("_suffix")] @@ _phoneNumber <- _xWILD [min=1 max=1 s match=("_openPunct" "\(")] 408 [trig s] _xWILD [min=1 max=1 s match=("_closePunct" "\)")] _xWHITE [star s] 283 [s] \- [s] 0605 [s] @@
# Mark resume-experience parts: either a date range, or a capitalized
# phrase / known job title.
@NODES _LINE @RULES _experiencePart <- _DateRange [s] @@ # Ex: Senior\_Software\_Engineer _experiencePart <- _xWILD [min=1 max=1 s match=("_Caps" "_jobTitle")] @@
# Debug: dump the KB subtree under G("parse") at verbosity level 2.
@CODE DisplayKB(G("parse"),2); @@CODE
# Inside _RULES: the littopair mini-grammar converts each _LIT to a _PAIR
# (via rfalittopair), then a bracketed list of literals/pairs/'*' is
# reduced to a _PAIRS node (via rfapairs), recursing with littopair.
@PATH _ROOT _RULES @RECURSE littopair @POST rfalittopair(1) single() @RULES _PAIR <- _LIT @@ @@RECURSE littopair @POST rfapairs(2) single() @RULES _PAIRS [base] <- \[ _xWILD [match=(_LIT _PAIR \*) recurse=(littopair)] \] @@
# Find the kb root, then make a child concept under root named "child" G("the root") = findroot(); # make the child concept makeconcept(G("the root"),"child"); # do something else ... # later on, look the child up by its string name G("the child") = findconcept(G("the root"),"child"); # or fetch the same concept by its numeric position G("same concept") = findconcept(G("the root"), 1);
@NODES _ROOT

@POST
  # FIX: the original read "if () single();" — an empty, malformed
  # condition. Reduce unconditionally; if a guard was intended here,
  # restore it (TODO: confirm intent with the author).
  single();

@RULES
_xNIL <- _labelEntry ### (1)
  @@
# Grow school names: layer _CompleteSchoolName over a _SchoolNamePhrase
# that follows line start / "The" / punctuation (singler keeps only the
# phrase), and absorb a preceding capitalized word into _SchoolNamePhrase.
@NODES _LINE @POST singler(3,3) @RULES _CompleteSchoolName [base] <- _xWILD [s one match=( _xSTART The _xPUNCT )] _xWHITE [star s] _SchoolNamePhrase [t] @@ # Joe (SchoolNamePhrase University) -> ModSchoolPhrase(Joe University) @RULES _SchoolNamePhrase [base] <- _xWILD [one s fails=( _xPUNCT _xWHITE _xNUM _degree _major _minor _gpa _DateRange _SingleDate)] # 10/09/99 PS _xWHITE [one s] # 10/07/99 AM. _SchoolNamePhrase [t] @@
@DECL

# AddWord
# Log a (person, word) pair to word.txt and add the word to the
# dictionary with a "person" string attribute naming that person.
# Args:
#   L("person") - person name to attach as the attribute value.
#   L("word")   - dictionary headword to add.
AddWord(L("person"),L("word"))
{
  "word.txt" << L("person") << " " << L("word") << "\n";
  # FIX: the original passed the global G("word") (possibly stale or
  # empty) where the function's own L("word") parameter was clearly
  # intended — the logged word and the dictionary entry could differ.
  DictionaryWord(L("word"),"person",L("person"),"str");
}

@@DECL
# Group one or more consecutive _LINE nodes into a _paragraph.
@NODES _ROOT @RULES _paragraph <- _xWILD [plus match=(_LINE)] ### (2) @@
@NODES _ROOT

@POST
  excise(1,4);
  noop();

@RULES
# Strip leading non-content: from the start of input, skip up to the
# first alpha/punct token, then everything through the first line break.
# FIX: "_xALHPA" was misspelled (twice) in the original; as a nonexistent
# literal it could never match the special _xALPHA wildcard token.
_xNIL <-
  _xSTART
  _xWILD [fail=(_xALPHA _xPUNCT)]
  _xWILD [one matches=(_xALPHA _xPUNCT)]
  _xWILD [fail=(\n \r)] ### (1)
  _xWILD [one matches=(\n \r)] ### (3)
  @@

@POST
  singler(3,3);

@RULES
# A numbered term line: number, non-alpha separator, then the term text
# up to a quote, line break, or end of input.
_term <-
  _xNUM
  _xWILD [fail=(_xALPHA)]
  _xWILD [fail=(\n \r \")] ### (1)
  _xWILD [one matches=(\" \n \r _xEND)] ### (3)
  @@
# Group consecutive _sentence nodes (plus the trailing paragraph
# separator or end of input) into an unsealed _paragraph.
@NODES _ROOT @RULES _paragraph [unsealed] <- _sentence [s plus] ### (1) _xWILD [s matches=(_paragraphSeparator _xEND)] ### (2) @@
# Verb-group formation (no @NODES/@PATH given — applies to the analyzer's
# default selection).
# Match a _verb node optionally preceded by _modal, _have, or _be nodes, reducing to _vgroup @RULES _vgroup <- _modal [option] _have [option] _be [option] _verb @@
@NODES _ROOT

@PRE
<1,1> var("bases");

@POST
  # Record each section/sentence node carrying a "bases" (EUI) list into
  # the KB under G("note_words"), tagging the new concept with its
  # section/subsection titles and adding each base plus its mapped term.
  "extract_codes.txt" << "Beginning code extraction step." << ":\n";
  L("section_title") = N("section_title", 1);
  L("subsection_title") = N("subsection", 1);
  G("line_count") = G("line_count") + 1;
  L("section_name") = pnname(N(1)) + "_" + str(G("line_count"));
  "extract_codes.txt" << "section_name=" << L("section_name") << ":\n";
  L("section") = AddUniqueCon(G("note_words"), L("section_name"));
  if (L("section_title")) {
    addstrval(L("section"),"section",L("section_title"));
    "extract_codes.txt" << "SECTION " << L("section_title") << ":\n";
  }
  # FIX: the original tested (and logged) the never-assigned local
  # L("subsection"), so this branch could never fire. Use the actual
  # L("subsection_title") local throughout.
  if (L("subsection_title")) {
    addstrval(L("section"),"subsection",L("subsection_title"));
    "extract_codes.txt" << "SUBSECTION " << L("subsection_title") << ":\n";
  }
  L("all_bases") = pnvar(N(1), "bases");
  L("bases_len") = arraylength(L("all_bases"));
  "extract_codes.txt" << "\t" << "Adding " << L("bases_len") << " total bases." << "\n";
  L("i") = 0;
  while(L("i") < L("bases_len")) {
    "extract_codes.txt" << "\t" << "Adding " << L("all_bases")[L("i")] << "\n";
    AddUniqueStr(L("section"),"bases", L("all_bases")[L("i")]);
    "extract_codes.txt" << "\t" << conceptname(G("eui_to_base_term")) << "\n";
    # FIX: the original was missing the ";" terminating this assignment.
    L("eui_con") = getconcept(G("eui_to_base_term"), L("all_bases")[L("i")]);
    L("eui_term") = strval(L("eui_con"), "term");
    AddUniqueStr(L("section"), "terms", L("eui_term"));
    L("i")++;
  }
  "extract_codes.txt" << "\t" << N("bases", 1) << "\n";
  # extractCodeRanks(N("bases", 1), "diagnoses");
  # extractCodeRanks(N("bases", 1), "procedures");

@RULES
_xNIL <- _xWILD [one matches=( _section _subsection _sentence _looseText _item)] @@
# Flatten _LINE nodes flagged with an "up" variable: splice the line's
# children directly into the parent, removing the _LINE wrapper.
@NODES _ROOT @PRE <1,1> var("up"); @POST splice(1,1); @RULES _xNIL <- _LINE ### (1) @@
# Recognize a capitalized country name from a fixed literal word list.
@NODES _LINE @PRE <1,1> cap() @RULES # Ex: Sweden _countryWord <- _xWILD [min=1 max=1 s match=(Sweden USA Canada Mexico France Australia ROC USSR PRC UAR UK Spain Italy Finland Turkey Israel Egypt Yugoslavia Japan Norway Denmark Holland Germany Austria Hungary Albania Portugal Poland Greece Czechoslovakia India Pakistan Rumania Romania Bulgaria US Latvia Estonia Switzerland Morocco Algeria Yemen Ethiopia Libya Palestine Iraq Iran Lebanon Venezuela Russia China Peru Chile Korea England Belize Paraguay Uruguay Argentina Brazil Ecuador Gabon Nigeria Sudan Indonesia Thailand Malaysia Singapore Laos Taiwan Ukraine)] @@
# Mark ignorable regions of a document: everything from the start up to
# the abstract, and material between the abstract's end and the next
# figure or section (singler keeps only the ignorable span).
@NODES _ROOT @POST singler(1,2); @RULES _IGNORE <- _xSTART ### (1) _xWILD ### (2) _beginAbs [lookahead] ### (3) @@ @POST singler(2,2); @RULES _IGNORE <- _endAbs ### (1) _xWILD ### (2) _beginFigure ### (3) @@ @POST singler(2,2); @RULES _IGNORE <- _endAbs ### (1) _xWILD ### (2) _section ### (3) @@
# Date recognition: numeric M/D/Y and M-D-Y (month-day-year preferred on
# ambiguity), unambiguous D/M/Y and D-M-Y, and word-month forms
# ("30 July 1999", "July 30, 1999"); numrange pins the day to 1-31.
# NOTE(review): the "/"-separated month-first rule appears twice — the
# duplicate looks unintentional; confirm before removing.
@NODES _LINE @PRE <3,3> numrange(1, 31) @RULES # ambiguity, month-day-year reading is preferred. _date [] <- _monthNum [s layer = (_month) ] \/ [s] _xNUM [s layer = (_day) ] \/ [s] _year [s] @@ _date [] <- _monthNum [s layer = (_month) ] \- [s] _xNUM [s layer = (_day) ] \- [s] _year [s] @@ _date [] <- _monthNum [s layer = (_month) ] \/ [s] _xNUM [s layer = (_day) ] \/ [s] _year [s] @@ @PRE <1,1> numrange(1, 31) @RULES # ambiguity, month-day-year reading is preferred. # here, there's no ambiguity, _date [] <- _xNUM [s layer = (_day) ] \/ [s] _monthNum [s layer = (_month) ] \/ [s] _year [s] @@ _date [] <- _xNUM [s layer = (_day) ] \- [s] _monthNum [s layer = (_month) ] \- [s] _year [s] @@ _date [] <- _xNUM [s layer = (_day) ] \/ [s] _monthNum [s layer = (_month) ] \/ [s] _year [s] @@ @PRE <1,1> numrange(1, 31) @RULES # Ex: 30 July(,.) _date [] <- _xNUM [s layer = (_day) ] _xWHITE [s] _xWILD [s matches = (_monthWord) ] \. [s opt] \, [s opt] _xWHITE [s] _year [s] @@ @PRE <4,4> numrange(1, 31) @RULES # Ex: July(.) 30, _date [] <- _xWILD [s matches = (_monthWord) ] \. [s opt] _xWHITE [s] _xNUM [s layer = (_day) ] \, [s opt] _xWHITE [s] _year [s] @@
# Recognize an identifier of the form UPPERCASE ":" word at the very end
# of the input.
@NODES _ROOT @PRE <1,1> uppercase(); @RULES _identifier <- _xALPHA ### (1) \: [one] ### (2) _xALPHA _xEND @@
# Inside sentences, convert a _titleCaps phrase flagged "human" into a
# _person node, registering the lowercased phrase text as a concept under
# G("people") and storing it in S("con").
@PATH _ROOT _paragraph _sentence @PRE <1,1> var("human"); @POST S("con") = getconcept(G("people"),strtolower(phrasetext())); single(); @RULES _person <- _titleCaps ### (1) @@
# Noun-phrase formation pass over _sent: groups determiner/quantifier/
# number/adjective/noun sequences into _np, resolves ambiguous _xALPHA
# tokens to _noun/_adj using their POS flags, marks "active" voice on a
# preceding _vg when unset, and builds _adjc constituents. Rule order is
# significant; many rules are guarded by @CHECK tests (numbersagree,
# vconjq, "pos num"). Left intact: the logic is tightly order-dependent.
@CODE L("hello") = 0; @@CODE @NODES _sent # Lone noun. @POST pncopyvars(2); sclearpos(1,1); # Zero out token info. singler(2,2); @RULES _np <- _xWILD [one match=(_prep _vg)] _noun _xWILD [one lookahead match=(_prep _conj _qEOS _xEND)] @@ # Not sure why alpha in first position in these rules.... @CHECK if (!N(2) && !N(3) && !N(4)) fail(); dqaninfo(2,3,4,5); if (!numbersagree(S("first"),S("last"))) fail(); # 06/15/06 AM. @POST if (N(3)) if (!nonliteral(N(3))) { L("tmp3") = N(3); group(3,3,"_num"); pncopyvars(L("tmp3"),N(3)); } if (N(4)) { L("firstj") = N(4); L("lastj") = lasteltnode(4); } groupnp(); @RULES _xNIL <- _xALPHA _xWILD [plus match=(_det _pro)] _xWILD [star match=(_quan _num _xNUM)] _adj [star] _noun [plus] _xWILD [one lookahead fail=(_adj _xALPHA _aposS)] @@ _xNIL <- _xALPHA _xWILD [star match=(_det _pro)] _xWILD [plus match=(_quan _num _xNUM)] _adj [star] _noun [plus] _xWILD [one lookahead fail=(_adj _xALPHA _aposS)] @@ _xNIL <- _xWILD [one fail=(_xALPHA _det _quan _num _xNUM _adj _noun)] _xWILD [s star match=(_det _proPoss)] _xWILD [star match=(_quan _num _xNUM)] _adj [star] _noun [plus] _xWILD [one lookahead fail=(_adj _xALPHA _aposS)] @@ @POST L("tmp4") = lasteltnode(4); pncopyvars(L("tmp4")); sclearpos(1,0); S("bracket",2) = 1; S("ne") = 0; singler(2,4); @RULES _np <- _xSTART _xWILD [star match=(_det _quan _num _xNUM)] _adj [star] _noun [plus] _xWILD [one lookahead fail=(_noun _adj _xALPHA _aposS)] @@ @POST L("n") = lasteltnode(4); pncopyvars(L("n")); sclearpos(1,0); # Zero out token info. singler(1,4); @RULES _np <- _proPoss [s] _xWILD [star match=(_quan _num _xNUM)] _adj [star] _noun [plus] _xWILD [one lookahead fail=(_noun _xALPHA)] @@ @POST pncopyvars(3); sclearpos(1,0); # Zero out token info. singler(2,3); @RULES _np <- _xWILD [one fail=(_xALPHA _noun _det _quan _num _xNUM _aposS)] _adj [plus] _noun _xWILD [one lookahead fail=(_noun _xALPHA)] @@ # nounless. 
@POST L("tmp2") = lasteltnode(2); pncopyvars(L("tmp2")); sclearpos(1,0); singler(1,2); @RULES _np <- _xWILD [plus match=(_det _quan _num _xNUM)] _adj [plus] _xWILD [one lookahead fail=(_noun _adj _xALPHA _dbldash)] @@ # vg part of # idiomatic @POST L("tmp2") = N(2); group(2,2,"_noun"); pncopyvars(L("tmp2"),N(2)); nountonp(2,1); if (pnname(N(1)) == "_vg") if (!N("voice",1)) N("voice",1) = "active"; @RULES _xNIL <- _vg _xWILD [s one match=(part parts)] of [s lookahead] @@ # det noun noun @POST pncopyvars(3); sclearpos(1,0); # Zero out token info. singler(1,3); @RULES _np <- _det _noun _noun _xWILD [lookahead one fail=(_noun _conj)] @@ # prep noun alpha . @CHECK if (!N("noun",3)) fail(); @POST L("tmp3") = N(3); group(3,3,"_noun"); pncopyvars(L("tmp3"),N(3)); group(2,3,"_np"); pncopyvars(L("tmp3"),N(2)); clearpos(N(2),1,1); # Zero out token info. @RULES _xNIL <- _prep _noun _xALPHA _xWILD [one lookahead match=(_qEOS _xEND)] @@ # det alpha @CHECK if (!N("noun",2) && !N("adj",2)) fail(); if (N("noun",2) && N("adj",2)) # Can't decide yet. 
fail(); @POST L("tmp2") = N(2); if (N("noun",2)) group(2,2,"_noun"); else if (N("adj",2)) group(2,2,"_adj"); pncopyvars(L("tmp2"),N(2)); if (pnname(N(2)) == "_adj") fixadj(N(2)); @RULES _xNIL <- _det _xALPHA @@ # alpha conj det @CHECK if (!N("noun",1)) fail(); @POST L("tmp1") = N(1); group(1,1,"_noun"); pncopyvars(L("tmp1"),N(1)); @RULES _xNIL <- _xALPHA _conj [lookahead] _xWILD [one match=(_det _noun _np)] @@ # pro @POST if (pnname(N(3)) == "_vg") if (!N("voice",3)) N("voice",3) = "active"; if (pnname(N(2)) == "_pro") { nountonp(2,1); } @RULES _xNIL <- _xWILD [one match=(_xSTART _adv _advl _conj _fnword)] _pro [s] _xWILD [one lookahead match=(_vg)] @@ # prep adj alpha # dqa alpha @CHECK if (!N("noun",5) && !N("adj",5)) fail(); dqaninfo(2,3,4,5); if (!numbersagree(S("first"),S("last"))) fail(); @POST L("tmp5") = N(5); group(5,5,"_noun"); pncopyvars(L("tmp5"),N(5)); if (!N(4)) S("firstan") = N(5); S("firstn") = S("last") = S("lastn") = S("lastan") = N(5); # Reset stuff. groupnp(); if (pnname(N(1)) == "_vg") if (!N("voice",1)) N("voice",1) = "active"; @RULES _xNIL <- _xWILD [one match=(_prep _conj _verb _vg)] _det [plus] _xWILD [star match=(_quan _num _xNUM)] _adj [star] _xALPHA _xWILD [one lookahead match=(\, _qEOS _xEND _dbldash _prepj)] @@ _xNIL <- _xWILD [one match=(_prep _conj _verb _vg)] _det [star] _xWILD [plus match=(_quan _num _xNUM)] _adj [star] _xALPHA _xWILD [one lookahead match=(\, _qEOS _xEND _dbldash _prep)] @@ _xNIL <- _xWILD [one match=(_prep _conj _verb _vg)] _det [star] _xWILD [star match=(_quan _num _xNUM)] _adj [plus] _xALPHA _xWILD [one lookahead match=(\, _qEOS _xEND _dbldash _prep)] @@ # alpha alpha # TOO BROAD. 
@CHECK if (N("verb",2) || N("verb",3)) fail(); if (!N("adj",2) && !N("noun",2)) fail(); if (!N("adj",3) && !N("noun",3)) fail(); @POST L("tmp3") = N(3); L("tmp2") = N(2); if (N("adj",2)) group(2,2,"_adj"); else group(2,2,"_noun"); pncopyvars(L("tmp2"),N(2)); if (pnname(N(2)) == "_adj") fixadj(N(2)); group(3,3,"_noun"); pncopyvars(L("tmp3"),N(3)); group(2,3,"_np"); pncopyvars(L("tmp3"),N(2)); clearpos(N(2),1,1); # Zero out token info. @RULES _xNIL <- _xWILD [one match=(\, _dbldash)] _xALPHA _xALPHA _xWILD [one lookahead match=(_prep _conj)] @@ # low for @CHECK if (N("adj",2) && N("noun",2)) fail(); # Can't decide here. if (!N("adj",2) && !N("noun",2)) fail(); if (N("verb",2)) fail(); @POST L("tmp2") = N(2); if (N("noun",2)) group(2,2,"_noun"); else group(2,2,"_adj"); pncopyvars(L("tmp2"),N(2)); if (pnname(N(2)) == "_adj") fixadj(N(2)); nountonp(2,1); @RULES _xNIL <- _xWILD [one match=(\,)] _xALPHA _xWILD [one lookahead match=(_prep _conj)] @@ # verb alpha @CHECK if (N("pos num",2) != 2) fail(); if (!N("verb",2)) fail(); if (!vconjq(N(1),"inf")) fail(); # Check for helping verbs etc. @POST L("tmp2") = N(2); if (N("noun",2)) { group(2,2,"_noun"); pncopyvars(L("tmp2"),N(2)); fixnoun(N(2)); } @RULES _xNIL <- _verb _xALPHA @@ # well over # well under @POST L("tmp2") = N(2); group(2,2,"_adv"); pncopyvars(L("tmp2"),N(2)); if (pnname(N(1)) == "_vg") if (!N("voice",1)) N("voice",1) = "active"; @RULES _xNIL <- _xWILD [one match=(_verb _vg)] well _xWILD [s one lookahead match=(over under)] @@ # _adjc is constituent level concept, like _vg, _np. # vg adj @POST L("tmp3") = N(3); group(3,4,"_adjc"); pncopyvars(L("tmp3"),N(3)); N("ignorepos",3) = 1; # commented 04/19/07 AM. (why) N("posarr len",3) = 0; # 04/21/07 AM. 
@RULES _xNIL <- _xWILD [one match=(_noun _np _vg)] _xWILD [star match=(_advl _adv)] _adj _xWILD [star match=(_advl _adv)] _xWILD [one lookahead match=(_prep _fnword _qEOS _xEND \,)] @@ @POST singler(2,7); @RULES _np <- _xWILD [one match=(_xSTART)] _det [star] _xWILD [star match=(_quan _num _xNUM)] _adj [star] _noun [plus] _adj [plus] _noun [plus] _xWILD [one fail=(_xALPHA _noun _adj)] @@ # verb alpha dqan # vg alpha dqan @CHECK if (!N("adj",2) && !N("noun",2)) fail(); @POST L("tmp2") = N(2); if (N("adj",2)) { group(2,2,"_adj"); pncopyvars(L("tmp2"),N(2)); fixadj(N(2)); } else if (N("noun",2)) { group(2,2,"_noun"); pncopyvars(L("tmp2"),N(2)); fixnoun(N(2)); } @RULES _xNIL <- _xWILD [one match=(_vg _verb)] _xALPHA _noun [plus] @@ # Convert pronouns at some point. @PRE <1,1> varz("proposs"); @CHECK if (pnname(N(1)) == "_np") fail(); @POST nountonp(1,1); @RULES _xNIL <- _xWILD [s one match=(_proSubj _proObj) except=(_whword)] @@
# NOTE(review): confirm whether varne() also matches nouns that lack a
# "number" variable entirely, or only those with a differing value.
# Count nouns with variable "number" with value not equal to "plural" @PRE <1,1> varne("number","plural"); @POST ++G("count nonplural nouns"); @RULES _xNIL <-  _noun @@
# Compose _jobTitle from two job-title roots, or a field name followed by
# a job-title root.
@NODES _LINE @RULES _jobTitle <- _jobTitleRoot [s] _xWHITE [s] _jobTitleRoot [s] @@ _jobTitle <- _fieldName [s] _xWHITE [s] _jobTitleRoot [s] @@
# Fetch the string value from a KB value object. L("return_str") = getstrval(L("val"));
# Move concept before previous sibling. (Moves concept to the left in its list.) movecleft(L("con"));
# Debug: dump the KB subtree rooted at the "phrases" concept (depth 1).
@CODE DisplayKB(findconcept(findroot(), "phrases"), 1); @@CODE
# Capture the line's leading word into X("word") (reduced to _word), and
# a "pos = <tag>" assignment into X("pos") (reduced to _pos).
@NODES _LINE @POST X("word") = N("$text",2); single(); @RULES _word <- _xSTART _xALPHA ### (1) @@ @POST X("pos") = N("$text",3); single(); @RULES _pos <- pos \= _xALPHA ### (1) @@
# Rename con's named attribute to new. renameattr(L("con"), L("name"), L("new"));
# KB value-list helpers: StrValsToArray copies a chain of KB values into
# an array of strings; ValsLength counts the values in a chain.
@DECL # Take KB string values and convert to array # Args # first_val: first value in kb value list StrValsToArray(L("first_val")) { L("arr"); L("arr_idx") = 0; while (L("first_val")) { L("arr")[L("arr_idx")] = getstrval(L("first_val")); L("arr_idx")++; L("first_val") = nextval(L("first_val")); } return L("arr"); } # Get count of values in attribute list. ValsLength(L("first_val")) { L("arr_len") = 0; while (L("first_val")) { L("arr_len")++; L("first_val") = nextval(L("first_val")); } return L("arr_len"); } @@DECL
# IsPOS: return 1 if any variable name on L("node") is found (via
# strcontains) against the L("pos") string, else 0.
# NOTE(review): this is substring containment, so partial overlaps (e.g.
# "noun" vs "pronoun") may match — confirm that is intended.
@DECL IsPOS(L("node"), L("pos")) { L("vars") = pnvarnames(L("node")); L("i") = 0; while (L("i") < arraylength(L("vars"))) { L("var") = L("vars")[L("i")]; if (strcontains(L("pos"), L("var"))) { return 1; } L("i")++; } return 0; } @@DECL
# Recognize the resume header phrase "Professional Objective" (both words
# capitalized) and layer it as _header.
@NODES _LINE @PRE <1,1> cap(); <3,3> cap(); @RULES # Ex: Professional\_Objective _ObjectiveHeaderPhrase [layer=(_header )] <- Professional [s] _xWHITE [star s] Objective [s] @@
# Seed the dictionary: gender attributes for "he"/"she", and date/number
# attributes for "january".
@CODE DictionaryStart(); DictionaryWord("he","gender","male","str"); DictionaryWord("she","gender","female","str"); DictionaryWord("january","date","month","str"); DictionaryWord("january","number","1","num"); DictionaryEnd(); @@CODE
# Same as addword; more principled function name. L("return_con") = dictgetword(L("str"));
# After a _split marker, group the following run of non-split material as
# _entries (singler keeps only the material, dropping the marker).
@NODES _ROOT @POST singler(2,2); @RULES _entries <- _split ### (1) _xWILD [fails=(_split)] ### (2) @@
# Remove whitespace tokens inside _term zones.
@NODES _term @POST excise(1,1); @RULES _xNIL <- _xWHITE @@
# Extend school names leftward: absorb a preceding capitalized word into
# _SchoolNamePhrase (excluding degree/major/gpa/date/city-state/function
# word contexts), and prepend a _CityState to a _CompleteSchoolName.
@NODES _LINE # Joe (SchoolNamePhrase University) -> ModSchoolPhrase(Joe University) @RULES _SchoolNamePhrase [base] <- _xWILD [one s fails=( _xPUNCT _xWHITE _xNUM _degree _degreePhrase _major _minor _gpa from at in by _DateRange _SingleDate _CityState )] # 10/09/99 PS _xWHITE [one s] # 10/07/99 AM. _xWILD [t one match=( _SchoolNamePhrase _SchoolRoot )] @@ _CompleteSchoolName [] <- _CityState [s] _xWHITE [s] _CompleteSchoolName [s] @@
# Within translation header lines, group a run of alphabetic tokens as a
# _translation.
@PATH _ROOT _translations _headerZone _LINE @RULES _translation <- _xWILD [plus match=(_xALPHA)] ### (1) @@
# Parse "const <name> = <brackets>" lines: record the name in S("pos")
# and create/fetch a matching concept under G("funcs") into S("con").
@NODES _ROOT @POST S("pos") = N("$text",2); S("con") = getconcept(G("funcs"),N("$text",2)); single(); @RULES _pos <- const ### (1) _xALPHA ### (2) \= ### (3) _brackets ### (4) @@
# Fetch string-value of attribute (must be first). L("return_str") = strval(L("con"), L("name"));
###############################################
# FILE: custom_funcs
# SUBJ: General-purpose utilities: array concat/swap, quicksort, naive
#       tokenization, binary search, dedupe, KB value-list helpers.
# AUTH: Ashton
# MODIFIED:
###############################################
# FIX: removed a stray bare "C" token (extraction garbage, a syntax
# error), fixed comment typos ("Averge" -> "Average", "arrayd" ->
# "arrays"), and dropped the fully-commented-out GetUniqueStrWords stub.
# All executable logic is unchanged.

@DECL

# Concatenate
# Concatenates two non-empty arrays.
# Args: two non-empty arrays.
# Returns: concatenated array.
Concatenate(L("arr_1"), L("arr_2"))
{
  L("new_arr") = L("arr_1");
  L("idx") = 0;
  while (L("idx") < arraylength(L("arr_2"))) {
    L("new_arr")[arraylength(L("new_arr"))] = L("arr_2")[L("idx")];
    L("idx")++;
  }
  return L("new_arr");
}

# Swap
# Swap elements at first and second indices in array.
# Args:
#   L("arr") = non-empty array
#   L("first_idx") = index of first element to swap
#   L("second_idx") = index of second element to swap
# Returns: array with swapped elements.
# Note that indices must be < array length.
Swap(L("arr"), L("first_idx"), L("second_idx"))
{
  L("temp") = L("arr")[L("first_idx")];
  L("arr")[L("first_idx")] = L("arr")[L("second_idx")];
  L("arr")[L("second_idx")] = L("temp");
  return L("arr");
}

# QuickSort
# Quicksort wrapper to handle indices, since this can be a challenge.
# Use QuickSortPartition for a subarray.
QuickSort(L("arr"))
{
  if (arraylength(L("arr")) <= 1) {
    return L("arr");
  }
  L("start") = 0;
  L("end") = arraylength(L("arr")) - 1;
  return QuickSortPartition(L("arr"), L("start"), L("end"));
}

# QuickSortPartition
# Performs quicksort on array from <low> to <high>.
# Args:
#   L("arr"): array to sort
#   L("low"): starting index of subarray
#   L("high"): upper index of subarray
# Returns: sorted array.
# N.B. low and high must be >= 0 and < array length.
QuickSortPartition(L("arr"), L("low"), L("high"))
{
  if (L("low") < L("high")) {
    # Get pivot value, then partition (Lomuto scheme).
    L("pivot") = L("arr")[L("high")];
    L("i") = L("low") - 1;
    L("j") = L("low");
    while (L("j") < L("high")) {
      if (L("arr")[L("j")] <= L("pivot")) {
        L("i")++;
        L("arr") = Swap(L("arr"), L("i"), L("j"));
      }
      L("j")++;
    }
    L("arr") = Swap(L("arr"), L("i")+1, L("high"));
    L("pivot") = L("i") + 1;
    # Sort each partition of the array recursively.
    L("arr") = QuickSortPartition(L("arr"), L("low"), L("pivot")-1);
    L("arr") = QuickSortPartition(L("arr"), L("pivot")+1, L("high"));
  }
  return L("arr");
}

# GetTokens
# Naive tokenization function: expand "n't" to " not", replace non-alpha
# characters with spaces, split on spaces, then keep lowercased stems of
# spelled words longer than two characters.
GetTokens(L("str"))
{
  L("new_str");
  L("str") = strsubst(L("str"), "n't", " not");
  L("str") = strclean(L("str"));
  L("i") = 0;
  while (L("i") < strlength(L("str"))) {
    L("char") = strchar(L("str"), L("i"));
    L("char_to_add");
    if (strisalpha(L("char")) || L("char") == " ") {
      L("char_to_add") = L("char");
    }
    else {
      L("char_to_add") = " ";
    }
    if (!L("new_str")) {
      L("new_str") = L("char_to_add");
    }
    else {
      L("new_str") = L("new_str") + L("char_to_add");
    }
    L("i")++;
  }
  L("tokens") = split(L("new_str"), " ");
  L("return");
  L("j") = 0;
  "debug.txt" << arraylength(L("tokens")) << "\n";
  "debug.txt" << L("new_str") << "\n";
  "debug.txt" << L("str") << "\n";
  while (L("j") < arraylength(L("tokens"))) {
    L("this_str") = L("tokens")[L("j")];
    if (L("this_str")) {
      L("this_str") = strclean(L("this_str"));
      L("this_str") = strtolower(L("this_str"));
      if ((strlength(L("this_str")) > 2) && (spellword(L("this_str")))) {
        L("return")[arraylength(L("return"))] = stem(L("this_str"));
      }
    }
    L("j")++;
  }
  return L("return");
}

# BinarySearchPartition
# Recursive binary search over L("arr")[low..high].
# Args
#   arr: sorted array in which to search
BinarySearchPartition(L("arr"), L("low"), L("high"), L("val"))
{
  "binary.txt" << "low: " << L("low") << " high: " << L("high") << "\n";
  if (L("low") > L("high")) {
    return -1;
  }
  L("mid") = (L("high") + L("low")) / 2;
  "binary.txt" << "mid: " << L("mid") << "\n";
  if (L("arr")[L("mid")] == L("val")) {
    return L("mid");
  }
  else if (L("val") < L("arr")[L("mid")]) {
    L("high") = L("mid") - 1;
    return BinarySearchPartition(L("arr"), L("low"), L("high"), L("val"));
  }
  else {
    L("low") = L("mid") + 1;
    return BinarySearchPartition(L("arr"), L("low"), L("high"), L("val"));
  }
}

# Binary Search
# Args
#   arr: sorted array to search
#   val: value to search for
# Returns
#   integer: -1 if not in array, otherwise idx in array
BinarySearch(L("arr"), L("val"))
{
  L("len") = arraylength(L("arr"));
  L("low") = 0;
  if (L("len") == 1) {
    if (L("arr")[L("low")] == L("val")) {
      return 0;
    }
    else {
      return -1;
    }
  }
  return BinarySearchPartition(L("arr"), L("low"), L("len")-1, L("val"));
}

# FilterDuplicates
# Returns list as a set of unique elements: quicksort, then iterate.
# Average time complexity O(N log N).
# Build a hash function later for O(N) performance.
# Args
#   arr: array to dedupe
#   sorted: nonzero if arr is already sorted
# Returns
#   sorted array of unique values in list
FilterDuplicates(L("arr"), L("sorted"))
{
  L("len") = arraylength(L("arr"));
  if (L("len") <= 1) {
    return L("arr");
  }
  L("i") = 0;
  # Sort list unless the caller says it already is.
  if (!L("sorted")) {
    L("sorted_arr") = QuickSort(L("arr"));
  }
  else {
    L("sorted_arr") = L("arr");
  }
  L("set_idx") = 0;
  L("set");
  while (L("i") < L("len")) {
    L("curr_item") = L("sorted_arr")[L("i")];
    L("i")++;
    # Advance i past duplicates of the current item.
    while (L("sorted_arr")[L("i")] == L("curr_item")) {
      L("i")++;
    }
    if (!L("set_idx")) {
      L("set") = L("curr_item");
    }
    else {
      L("set")[L("set_idx")] = L("curr_item");
    }
    L("set_idx")++;
  }
  return L("set");
}

# StrValsToArray
# Take KB string values and convert to array.
# NOTE(review): duplicates the StrValsToArray defined in the shared
# value-list helper file — consider keeping only one copy.
# Args
#   first_val: first value in kb value list
StrValsToArray(L("first_val"))
{
  L("arr");
  L("arr_idx") = 0;
  while (L("first_val")) {
    L("arr")[L("arr_idx")] = getstrval(L("first_val"));
    L("arr_idx")++;
    L("first_val") = nextval(L("first_val"));
  }
  return L("arr");
}

@@DECL
@CODE
# Traverse rare split of eui_to_codes concept, saving sum total
# and adding eui/count to kb.
G("rare_eui_sum") = 0;
G("rare_eui_exp_sum") = 0;

L("iter") = down(G("eui_to_codes_rare"));
while (L("iter")) {
  # Get all codes for eui
  L("codes") = findvals(L("iter"), "codes");
  # Create concept for code in P_eui
  L("eui_con") = makeconcept(G("P_eui_rare"), conceptname(L("iter")));
  # Get number of codes in codes attribute
  L("num_codes") = ValsLength(L("codes"));
  # Add to sum total
  G("rare_eui_sum") = G("rare_eui_sum") + L("num_codes");
  # Add to exp total (softmax denominator)
  G("rare_eui_exp_sum") = flt(G("rare_eui_exp_sum")) + exp(L("num_codes"));
  # Add to numval
  addnumval(L("eui_con"), "codes", L("num_codes"));
  # Advance iterator to sibling concept
  L("iter") = next(L("iter"));
}

"p_rare.txt" << "\nTotal sum: " << G("rare_eui_sum") << "\n";
"p_rare.txt" << "Total exp sum: " << G("rare_eui_exp_sum") << "\n\n";

# Calculate softmax P for each eui.
L("iter") = down(G("P_eui_rare"));
while (L("iter")) {
  # Get codes attribute
  L("frequency") = numval(L("iter"), "codes");
  # Calculate exp(eui) / (sum to j [exp(eui_j)])
  # FIX: added the statement terminator that was missing after exp().
  L("exp_n") = flt(exp(L("frequency")));
  L("p") = L("exp_n") / flt(G("rare_eui_exp_sum"));
  "p_rare.txt" << conceptname(L("iter")) << ": " << L("exp_n") << " / total = " << L("p") << "\n";
  # Add p as attribute under eui
  addstrval(L("iter"), "p", str(L("p")));
  # Advance iterator to next sibling
  L("iter") = next(L("iter"));
}
@@CODE
# @MULTI _section _sentence _subsection # @POST # L("pn_iter") = N(1); # L("wrote") = 0; # while (L("pn_iter")) { # L("bases") = pnvar(L("pn_iter"), "bases") # if (L("bases")) { # if (strcontains("|",L("bases"))) { # L("all_bases") = split(L("bases"), "|"); # AddUniqueCon(G("note_euis"), pnname(L("pn_iter"))); # "words.txt" << pnname(L("pn_iter")) << ":\n\t" << L("bases") << "\n"; # } # AddUniqueCon(G("note_words"), pnname(L("pn_iter"))); # "words.txt" << pnname(L("pn_iter")) << ":\n\t" << L("bases") << "\n"; # L("wrote") = 1; # } # L("pn_iter") = pnnext(L("pn_iter")); # } # @RULES # _xNIL <- # _xANY # _xWILD [fails=(_xEND)] # _xEND # @@
# Split a string into pieces using a separator character
@CODE
L("arr") = split("ab|cd|efg", "|");
"output.txt" << L("arr") << "\n";      # prints the whole array
"output.txt" << L("arr")[2] << "\n";   # prints the third element (0-indexed)

# prints out:
#   ab
#   cd
#   efg
#
#   efg
# NOTE(review): the sample lacks a closing @@CODE marker here; confirm
# whether one was lost when the file was flattened.
@CODE
L("hello") = 0;
@@CODE

#@PATH _ROOT _TEXTZONE _sent
@NODES _sent

# Sentence-start part-of-speech disambiguation: each rule below anchors
# at _xSTART and resolves ambiguous _xALPHA tokens from local context.

# ^ alpha ,
@PRE
<2,2> var("adv");
<2,2> varz("inc100 ^-alpha-comma");
@POST
alphatoadv(2);
N("inc100 ^-alpha-comma",2) = 1;
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	\, [lookahead]
	@@

# TOO BROAD. Could be imperative etc.
#
# ^ alpha
#@PRE
#<2,2> varz("inc
#@POST
#	if (plural(N(2)))
#	{
#	if (N("verb",2))
#		{
#		# Clear out verb.
#		N("verb",2) = 0;
#		--N("pos num",2);
#		alphaunambigred(2);
#		}
#	}
#@RULES
#_xNIL <-
#	_xSTART
#	_xALPHA
#	@@

# ^ alpha that
@CHECK
if (!N("verb",2)) fail();
@POST
L("tmp2") = N(2);
group(2,2,"_verb");
pncopyvars(L("tmp2"),N(2));
# Todo: reduce to vg.
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	that [s lookahead]
	@@

# ^ np that alpha
# ^ noun that alpha
@PRE
<4,4> var("verb");
@CHECK
if (!vconjq(N(4),"inf") && !vconjq(N(4),"-s")) fail();
@POST
alphatoverb(4,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_np _noun)]
	that [s]
	_xALPHA
	@@

# ^ verb that verb
@CHECK
if (N("mypos",3)) fail();
if (!vconjq(N(2),"-ing")) fail();
@POST
chpos(N(3),"DT");	# that/DT.
nountonp(3,1);
N("number",3) = "singular";
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_verb _vg)]
	that [s except=(_np _det)]
	_xWILD [one lookahead match=(_modal _verb _vg)]
	@@

# ^ verb that alpha prep
@CHECK
if (!N("noun",4)) fail();
@POST
L("tmp4") = N(4);
group(4,4,"_noun");
pncopyvars(L("tmp4"),N(4));
if (!N("mypos",3))
	chpos(N(3),"DT");	# that/DT
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_verb _vg)]
	that [s]
	_xALPHA
	_xWILD [one lookahead match=(_prep)]
	@@

# ^ that noun
@CHECK
if (N("mypos",2)) fail();
@POST
if (singular(N(4)))
	{
	chpos(N(2),"DT");	# that/DT.
	pnrename(N(2),"_det");
	}
else
	chpos(N(2),"IN");	# that/IN.
@RULES
_xNIL <-
	_xSTART
	that [s except=(_det)]
	_xWILD [star match=(_adv _advl)]
	_xWILD [one match=(_noun)]
	@@

# ^ that alpha noun
@CHECK
if (!N("verb",3)) fail();
S("ed") = vconjq(N(3),"-ed");
if (!S("ed") && !vconjq(N(3),"-s")) fail();
@POST
if (!N("mypos",2))
	chpos(N(2),"DT");	# that/DT
pnrename(N(2),"_det");
L("tmp3") = N(3);
group(3,3,"_verb");
pncopyvars(L("tmp3"),N(3));
if (S("ed"))
	chpos(N(3),"VBD");
else
	chpos(N(3),"VBZ");
@RULES
_xNIL <-
	_xSTART
	that [s]
	_xALPHA
	_noun [lookahead]
	@@

# ^ that alpha
@PRE
<2,2> varz("mypos");	# 05/04/07 AM.
@CHECK
if (!N("noun",3) && !N("adj",3)) fail();
if (N("verb",3))
	{
	if (!vconjq(N(3),"inf")) fail();
	}
@POST
if (!N("mypos",2))
	chpos(N(2),"DT");	# that/DT
pnrename(N(2),"_det");
@RULES
_xNIL <-
	_xSTART
	that [s]
	_xALPHA
	@@

# ^ fnword det alpha alpha
# ^ fnword dqa alpha alpha
# NIBBLE.
@POST
# Need agreement, plurality checks.
if (N("noun",6) && N("adj",6))
	{
	if (plural(N(6)))
		alphatonoun(6);
	else if (N("noun",7) || N("adj",7))
		alphatoadj(6);
	else
		fixnphead(6);
	}
else if (N("noun",6))
	alphatonoun(6);
else if (N("adj",6))
	alphatoadj(6);
else if (N("adv",6))
	alphatoadv(6);
else
	alphatoadj(6);	# Probly some verby form.
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_fnword)]
	_xWILD [star match=(_det _pro)]
	_xWILD [star match=(_quan _num _xNUM)]
	_adj [star]
	_xALPHA
	_xALPHA
	@@

# ^ noun modal alpha
@CHECK
if (!N("verb",6)) fail();
if (!vconjq(N(6),"inf")) fail();
@POST
L("tmp6") = N(6);
group(6,6,"_verb");
pncopyvars(L("tmp6"),N(6));
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_pro _noun _np)]
	_xWILD [star match=(_adv _advl)]
	_modal
	_xWILD [star match=(_adv _advl)]
	_xALPHA
	_xWILD [star lookahead match=(_adv _advl)]
	_xWILD [one match=(_noun _np _pro)]
	@@

# ^ noun alpha that
# ^ pro alpha that
#              that
@PRE
<4,4> var("verb");
@POST
alphatovg(4,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_pro _noun _np)]
	_xWILD [star match=(_adv _advl)]
	_xALPHA
	_xWILD [star lookahead match=(_adv _advl)]
	_xWILD [s one match=(that)]
	@@

# ^ dqan alpha that
@PRE
<7,7> var("verb");
@POST
alphatovg(7,"active","VBP");
@RULES
_xNIL <-
	_xSTART	### (1)
	_xWILD [star match=(_det _pro)]	### (2)
	_xWILD [star match=(_quan _num _xNUM)]	### (3)
	_adj [star]	### (4)
	_noun [plus]	### (5)
	_xWILD [star match=(_adv _advl)]	### (6)
	_xALPHA	### (7)
	_xWILD [star lookahead match=(_adv _advl)]	### (8)
	that [s one]	### (9)
	@@

# ^ pro alpha
@CHECK
if (!N("verb",3)) fail();
@POST
alphatoverb(3,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_proSubj [s]
	_xALPHA
	_xWILD [one lookahead fail=(_modal _verb _vg)]
	@@

# ^ noun alpha
@PRE
<4,4> var("verb");
@CHECK
if (!plural(N(2))) fail();
if (!vconjq(N(4),"inf")) fail();
@POST
alphatovg(4,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_noun _np)]
	_adv [star]
	_xALPHA
	@@

# ^ np alpha alpha
@PRE
<4,4> var("verb");
@CHECK
if (pnname(N(2)) == "_pro" && !N("prosubj",2)) fail();
if (vconjq(N(4),"-ing") || vconjq(N(4),"-en")) fail();
if (singular(N(2)) && vconjq(N(4),"inf")) fail();
if (plural(N(2)) && vconjq(N(4),"-s")) fail();
@POST
alphatovg(4,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_noun _np _pro)]
	_adv [plus]
	_xALPHA
	@@

# ^ dqan alpha dqan
@PRE
<6,6> var("verb");
@POST
# todo: Check agreement.
alphatovg(6,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_det _pro)]
	_xWILD [star match=(_quan _num _xNUM)]
	_adj [star]
	_noun [plus]
	_xALPHA
	_xWILD [one lookahead match=(_det _pro _quan _num _xNUM \, )]
	@@

# ^ dqa alpha ,
# Apposition
@PRE
<5,5> var("noun");
@POST
fixnphead(5);
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_det)]	### (2)
	_xWILD [star match=(_quan _num _xNUM)]	### (3)
	_adj [star]	### (4)
	_xALPHA
	\,
	@@

# ^ dqa alpha noun
@PRE
<5,5> var("noun");
<6,6> vareq("sem","person");
@CHECK
if (!N(2) && !N(3) && !N(4)) fail();
@POST
fixnphead(5);
dqaninfo(2,3,4,5);
groupnp();
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_det _pro)]
	_xWILD [star match=(_quan _num _xNUM)]
	_adj [star]
	_xALPHA
	_noun
	@@

# ^ dqan alpha as verb
# Ex: People regarded as being weird....
@PRE
<6,6> var("verb");
@CHECK
if (!vconjq(N(6),"-en")) fail();
@POST
alphatovg(6,"passive","VBN");
N("ellipted-that-is",6) = 1;
#	L("tmp6") = N(6);
#	group(6,6,"_clause");
#	setunsealed(6,"true");
#
#	N("voice",6) = "passive";
#	N("vg node",6) = L("tmp6");
chpos(N(7),"IN");	# as/IN
pnrename(N(7),"_fnword");	# prep -> fnword
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_det _pro)]
	_xWILD [star match=(_quan _num _xNUM)]
	_adj [star]
	_noun [plus]
	_xALPHA
	as [s]
	_xWILD [one match=(_verb _vg)]
	@@

# ^ dqan alpha prep
@CHECK
if (!N("verb",6)) fail();
if (phrprepverbq(N(6),N(7))) succeed();
if (N("adj",6) || N("noun",6)) fail();
# Agreement...
if (vconjq(N(6),"-ing")) fail();
@POST
alphatovg(6,0,0);
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_det _pro)]
	_xWILD [star match=(_quan _num _xNUM)]
	_adj [star]
	_noun [plus]
	_xALPHA
	_xWILD [one lookahead match=(_prep)]
	@@

# ^ dqan alpha alpha dqan [idiom: let stand]
# eg: court let stand a ruling
#           let
#               stand
@POST
alphatoverb(7,"active","VB");	# Cleanup.	# 05/28/07 AM.
alphatoverb(6,"active","VBD");	# Cleanup.	# 05/28/07 AM.
group(6,7,"_vg");
N("voice",6) = "active";
@RULES
_xNIL <-
	_xSTART
	_xWILD [star match=(_det _pro)]
	_xWILD [star match=(_quan _num _xNUM)]
	_adj [star]
	_noun [plus]
	let
	stand
	_xWILD [one lookahead match=(_det _pro _quan _num _xNUM _adj _noun _np _adv _advl)]
	@@

# ^ dqan alpha adv alpha
@PRE
<4,4> var("noun");
<6,6> var("verb");
@CHECK
if (!plural(N(4))) fail();
if (!vconjq(N(6),"inf")) fail();
@POST
alphatoverb(6,"active","VBP");
fixnphead(4);
@RULES
_xNIL <-
	_xSTART
	_adj
	_noun
	_xALPHA
	_adv
	_xALPHA
	_xWILD [one lookahead match=(_fnword)]
	@@

# ^ dqan alpha
@CHECK
if (N("verb",4)) fail();
if (!N("noun",4) && !N("adj",4)) fail();
if (singular(N(3)) && singular(N(4))) succeed();
fail();
@POST
alphatonoun(4);
@RULES
_xNIL <-
	_xSTART
	_det [opt]
	_noun
	_xALPHA
	@@

# ^ dqan alpha alpha noun
@PRE
<6,6> var("verb");
@CHECK
if (!vconjq(N(6),"-ed")) fail();
if (!N("noun",6) && !N("adj",6)) fail();
@POST
alphatovg(6,"active","VBD");
@RULES
_xNIL <-
	_xSTART	### (1)
	_xWILD [star match=(_det _pro)]	### (2)
	_xWILD [star match=(_quan _num _xNUM)]	### (3)
	_adj [star]	### (4)
	_noun [plus]	### (5)
	_xALPHA	### (6)
	_xALPHA	### (7)
	@@

# Special cases. Can't resolve yet.
@POST
noop();
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(following)]
	@@

# ^ alpha det
@PRE
<2,2> varz("inc100 a-det");
@POST
L("done") = 0;
if (!L("done") && N("verb",2))
	{
	# Presumably gerund tagging will dominate in Treebank.
	if (vconjq(N(2),"-ing"))	# Gerund case.
		{
		alphatovg(2,"active","VBG");	# Conform Treebank.
		L("done") = 1;
		}
	else if (vconjq(N(2),"inf"))
		{
		alphatovg(2,"active","VB");
		L("done") = 1;
		}
	}
if (!L("done") && N("adv",2))
	{
	alphatoadv(2);
	L("done") = 1;
	}
if (!L("done") && N("adj",2))
	{
	alphatoadj(2);
	L("done") = 1;
	}
N("inc100 a-det",2) = 1;
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xWILD [one lookahead match=(_det)]
	@@

# ^ alpha verb
@PRE
<2,2> varz("inc100 av");
@CHECK
if (!N("noun",2) && !N("unknown",2)) fail();
@POST
if (singular(N(2)) && !N("mass",2) && !N("mypos",2))
	cappos(N(2),0);
fixnphead(2);
N("inc100 av",2) = 1;	# Loop guard.
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xWILD [star lookahead match=(_adv _advl)]
	_xWILD [one match=(_verb _vg _modal)]
	@@

# ^ alpha , pro
@CHECK
if (!N("adv",2)) fail();
@POST
alphatoadv(2);
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	\, [lookahead]
	_pro
	@@

# ^ alpha alpha verb
@PRE
<2,3> var("noun");	# Require possible nouns.
@POST
fixnphead(3);
fixnpnonhead(2);
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xALPHA
	_xWILD [one lookahead match=(_modal _verb _vg)]
	@@

# ^ alpha alpha ,
@PRE
<3,3> var("noun");	# Require possible noun.
@CHECK
if (!N("adj",2) && !N("noun",2)) fail();
if (N("verb",3))
	{
	if (!vconjq(N(3),"inf") && !vconjq(N(3),"-s")) fail();
	}
@POST
fixnphead(3);
fixnpnonhead(2);
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xALPHA
	_xWILD [one lookahead match=( \, )]
	@@

# ^ alpha alpha pro
@PRE
<2,3> var("noun");	# Require possible nouns.
@CHECK
if (!singular(N(2)) || N("unknown",2)) fail();
# Todo: not proper name.
@POST
fixnphead(3);
fixnpnonhead(2);
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xALPHA
	_xWILD [s one lookahead match=( himself herself itself themselves
		ourselves yourselves _noun _verb _vg _adv )]
	@@

# ^ alpha alpha prep
# Note: ellipsis. "people happy with ..."
# (^ alpha alpha prep, continued) -- guard against re-entry with
# the "inc100 a-a-prep" flag.
@CHECK
if (N("inc100 a-a-prep",2)) fail();
@POST
N("inc100 a-a-prep",2) = 1;
if (N("noun",2))
	{
	if (plural(N(2)))
		{
		if (N("verb",3))
			{
			alphatoverb(3,"active","VBP");
			fixnphead(2);
			}
		else if (N("adj",3))
			{
			alphatoadj(3);
			fixnphead(2);
			}
		}
	}
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xALPHA
	_xWILD [one lookahead match=(_prep)]
	@@

# ^ alpha alpha adj
@CHECK
if (!N("verb",3) || !N("noun",2)) fail();
if (!vconjq(N(3),"-ed")) fail();
@POST
alphatoverb(3,"active","VBP");
if (singular(N(2)) && !N("mass",2))
	cappos(N(2),0);
fixnphead(2);
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xALPHA
	_xWILD [star lookahead match=(_adv _advl)]
	_xWILD [one match=(_det _quan _num _xNUM _adj _np)]
	@@

# question patterns.
# why are alpha
@POST
if (N("noun",4))
	{
	if (plural(N(4)))
		fixnphead(4);
	else
		fixnpnonhead(4);
	}
else if (N("adj",4))
	fixnpnonhead(4);
else if (N("adv",4))
	{
	L("tmp4") = N(4);
	group(4,4,"_adv");
	pncopyvars(L("tmp4"),N(4));
	fixadv(N(4));
	}
else	# verb or otherwise.
	fixnpnonhead(4);
@RULES
_xNIL <-
	_xSTART
	_xWILD [s one match=(_whword)]
	are [s]
	_xALPHA
	@@

# ^ yet
@CHECK
if (N("mypos",2)) fail();
@POST
chpos(N(2),"CC");	# yet/CC
pnrename(N(2),"_adv");
@RULES
_xNIL <-
	_xSTART
	yet [s]
	@@

# ^ adj alpha alpha [NIBBLE]
@CHECK
if (!N("adj",3) && !N("noun",3)) fail();
@POST
if (singular(N(3)))
	fixnpnonhead(3);
else
	fixnphead(3);
@RULES
_xNIL <-
	_xSTART
	_adj
	_xALPHA
	_xALPHA
	@@

# ^ noun 's alpha
@CHECK
if (!N("adj",4)) fail();
@POST
alphatoadj(4);
pnrename(N(3),"_verb");
chpos(N(3),"VBZ");	# 's/VBZ
N("copula",3) = 1;
@RULES
_xNIL <-
	_xSTART
	_noun
	_aposS
	_xALPHA
	_xWILD [one lookahead match=(_dbldash \, \; _qEOS)]
	@@

# ^ noun verb noun , alpha noun
@PRE
<6,6> var("verb");
@CHECK
L("v") = N("verb node",3);
S("c") = samevconj(L("v"),N(6));
if (!S("c")) fail();
@POST
alphatovg(6,"active","VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_noun _np)]
	_xWILD [one match=(_verb _vg)]
	_xWILD [one match=(_noun _np)]
	\,
	_xALPHA
	_xWILD [one lookahead match=(_noun _np _det _quan _num _xNUM _adj _xALPHA)]
	@@

# to alpha alpha dqan
@PRE
<2,2> var("adv");
<3,3> var("verb");
@CHECK
if (!vconjq(N(3),"inf")) fail();
@POST
alphatovg(3,"active","VB");
alphatoadv(2);
@RULES
_xNIL <-
	to [s]
	_xALPHA
	_xALPHA
	_xWILD [one lookahead match=(_det _pro _quan _num _xNUM _np)]
	@@

# ^ np , dqan alpha ,
@PRE
<6,6> var("verb");
@CHECK
if (plural(N(5)) && vconjq(N(6),"-s")) fail();
if (singular(N(5)) && vconjq(N(6),"inf")) fail();
if (!verbfeat(N(6),"T5")) fail();
@POST
alphatovg(6,0,"VBP");
@RULES
_xNIL <-
	_xSTART
	_xWILD [one match=(_np _noun)]
	\,
	_det
	_noun
	_xALPHA
	\, [lookahead]
	@@

# ^ alpha num
@PRE
<2,2> var("verb");
@CHECK
if (!vconjq(N(2),"inf")) fail();
@POST
alphatovg(2,"active","VB");
@RULES
_xNIL <-
	_xSTART
	_xALPHA
	_xWILD [one lookahead match=(_quan _num _xNUM)]
	@@

# ^ and then
@PRE
<3,3> vareq("stem","then");
@POST
group(2,3,"_adv");
@RULES
_xNIL <-
	_xSTART
	_conj
	_fnword [opt]
	@@

# ^ before alpha
# ^ before ving
@PRE
<3,3> var("verb");
@CHECK
if (!vconjq(N(3),"-ing")) fail();
@POST
alphatovg(3,"active","VBG");
@RULES
_xNIL <-
	_xSTART
	before [s]
	_xALPHA
	@@

# Sentinel.
@POST
# When done matching rules against starts,
# finish with current segment.
# NLP++ opt: would be nice to have a way to exit the
# current dominating context (ie, the current _sent).
# something like an xcontinue() or xnext() function.
noop();
@RULES
_xNIL <-
	_xANY [plus]
	@@
@NODES _LINE

# Match an ==...== style header; record its text and the marker length,
# and track the maximum marker length seen so far in G("max header").
@POST
S("header") = N("$text",2);
X("header") = 1;
S("count") = strlength(N("$text",1));
"headers.txt" << SpacesStr(S("count")) << S("count") << " " << S("header") << "\n";
if (S("count") > G("max header"))
	G("max header") = S("count");
single();
@RULES
_header <-
	_xWILD [min=2 match=(\=)]	### (1)
	_xWILD [plus fail=(\=)]	### (2)
	_xWILD [min=2 match=(\=)]	### (3)
	@@
@NODES _ROOT

# Record each adjacent alpha+number pair (e.g. "abc123") as a unique
# "terms" string value under the alphaNumeric concept.
@POST
AddUniqueStr(G("alphaNumeric"), "terms", N("$text", 1) + N("$text", 2));
single();
@RULES
_alphaNum <-
	_xALPHA	### (1)
	_xNUM	### (2)
	@@
@PATH _ROOT

# Group a bibliography reference and everything up to the next
# reference (or the bibliography end) into one _bibItem.
@POST
S("ref") = N("ref",1);
"bib.txt" << "Ref: " << S("ref") << "\n";
single();
@RULES
_bibItem <-
	_bibRef	### (1)
	_xWILD [fail=(_bibRef _endBib)]	### (2)
	@@
# Create two children concepts then delete them both. Then create them
# again and delete one of them by name.
if (findconcept(findroot(),"a concept"))
	rmconcept(findconcept(findroot(),"a concept"));
# FIX: added the statement terminator that was missing on this line.
G("aParent") = makeconcept(findroot(),"a concept");
G("aChild") = makeconcept(G("aParent"),"a child");
G("aChild") = makeconcept(G("aParent"),"a sibling");
# Remove both children at once.
rmchildren(G("aParent"));
# Re-create them, then remove a single child by name.
G("aChild") = makeconcept(G("aParent"),"a child");
G("aChild") = makeconcept(G("aParent"),"a sibling");
rmchild(G("aParent"),"a sibling");
@CODE
# Reset the user dictionary, add one entry, then finalize it.
DictionaryClear();
# Word "intact": attribute nominal = "true", stored as a string value.
DictionaryWord("intact","nominal", "true", "str");
DictionaryEnd();
@@CODE
@NODES _ROOT

# Group consecutive sentences up to a paragraph separator (or end of
# input) into an unsealed _paragraph node.
@RULES
_paragraph [unsealed] <-
	_sentence [s plus]	### (1)
	_xWILD [s matches=(_paragraphSeparator _xEND)]	### (2)
	@@
@NODES _ROOT

# An all-uppercase word run between newlines (or after a _header) is a
# zone header; keep only elements 2-3 and record the header text.
@PRE
<2,2> uppercase();
@POST
S("header") = N("$text", 2);
singler(2,3);
@RULES
_zoneHeader <-
	_xWILD [one matches=(\n \r _header)]	### (1)
	_xALPHA [plus]	### (2)
	_xWILD [one matches=(\n \r)]	### (3)
	@@
# Find entire path of dictionary concept for the given string.
# (If not present, don't add the word.)
@CODE
"output.txt" << "1 " << conceptname(addword("hello")) << "\n";
"output.txt" << "2 " << conceptname(wordindex("hello")) << "\n";
"output.txt" << "3 " << findwordpath("hello") << "\n";
"output.txt" << "4 " << findwordpath("olleh") << "\n";
"output.txt" << "5 " << wordpath("foobaz") << "\n";
# FIX: restored the opening quote that was missing on the "\n" literal.
"output.txt" << "6 " << conceptname(dictfindword("hello")) << "\n";
rmword("foobaz");

# Prints out:
# 1 hello
# 2 he
# 3 "concept" "sys" "dict" "a" "h" "he" "hello"
# 4
# 5 "concept" "sys" "dict" "a" "f" "fo" "foobaz"
# 6 hello
@NODES _paragraph

# Sentence-ending punctuation: reduce only element 2 (./?/!) to
# _endSent when preceded by a word or number and followed by
# whitespace, another token, a control char, or end of input.
@POST
singler(2,2);
@RULES
_endSent <-
	_xWILD [s one match=(_xALPHA _xNUM)]	### (1)
	_xWILD [one trig match=(\. \? \!)]	### (2)
	_xWILD [one match=(_xWHITE _xALPHA _xNUM _xEND _xCTRL)]	### (3)
	@@
@NODES _LINE

# Quoted string: record its text on both the new node and the
# enclosing line context, then reduce to _con.
@POST
S("con") = N("$text",2);
X("con") = N("$text",2);
single();
@RULES
_con <-
	\"	### (1)
	_xWILD [fail=(\")]	### (2)
	\"	### (3)
	@@
@CODE
# Dump the "resumes" KB subtree to the log.
# NOTE(review): the meaning of the flag argument (1) is not visible
# here -- confirm against the DisplayKB documentation.
DisplayKB(G("resumes"),1);
@@CODE
@NODES _LINE

# Convert a phrase of number words/digits (e.g. "two hundred and five")
# into a single _number node carrying its value in S("numeric").
# Accumulates a running group value in L("last") and closes out groups
# into L("final") at large multipliers (power > 2).
@POST
L("node") = N(1);
L("last") = 0;
L("final") = 0;
"number.txt" << phrasetext() << "\n";
while (L("node")) {
	L("name") = pnname(L("node"));
	# Skip separator tokens (comma, dash, "and", "&").
	if (L("name") != "," && L("name") != "-" && L("name") != "and" && L("name") != "&") {
		L("num") = num(pnvar(L("node"),"numeric"));
		L("power") = num(pnvar(L("node"),"power"));
		"number.txt" << pnname(L("node")) << " " << L("num") << " " << L("last") << " => ";
		if (L("last power") <= 1 && L("power") == 1) {
			# Two small numbers in a row, e.g. "nineteen twelve".
			L("last") = L("last") * 100 + L("num");
		}
		else if (L("power") <= 2) {
			if (L("num") > L("last")) {
				# e.g. "two hundred": multiply into the group.
				if (L("last"))
					L("last") = L("last") * L("num");
				else
					L("last") = L("num");
			}
			else {
				# e.g. "twenty five": add into the group.
				L("last") = L("last") + L("num");
			}
			"number.txt" << L("last");
		}
		else {
			# Large multiplier (thousand, million, ...): close the group.
			L("final") = L("final") + L("last") * L("num");
			"number.txt" << "final = " << L("final") << " ";
			L("last") = 0;
		}
		"number.txt" << "\n";
		L("last power") = L("power");
	}
	L("node") = pnnext(L("node"));
}
L("final") = L("final") + L("last");
"number.txt" << "=====> " << L("final") << "\n\n";
S("numeric") = L("final");
single();
@RULES
_number <-
	_xWILD [plus match=(_number \- \, and \&)]	### (1)
	@@
@NODES _term

# Use the first non-tab/newline run of text as the root concept of the
# radlex tree, then stop this pass immediately.
@POST
G("root") = makeconcept(G("radlex"), N("$text", 1));
exitpass();
@RULES
_xNIL <-
	_xWILD [fail=(\t \n \r)]	### (1)
	### (2)
	_xWILD [one matches=(\n \r)]	### (3)
	@@
# Rename an attribute named oldAttrNameStr to newAttrNameStr belonging
# to the concept aConcept.
if (findconcept(findroot(),"apples"))
	rmconcept(findconcept(findroot(),"apples"));
G("apples") = makeconcept(findroot(),"apples");
addstrval(G("apples"),"have","seeds");
# Rename the concept itself...
renameconcept(G("apples"),"fruit");
"output.txt" << conceptname(G("apples")) << "\n";
# ...then rename its attribute; the value is preserved.
renameattr(G("apples"),"have","are");
"output.txt" << attrname(findattr(G("apples"),"are")) << "\n";
"output.txt" << strval(G("apples"),"are") << "\n";
# rmconcept(G("apples"));

# This code prints out:
# fruit
# are
# seeds
@NODES _ROOT

# {{lang-xx word}} template: element 3 carries a language code, element
# 5 a part of speech.
@PRE
<3,3> var("lang");
<5,5> var("pos");
@RULES
_lang <-
	\{	### (1)
	\{	### (2)
	_xALPHA	### (3)
	\-	### (4)
	_xALPHA	### (5)
	_xWILD [fail=(\})]	### (6)
	\}	### (7)
	\}	### (8)
	@@

# Any other {{...}} template.
@RULES
_curly <-
	\{	### (1)
	\{	### (2)
	_xWILD [fail=(\})]	### (3)
	\}	### (4)
	\}	### (5)
	@@

# Strip doubled wiki-markup delimiters.
@POST
excise(1,2);
@RULES
_xNIL <- \[ \[ @@

@POST
excise(1,2);
@RULES
_xNIL <- \] \] @@

@POST
excise(1,2);
@RULES
_xNIL <- \( \( @@

@POST
excise(1,2);
@RULES
_xNIL <- \) \) @@

@POST
excise(1,2);
@RULES
_xNIL <- \< \< @@

@POST
excise(1,2);
@RULES
_xNIL <- \> \> @@

# NOTE(review): exact duplicate of the previous rule -- harmless but
# redundant; verify whether a different pair was intended.
@POST
excise(1,2);
@RULES
_xNIL <- \> \> @@

# Bold/italic apostrophe runs: remove two or three quotes.
@POST
excise(1,3);
@RULES
_xNIL <- \' \' \' [opt] @@
# Tag "will" as a modal only when its first character is NOT uppercase:
# strisupper() succeeding makes the rule fail, so capitalized "Will"
# (likely a proper name) is skipped.
@CHECK
if (strisupper(N("$text",1)))
	fail();
@RULES
_modal <- will @@
@NODES _LINE

# [xx] bracketed code: store the lowercased code on the enclosing
# context (level 2) and reduce to _code.
@POST
X("code",2) = strtolower(N("$text",2));
single();
@RULES
_code <-
	\[	### (1)
	_xALPHA	### (2)
	\]	### (3)
	@@
@MULTI _ROOT

# For every section-like node that carries "key_words", log the words
# and extract code ranks for both diagnoses and procedures.
@PRE
<1,1> var("key_words");
@POST
"pass_25.txt" << N("key_words", 1) << "\n";
extractCodeRanks(N("key_words", 1), "diagnoses");
extractCodeRanks(N("key_words", 1), "procedures");
@RULES
_xNIL <-
	_xWILD [one matches=( _section _subsection _sentence _looseText _item)]
	@@
@CODE
# Get our eui to codes mapping
G("eui_to_codes") = getconcept(findroot(),"eui_to_codes");
G("eui_to_codes_top") = getconcept(G("eui_to_codes"),"top");
G("eui_to_codes_rare") = getconcept(G("eui_to_codes"),"rare");
DisplayKB(findroot(), 2);

# Check whether P_eui concept exists, if not, create it.
# NOTE(review): if getconcept() already creates missing concepts, the
# makeconcept fallbacks below are redundant -- confirm intended API.
G("P_eui") = getconcept(findroot(),"P_eui");
if (!G("P_eui")) {
	G("P_eui") = makeconcept(findroot(), "P_eui");
}
rmchildren(G("P_eui"));

# Make concept for top split of P_eui
G("P_eui_top") = getconcept(G("P_eui"),"top");
if (!G("P_eui_top")) {
	G("P_eui_top") = makeconcept(G("P_eui"), "top");
}
rmchildren(G("P_eui_top"));

# Make concept for rare split of P_eui
G("P_eui_rare") = getconcept(G("P_eui"),"rare");
if (!G("P_eui_rare")) {
	G("P_eui_rare") = makeconcept(G("P_eui"), "rare");
}
rmchildren(G("P_eui_rare"));
@@CODE
@NODES _ROOT

# [**...**] de-identified patient id marker.
@RULES
_patientID <-
	\[	### (1)
	\*	### (2)
	\*	### (3)
	_xWILD [fails=(\*)]	### (4)
	\*	### (5)
	\*	### (6)
	\]	### (7)
	@@

# Clock time, e.g. 12:30:45 am.
@RULES
_time <-
	_xNUM
	\:
	_xNUM
	\: [opt]
	_xNUM [opt]
	\_ [opt]
	_xWILD [opt match=(am pm AM PM)]
	@@

# Initialism: drop the interior periods, keep the letters/digits.
@POST
excise(6,6);
excise(4,4);
excise(2,2);
single();
# Initialism
@RULES
_init <-
	_xWILD [one matches=(_xNUM _xALPHA)]	### (1)
	\.	### (2)
	_xWILD [one matches=(_xNUM _xALPHA)]	### (3)
	\.	### (4)
	_xWILD [opt matches=(_xNUM _xALPHA)]	### (5)
	\. [opt]	### (6)
	@@

# Known abbreviation followed by a period and a separator: drop the
# period, keep only the abbreviation word.
@POST
excise(2,2);
singler(1,1);
@RULES
_init <-
	# Add abbreviations here, in the form dr., jr., etc
	_xWILD [one match=(Dr DR dr q etc)]
	\.
	_xWILD [one matches=(\_ \, \:)]
	@@
@CODE
L("hello") = 0;
@@CODE

#@PATH _ROOT _TEXTZONE _tok
@NODES _tok

# Hyphenated-token POS assignment: each rule group below renames the
# enclosing token node and fixes its Treebank tag.

# Treebank note: JJ is the default, since alpha-alpha
# nonheads of nps are about 77% adjective.
@POST
X("hyphenated") = 1;
X("pos_np") = "JJ";	# Default.	# 06/07/06 AM.
if (strisupper(N("$text",2)) || strisupper(N("$text",4)))
	X("cap") = 1;
xrename("_adj");
chpos(X(),"JJ");
@RULES
_xNIL <- _xSTART short \- term _xEND @@
_xNIL <- _xSTART worst \- case _xEND @@
_xNIL <- _xSTART pro \- forma _xEND @@
_xNIL <- _xSTART pre \- _xWILD [one match=(_xALPHA _xNUM)] _xEND @@
_xNIL <- _xSTART _xALPHA	# run, drag
	\- down _xEND @@
_xNIL <- _xSTART _xALPHA \- free _xEND @@

# Treebank note: JJ is the default, since alpha-alpha
# nonheads of nps are about 77% adjective.
@POST
X("hyphenated") = 1;
X("pos_np") = "NN";
if (strisupper(N("$text",2)) || strisupper(N("$text",4)))
	X("cap") = 1;
xrename("_adj");
chpos(X(),"NN");
@RULES
_xNIL <-	# NN/JJ 15/5.
	_xSTART high \- technology _xEND @@
# buy-out
# buy - out
_xNIL <-	# NN/JJ 499/25
	_xSTART buy \- out _xEND @@
_xNIL <-	# NN/JJ 499/25
	_xSTART stock \- index _xEND @@
_xNIL <- _xSTART twin \- jet _xEND @@
_xNIL <- _xSTART close \- up _xEND @@
_xNIL <- _xSTART cop \- killer _xEND @@

# Decade forms like "mid-80s" -> plural noun.
@POST
X("hyphenated") = 1;
X("pos_np") = "NN";
if (strisupper(N("$text",2)) || strisupper(N("$text",4)))
	X("cap") = 1;
xrename("_noun");
chpos(X(),"NNS");
@RULES
_xNIL <- _xSTART mid \- _xNUM s _xEND @@

@POST
X("hyphenated") = 1;
X("pos_np") = "NN";
if (strisupper(N("$text",4)))
	X("cap") = 1;
xrename("_noun");
chpos(X(),"NN");
@RULES
_xNIL <- _xSTART t \- shirt _xEND @@

@POST
X("hyphenated") = 1;
X("pos_np") = "NNS";
if (strisupper(N("$text",4)))
	X("cap") = 1;
xrename("_noun");
chpos(X(),"NNS");
@RULES
_xNIL <- _xSTART t \- shirts _xEND @@

# Number ranges like 1990-94 -> CD.
@PRE
<2,2> length(4);
<4,4> lengthr(1,4);
@POST
X("hyphenated") = 1;
X("pos_np") = "CD";
xrename("_noun");
chpos(X(),"CD");
@RULES
_xNIL <- _xSTART _xNUM \- _xNUM _xEND @@

# passer-by / passers-by: number follows the first element.
@POST
X("hyphenated") = 1;
if (strisupper(N("$text",2)) || strisupper(N("$text",4)))
	X("cap") = 1;
xrename("_noun");
if (singular(N(2)))
	{
	X("pos_np") = "NN";
	chpos(X(),"NN");
	}
else
	{
	X("pos_np") = "NNS";
	chpos(X(),"NNS");
	}
@RULES
_xNIL <- _xSTART _xWILD [one match=(passer passers)] \- by _xEND @@

# c'mon -> imperative verb.
@POST
X("idiom") = 1;
xrename("_verb");
chpos(X(),"VB");
X("verb") = 1;
X("pos num") = 1;
X("inf") = 1;
X("imperative") = 1;
@RULES
_xNIL <- _xSTART c \' mon _xEND @@

# Note: exclusively in "single-handedly".
@POST
N("adv") = 1;
N("sem") = N("stem") = "handed";
N("pos num") = 1;
N("pos") = "_adv";
if (N("unknown"))
	N("unknown") = 0;
@RULES
_xNIL <- handedly @@
@PATH _ROOT _experienceZone _experienceInstance _LINE

# Attach company/title/country evidence found within one line of an
# experience instance's anchor line to the instance node (level 3).

# name regardless of position, etc. Should raise confidence here.
# May want to collect confidence in the instance.
@CHECK
	if (
		!X("company name",3)
		&& (N("tmp") = (X("lineno") - X("anchor lineno",3)))
		&& (N("tmp") == -1 || N("tmp") == 1)	# Within 2 lines of anchor.
		)
		succeed();
	fail();
@POST
	X("company name",3) = N("$text");
@RULES
_xNIL <- _company [s] @@

# Don't get cocky, Luke!
@CHECK
	if (
		!X("job title",3)
		&& (N("tmp") = (X("lineno") - X("anchor lineno",3)))
		&& (N("tmp") == -1 || N("tmp") == 1)	# Within 2 lines of anchor.
		)
		succeed();
	fail();
@POST
	# FIX: write to the instance level (3), matching this rule's @CHECK
	# and the sibling company/country rules; the unindexed X("job title")
	# stored the value on the wrong context level.
	X("job title",3) = N("$text");
@RULES
_xNIL <- _jobTitle [s] @@
_xNIL <- _jobPhrase [s] @@

@CHECK
	if (
		!X("country",3)
		&& (N("tmp") = (X("lineno") - X("anchor lineno",3)))
		&& (N("tmp") == -1 || N("tmp") == 1)	# Within 2 lines of anchor.
		)
		succeed();
	fail();
@POST
	X("country",3) = N("$text");
@RULES
_xNIL <- _country [s] @@
@NODES _LINE

# Quoted item: strip the surrounding quotes and reduce the contents
# to _item.
@POST
S("str") = N("str");
# NOTE(review): the assignment below immediately overwrites the one
# above -- one of the two is a dead store; verify which value "str"
# is meant to carry.
S("str") = 1;
excise(3,3);
excise(1,1);
single();
@RULES
_item <-
	\"	### (1)
	_xWILD [plus fail=(\")]	### (2)
	\"	### (3)
	@@
@CODE
L("hello") = 0;
@@CODE

@NODES _TEXTZONE

# Count _LINE nodes on the enclosing zone (level 2), then splice each
# line's children up into the zone, removing the _LINE wrapper.
@POST
++X("line count",2);
splice(1,1);
@RULES
_xNIL <- _LINE @@
# Format the current date and time as a string
@CODE
# today() returns the current date/time formatted as a string.
"output.txt" << today() << "\n";
@@CODE
@NODES _ROOT

# Write each line's text to the file handle already opened in G("file").
@POST
G("file") << N("$text") << "\n";
@RULES
_xNIL <- _LINE @@
@CODE
# Build KB-relative paths for the vocabulary dictionary and debug log.
G("word dict") = G("$kbpath") + "vocab.dict";
# NOTE(review): L(...) is local to this @CODE region; the debug path is
# not used again here -- confirm it was not meant to be a G(...) global.
L("debugpath") = G("$kbpath") + "debug.txt";
@@CODE
@NODES _ROOT

# Phrases carrying a "negation" value are renamed: "CONJ" negations
# become _conj nodes, all others become _negation nodes.
@PRE
<1,1> var("negation");
@POST
if (N("negation")=="CONJ") {
	pnrename(N(1), "_conj");
}
else {
	pnrename(N(1), "_negation");
}
@RULES
_xNIL <-
	_phrase	### (1)
	@@

# A lone alpha carrying a "negation" value is wrapped in a _negation
# node that inherits the value.
@PRE
<1,1> var("negation");
@POST
S("negation") = pnvar(N(1), "negation");
single();
@RULES
_negation <-
	_xALPHA [one]	### (1)
	@@
@NODES _ROOT

# "<h2>VERB Conjugation: Present Tense</h2>" header: log the verb
# (element 4) and reduce the whole tag to _header.
@POST
"verb.txt" << N("$text",4) << "\n";
single();
@RULES
_header <-
	\<	### (1)
	h2	### (2)
	\>	### (3)
	_xALPHA	### (4)
	Conjugation	### (5)
	\:	### (6)
	Present	### (7)
	Tense	### (8)
	\<	### (9)
	\/	### (10)
	h2	### (11)
	\>	### (12)
	@@

# <tbody> opening tag.
@RULES
_tbodyStart <-
	\<	### (1)
	tbody	### (2)
	\>	### (3)
	@@

# </tbody> closing tag.
@RULES
_tbodyEnd <-
	\<	### (1)
	\/	### (2)
	tbody	### (3)
	\>	### (4)
	@@
@CODE
fileout("skills.txt");
#varstrs("skills")
@@CODE

@NODES _LINE

# Collect each matched skill: accumulate it into the "skills" variable
# and print the matched range to skills.txt, one per line.
@POST
addstrs("skills",1);
prrange("skills.txt",1,1);
prlit("skills.txt","\n");
@RULES
_xNIL <- _skill [s] @@
@NODES _ROOT

# Group a level-5 header and everything up to the next header of level
# 5 or higher (or end of input) into a _headerZone.
@POST
S("header") = N("header",1);
S("count") = 5;	# Header level of this zone.
single();
@RULES
_headerZone <-
	_headerFive	### (1)
	_xWILD [plus fails=(_headerFive _headerFour _headerThree _headerTwo _xEND)]	### (2)
	@@
@NODES _LINE

# Reduce only the trailing _space (element 3) after a run of non-space
# tokens to _rouge.
@POST
singler(3,3);
@RULES
_rouge <-
	_space	### (1)
	_xWILD [plus fails=(_space)]	### (2)
	_space	### (3)
	@@
@NODES _LINE

# Simple email address: local part, @, dotted domain (at least two
# tokens in the domain).
@RULES
_email <-
	_xWILD [plus match=(_xALPHA _xNUM \_ \.)]	### (1)
	\@	### (2)
	_xWILD [min=2 match=(_xALPHA _xNUM \.)]	### (3)
	@@
@CODE
# Open the firstnames dictionary: truncate on a fresh/single-file run,
# append while processing later files of a directory run.
G("filepath") = G("$kbpath") + "\\" + "en-firstnames.dict";
# NOTE(review): L("type") is set but never used below -- confirm whether
# the first openfile() call was meant to receive a mode argument.
L("type") = "app";
if (!G("$isdirrun") || G("$isfirstfile"))
	G("file") = openfile(G("filepath"));
else
	G("file") = openfile(G("filepath"),"app");
@@CODE
@NODES _LINE

# A comma at line start is treated as an (empty) list item.
@POST
group(2,2,"_item");
@RULES
_xNIL <-
	_xSTART	### (1)
	_comma	### (2)
	@@
@NODES _LINE

@RULES
# Ex: whose
# Pronouns and quantifier words: matched as a single token, reduced to
# _posPRO and layered under _funWORD.
_posPRO [layer=(_funWORD )] <-
	_xWILD [min=1 max=1 s match=("whose" "me" "myself" "my" "mine" "we"
		"us" "ourselves" "our" "ours" "you" "yourself" "yourselves" "your"
		"yours" "he" "him" "himself" "his" "she" "her" "herself" "hers"
		"it" "itself" "its" "they" "them" "themselves" "their" "theirs"
		"who" "whom" "which" "that" "I" "everyone" "everybody" "every"
		"everything" "each" "all" "many" "much" "more" "most" "few"
		"fewer" "fewest" "little" "least" "several" "enough" "both"
		"one" "ones" "somebody" "someone" "either" "something" "anybody"
		"anyone" "anything" "any" "some" "no" "nobody" "neither" "none"
		"nothing")]
	@@
@CODE
# Sort the adjacency-matrix data concepts, then number each child with
# a sequential "index" numeric attribute starting at 0.
sortchilds(G("adjMatrixData"));
L("i") = 0;
L("con") = down(G("adjMatrixData"));
while ( L("con") ) {
	addnumval(L("con"), "index", L("i"));
	++L("i");
	L("con") = next(L("con"));
}
@@CODE
# Fetch numeric-value of attribute (must be first).
# numval() reads the first numeric value of attribute name on con.
L("return_num") = numval(L("con"), L("name"));
@CODE
# Print the legend and column headers for the education-zone report.
prlit( "edu.txt", "KEY= 0-none,1-major,2-minor,3-school,\n" );
prlit( "edu.txt", " 4-degree,5-grade,6-date\n\n");
prlit( "edu.txt", "LINE DATA IN EDUCATION ZONE\n" );
prlit( "edu.txt", "---------------------------\n" );
prlit( "edu.txt", " NEW TOT FIRST LAST MAJ MIN SCH DEG GPA DAT\n" );
prlit( "edu.txt", " num key key ord ord ord ord ord ord\n" );
prlit( "edu.txt", "---------------------------------------------\n" );
@@CODE

@PATH _ROOT _educationZone

# Print out relevant vars for each line that has parts.
@CHECK
	#Ngt(1, "eduparts", 0)	# More than zero education parts.
	if (N("eduparts",1) <= 0) fail();
@POST
	"edu.txt" << " " << rightjustifynum(N("instance",1),3) << " ";
	"edu.txt" << rightjustifynum(N("eduparts",1),3) << " ";
	"edu.txt" << rightjustifynum(N("first edupart",1),5) << " ";
	"edu.txt" << rightjustifynum(N("last edupart",1),4) << " ";
	"edu.txt" << rightjustifynum(N("Omajor",1), 3) << " ";
	"edu.txt" << rightjustifynum(N("Ominor",1), 3) << " ";
	"edu.txt" << rightjustifynum(N("Oschool",1),3) << " ";
	"edu.txt" << rightjustifynum(N("Odegree",1),3) << " ";
	"edu.txt" << rightjustifynum(N("Ograde",1), 3) << " ";
	"edu.txt" << rightjustifynum(N("Odate",1), 3) << "\n";
	#noop();
@RULES
_xNIL <- _LINE @@

# Make note of lines that have no eduparts.
# NOTE(review): this @CHECK fails when eduparts is ABSENT, which
# contradicts the comment above, and lines with eduparts are consumed
# by the preceding rule -- this rule may be unreachable; verify whether
# the condition should be inverted.
@CHECK
	# Neq(1, "eduparts", 0)
	if (!N("eduparts",1)) fail();
@POST
	prlit( "edu.txt", " - - -\n");
@RULES
_xNIL <- _LINE @@

# Blank lines are noted explicitly.
@POST
	prlit( "edu.txt", " blank line\n");
@RULES
_xNIL <- _BLANKLINE @@
@NODES _ROOT

# Rename each _code node to _test.
@RULES
_test <-
	_code	### (1)
	@@
@NODES _LINE

@RULES
# Ex: Extension\_
# Extension keyword, optional whitespace, then the extension number.
_phoneExtension <-
	_xWILD [min=1 max=1 s layer=("_extendWord") match=("Extension" "Ext" "Ex" "x")]
	_xWHITE [star s]
	_xNUM [s layer=("_extension")]
	@@

# Abbreviated form must be capitalized and followed by a period.
@PRE
<1,1> cap();
@RULES
# Ex: Ext.\_
_phoneExtension <-
	_xWILD [min=1 max=1 s layer=("_extendWord") match=("Ext" "Ex")]
	\. [s]
	_xWHITE [star s]
	_xNUM [s layer=("_extension")]
	@@
@CODE
#DisplayKB(G("labels"), 1);
# Show the phrases subtree, dump it to a KB file, then finalize the
# dictionary.
DisplayKB(G("phrases"), 1);

# Replace "labels.kb" with file path to store in different dir
#kbdumptree(G("labels"), "labels.kb");
DumpKB(G("phrases"), "phrases");
DictionaryEnd();
@@CODE
@CODE
# Dump the KB subtree rooted at G("kb") to the log.
# NOTE(review): the meaning of the flag argument (0) is not visible
# here -- confirm against the DisplayKB documentation.
DisplayKB(G("kb"),0);
@@CODE
@CODE
# Build file paths for the state dictionaries and open them: truncate
# ("w") on a fresh/single-file run, append ("app") on later files of a
# directory run.
G("dictfile") = G("$kbpath") + "states.dict";
G("abbrevdict") = G("$kbpath") + "abbrev.dict";
G("kbbfile") = G("$kbpath") + "states.kbb";
G("citiesdict") = G("$kbpath") + "cities.dict";
G("debugfile") = G("$kbpath") + "debug.txt";
G("debug") = openfile(G("debugfile"),"app");

# State name comes from the input filename head (dashes -> spaces).
G("state name") = strsubst(G("$inputhead"),"-"," ");
G("states") = getconcept(findroot(),"states");
if (G("$isfirstfile") || !G("$isdirrun")) {
	rmchildren(G("states"));
	L("type") = "w";
}
else {
	L("type") = "app";
}
G("state") = getconcept(G("states"),G("state name"));
G("dict") = openfile(G("dictfile"),L("type"));
G("kbb") = openfile(G("kbbfile"),L("type"));
G("abbrev") = openfile(G("abbrevdict"),L("type"));
G("cities") = openfile(G("citiesdict"),L("type"));
@@CODE