file_id
int64
1
215k
content
stringlengths
7
454k
repo
stringlengths
6
113
path
stringlengths
6
251
214,416
package sdk.chat.core.utils; import java.util.HashMap; public class MimeTypesMap { public static final String MIME_APPLICATION_ANDREW_INSET = "application/andrew-inset"; public static final String MIME_APPLICATION_JSON = "application/json"; public static final String MIME_APPLICATION_ZIP = "application/zip"; public static final String MIME_APPLICATION_X_GZIP = "application/x-gzip"; public static final String MIME_APPLICATION_TGZ = "application/tgz"; public static final String MIME_APPLICATION_MSWORD = "application/msword"; public static final String MIME_APPLICATION_MSWORD_2007 = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"; public static final String MIME_APPLICATION_VND_TEXT = "application/vnd.oasis.opendocument.text"; public static final String MIME_APPLICATION_POSTSCRIPT = "application/postscript"; public static final String MIME_APPLICATION_PDF = "application/pdf"; public static final String MIME_APPLICATION_JNLP = "application/jnlp"; public static final String MIME_APPLICATION_MAC_BINHEX40 = "application/mac-binhex40"; public static final String MIME_APPLICATION_MAC_COMPACTPRO = "application/mac-compactpro"; public static final String MIME_APPLICATION_MATHML_XML = "application/mathml+xml"; public static final String MIME_APPLICATION_OCTET_STREAM = "application/octet-stream"; public static final String MIME_APPLICATION_ODA = "application/oda"; public static final String MIME_APPLICATION_RDF_XML = "application/rdf+xml"; public static final String MIME_APPLICATION_JAVA_ARCHIVE = "application/java-archive"; public static final String MIME_APPLICATION_RDF_SMIL = "application/smil"; public static final String MIME_APPLICATION_SRGS = "application/srgs"; public static final String MIME_APPLICATION_SRGS_XML = "application/srgs+xml"; public static final String MIME_APPLICATION_VND_MIF = "application/vnd.mif"; public static final String MIME_APPLICATION_VND_MSEXCEL = "application/vnd.ms-excel"; public static final String 
MIME_APPLICATION_VND_MSEXCEL_2007 = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"; public static final String MIME_APPLICATION_VND_SPREADSHEET = "application/vnd.oasis.opendocument.spreadsheet"; public static final String MIME_APPLICATION_VND_MSPOWERPOINT = "application/vnd.ms-powerpoint"; public static final String MIME_APPLICATION_VND_RNREALMEDIA = "application/vnd.rn-realmedia"; public static final String MIME_APPLICATION_X_BCPIO = "application/x-bcpio"; public static final String MIME_APPLICATION_X_CDLINK = "application/x-cdlink"; public static final String MIME_APPLICATION_X_CHESS_PGN = "application/x-chess-pgn"; public static final String MIME_APPLICATION_X_CPIO = "application/x-cpio"; public static final String MIME_APPLICATION_X_CSH = "application/x-csh"; public static final String MIME_APPLICATION_X_DIRECTOR = "application/x-director"; public static final String MIME_APPLICATION_X_DVI = "application/x-dvi"; public static final String MIME_APPLICATION_X_FUTURESPLASH = "application/x-futuresplash"; public static final String MIME_APPLICATION_X_GTAR = "application/x-gtar"; public static final String MIME_APPLICATION_X_HDF = "application/x-hdf"; public static final String MIME_APPLICATION_X_JAVASCRIPT = "application/x-javascript"; public static final String MIME_APPLICATION_X_KOAN = "application/x-koan"; public static final String MIME_APPLICATION_X_LATEX = "application/x-latex"; public static final String MIME_APPLICATION_X_NETCDF = "application/x-netcdf"; public static final String MIME_APPLICATION_X_OGG = "application/x-ogg"; public static final String MIME_APPLICATION_X_SH = "application/x-sh"; public static final String MIME_APPLICATION_X_SHAR = "application/x-shar"; public static final String MIME_APPLICATION_X_SHOCKWAVE_FLASH = "application/x-shockwave-flash"; public static final String MIME_APPLICATION_X_STUFFIT = "application/x-stuffit"; public static final String MIME_APPLICATION_X_SV4CPIO = "application/x-sv4cpio"; public 
static final String MIME_APPLICATION_X_SV4CRC = "application/x-sv4crc"; public static final String MIME_APPLICATION_X_TAR = "application/x-tar"; public static final String MIME_APPLICATION_X_RAR_COMPRESSED = "application/x-rar-compressed"; public static final String MIME_APPLICATION_X_TCL = "application/x-tcl"; public static final String MIME_APPLICATION_X_TEX = "application/x-tex"; public static final String MIME_APPLICATION_X_TEXINFO = "application/x-texinfo"; public static final String MIME_APPLICATION_X_TROFF = "application/x-troff"; public static final String MIME_APPLICATION_X_TROFF_MAN = "application/x-troff-man"; public static final String MIME_APPLICATION_X_TROFF_ME = "application/x-troff-me"; public static final String MIME_APPLICATION_X_TROFF_MS = "application/x-troff-ms"; public static final String MIME_APPLICATION_X_USTAR = "application/x-ustar"; public static final String MIME_APPLICATION_X_WAIS_SOURCE = "application/x-wais-source"; public static final String MIME_APPLICATION_VND_MOZZILLA_XUL_XML = "application/vnd.mozilla.xul+xml"; public static final String MIME_APPLICATION_XHTML_XML = "application/xhtml+xml"; public static final String MIME_APPLICATION_XSLT_XML = "application/xslt+xml"; public static final String MIME_APPLICATION_XML = "application/xml"; public static final String MIME_APPLICATION_XML_DTD = "application/xml-dtd"; public static final String MIME_IMAGE_BMP = "image/bmp"; public static final String MIME_IMAGE_CGM = "image/cgm"; public static final String MIME_IMAGE_GIF = "image/gif"; public static final String MIME_IMAGE_IEF = "image/ief"; public static final String MIME_IMAGE_JPEG = "image/jpeg"; public static final String MIME_IMAGE_TIFF = "image/tiff"; public static final String MIME_IMAGE_PNG = "image/png"; public static final String MIME_IMAGE_SVG_XML = "image/svg+xml"; public static final String MIME_IMAGE_VND_DJVU = "image/vnd.djvu"; public static final String MIME_IMAGE_WAP_WBMP = "image/vnd.wap.wbmp"; public static final 
String MIME_IMAGE_X_CMU_RASTER = "image/x-cmu-raster"; public static final String MIME_IMAGE_X_ICON = "image/x-icon"; public static final String MIME_IMAGE_X_PORTABLE_ANYMAP = "image/x-portable-anymap"; public static final String MIME_IMAGE_X_PORTABLE_BITMAP = "image/x-portable-bitmap"; public static final String MIME_IMAGE_X_PORTABLE_GRAYMAP = "image/x-portable-graymap"; public static final String MIME_IMAGE_X_PORTABLE_PIXMAP = "image/x-portable-pixmap"; public static final String MIME_IMAGE_X_RGB = "image/x-rgb"; public static final String MIME_AUDIO_BASIC = "audio/basic"; public static final String MIME_AUDIO_MIDI = "audio/midi"; public static final String MIME_AUDIO_MPEG = "audio/mpeg"; public static final String MIME_AUDIO_X_AIFF = "audio/x-aiff"; public static final String MIME_AUDIO_X_MPEGURL = "audio/x-mpegurl"; public static final String MIME_AUDIO_X_PN_REALAUDIO = "audio/x-pn-realaudio"; public static final String MIME_AUDIO_X_WAV = "audio/x-wav"; public static final String MIME_AUDIO_MP4 = "audio/mp4"; public static final String MIME_CHEMICAL_X_PDB = "chemical/x-pdb"; public static final String MIME_CHEMICAL_X_XYZ = "chemical/x-xyz"; public static final String MIME_MODEL_IGES = "model/iges"; public static final String MIME_MODEL_MESH = "model/mesh"; public static final String MIME_MODEL_VRLM = "model/vrml"; public static final String MIME_TEXT_PLAIN = "text/plain"; public static final String MIME_TEXT_RICHTEXT = "text/richtext"; public static final String MIME_TEXT_RTF = "text/rtf"; public static final String MIME_TEXT_HTML = "text/html"; public static final String MIME_TEXT_CALENDAR = "text/calendar"; public static final String MIME_TEXT_CSS = "text/css"; public static final String MIME_TEXT_SGML = "text/sgml"; public static final String MIME_TEXT_TAB_SEPARATED_VALUES = "text/tab-separated-values"; public static final String MIME_TEXT_VND_WAP_XML = "text/vnd.wap.wml"; public static final String MIME_TEXT_VND_WAP_WMLSCRIPT = "text/vnd.wap.wmlscript"; 
public static final String MIME_TEXT_X_SETEXT = "text/x-setext"; public static final String MIME_TEXT_X_COMPONENT = "text/x-component"; public static final String MIME_VIDEO_QUICKTIME = "video/quicktime"; public static final String MIME_VIDEO_MPEG = "video/mpeg"; public static final String MIME_VIDEO_VND_MPEGURL = "video/vnd.mpegurl"; public static final String MIME_VIDEO_X_MSVIDEO = "video/x-msvideo"; public static final String MIME_VIDEO_X_MS_WMV = "video/x-ms-wmv"; public static final String MIME_VIDEO_X_SGI_MOVIE = "video/x-sgi-movie"; public static final String MIME_VIDEO_MP4 = "video/mp4"; public static final String MIME_X_CONFERENCE_X_COOLTALK = "x-conference/x-cooltalk"; private static HashMap<String, String> mimeTypeMapping; private static HashMap<String, String> extMapping; static { mimeTypeMapping = new HashMap<String, String>(200) { private void put1(String key, String value) { if (put(key, value) != null) { throw new IllegalArgumentException("Duplicated extension: " + key); } } { put1("xul", MIME_APPLICATION_VND_MOZZILLA_XUL_XML); put1("json", MIME_APPLICATION_JSON); put1("ice", MIME_X_CONFERENCE_X_COOLTALK); put1("movie", MIME_VIDEO_X_SGI_MOVIE); put1("avi", MIME_VIDEO_X_MSVIDEO); put1("wmv", MIME_VIDEO_X_MS_WMV); put1("m4u", MIME_VIDEO_VND_MPEGURL); put1("mxu", MIME_VIDEO_VND_MPEGURL); put1("mp4", MIME_VIDEO_MP4); put1("htc", MIME_TEXT_X_COMPONENT); put1("etx", MIME_TEXT_X_SETEXT); put1("wmls", MIME_TEXT_VND_WAP_WMLSCRIPT); put1("wml", MIME_TEXT_VND_WAP_XML); put1("tsv", MIME_TEXT_TAB_SEPARATED_VALUES); put1("sgm", MIME_TEXT_SGML); put1("sgml", MIME_TEXT_SGML); put1("css", MIME_TEXT_CSS); put1("ifb", MIME_TEXT_CALENDAR); put1("ics", MIME_TEXT_CALENDAR); put1("wrl", MIME_MODEL_VRLM); put1("vrlm", MIME_MODEL_VRLM); put1("silo", MIME_MODEL_MESH); put1("mesh", MIME_MODEL_MESH); put1("msh", MIME_MODEL_MESH); put1("iges", MIME_MODEL_IGES); put1("igs", MIME_MODEL_IGES); put1("rgb", MIME_IMAGE_X_RGB); put1("ppm", MIME_IMAGE_X_PORTABLE_PIXMAP); put1("pgm", 
MIME_IMAGE_X_PORTABLE_GRAYMAP); put1("pbm", MIME_IMAGE_X_PORTABLE_BITMAP); put1("pnm", MIME_IMAGE_X_PORTABLE_ANYMAP); put1("ico", MIME_IMAGE_X_ICON); put1("ras", MIME_IMAGE_X_CMU_RASTER); put1("wbmp", MIME_IMAGE_WAP_WBMP); put1("djv", MIME_IMAGE_VND_DJVU); put1("djvu", MIME_IMAGE_VND_DJVU); put1("svg", MIME_IMAGE_SVG_XML); put1("ief", MIME_IMAGE_IEF); put1("cgm", MIME_IMAGE_CGM); put1("bmp", MIME_IMAGE_BMP); put1("xyz", MIME_CHEMICAL_X_XYZ); put1("pdb", MIME_CHEMICAL_X_PDB); put1("ra", MIME_AUDIO_X_PN_REALAUDIO); put1("ram", MIME_AUDIO_X_PN_REALAUDIO); put1("m3u", MIME_AUDIO_X_MPEGURL); put1("aifc", MIME_AUDIO_X_AIFF); put1("aif", MIME_AUDIO_X_AIFF); put1("aiff", MIME_AUDIO_X_AIFF); put1("mp3", MIME_AUDIO_MPEG); put1("mp2", MIME_AUDIO_MPEG); put1("mp1", MIME_AUDIO_MPEG); put1("mpga", MIME_AUDIO_MPEG); put1("kar", MIME_AUDIO_MIDI); put1("mid", MIME_AUDIO_MIDI); put1("midi", MIME_AUDIO_MIDI); put1("m4a", MIME_AUDIO_MP4); put1("dtd", MIME_APPLICATION_XML_DTD); put1("xsl", MIME_APPLICATION_XML); put1("xml", MIME_APPLICATION_XML); put1("xslt", MIME_APPLICATION_XSLT_XML); put1("xht", MIME_APPLICATION_XHTML_XML); put1("xhtml", MIME_APPLICATION_XHTML_XML); put1("src", MIME_APPLICATION_X_WAIS_SOURCE); put1("ustar", MIME_APPLICATION_X_USTAR); put1("ms", MIME_APPLICATION_X_TROFF_MS); put1("me", MIME_APPLICATION_X_TROFF_ME); put1("man", MIME_APPLICATION_X_TROFF_MAN); put1("roff", MIME_APPLICATION_X_TROFF); put1("tr", MIME_APPLICATION_X_TROFF); put1("t", MIME_APPLICATION_X_TROFF); put1("texi", MIME_APPLICATION_X_TEXINFO); put1("texinfo", MIME_APPLICATION_X_TEXINFO); put1("tex", MIME_APPLICATION_X_TEX); put1("tcl", MIME_APPLICATION_X_TCL); put1("sv4crc", MIME_APPLICATION_X_SV4CRC); put1("sv4cpio", MIME_APPLICATION_X_SV4CPIO); put1("sit", MIME_APPLICATION_X_STUFFIT); put1("swf", MIME_APPLICATION_X_SHOCKWAVE_FLASH); put1("shar", MIME_APPLICATION_X_SHAR); put1("sh", MIME_APPLICATION_X_SH); put1("cdf", MIME_APPLICATION_X_NETCDF); put1("nc", MIME_APPLICATION_X_NETCDF); put1("latex", 
MIME_APPLICATION_X_LATEX); put1("skm", MIME_APPLICATION_X_KOAN); put1("skt", MIME_APPLICATION_X_KOAN); put1("skd", MIME_APPLICATION_X_KOAN); put1("skp", MIME_APPLICATION_X_KOAN); put1("js", MIME_APPLICATION_X_JAVASCRIPT); put1("hdf", MIME_APPLICATION_X_HDF); put1("gtar", MIME_APPLICATION_X_GTAR); put1("spl", MIME_APPLICATION_X_FUTURESPLASH); put1("dvi", MIME_APPLICATION_X_DVI); put1("dxr", MIME_APPLICATION_X_DIRECTOR); put1("dir", MIME_APPLICATION_X_DIRECTOR); put1("dcr", MIME_APPLICATION_X_DIRECTOR); put1("csh", MIME_APPLICATION_X_CSH); put1("cpio", MIME_APPLICATION_X_CPIO); put1("pgn", MIME_APPLICATION_X_CHESS_PGN); put1("vcd", MIME_APPLICATION_X_CDLINK); put1("bcpio", MIME_APPLICATION_X_BCPIO); put1("rm", MIME_APPLICATION_VND_RNREALMEDIA); put1("ppt", MIME_APPLICATION_VND_MSPOWERPOINT); put1("mif", MIME_APPLICATION_VND_MIF); put1("grxml", MIME_APPLICATION_SRGS_XML); put1("gram", MIME_APPLICATION_SRGS); put1("smil", MIME_APPLICATION_RDF_SMIL); put1("smi", MIME_APPLICATION_RDF_SMIL); put1("rdf", MIME_APPLICATION_RDF_XML); put1("ogg", MIME_APPLICATION_X_OGG); put1("oda", MIME_APPLICATION_ODA); put1("dmg", MIME_APPLICATION_OCTET_STREAM); put1("lzh", MIME_APPLICATION_OCTET_STREAM); put1("so", MIME_APPLICATION_OCTET_STREAM); put1("lha", MIME_APPLICATION_OCTET_STREAM); put1("dms", MIME_APPLICATION_OCTET_STREAM); put1("bin", MIME_APPLICATION_OCTET_STREAM); put1("mathml", MIME_APPLICATION_MATHML_XML); put1("cpt", MIME_APPLICATION_MAC_COMPACTPRO); put1("hqx", MIME_APPLICATION_MAC_BINHEX40); put1("jnlp", MIME_APPLICATION_JNLP); put1("ez", MIME_APPLICATION_ANDREW_INSET); put1("txt", MIME_TEXT_PLAIN); put1("ini", MIME_TEXT_PLAIN); put1("c", MIME_TEXT_PLAIN); put1("h", MIME_TEXT_PLAIN); put1("cpp", MIME_TEXT_PLAIN); put1("cxx", MIME_TEXT_PLAIN); put1("cc", MIME_TEXT_PLAIN); put1("chh", MIME_TEXT_PLAIN); put1("java", MIME_TEXT_PLAIN); put1("csv", MIME_TEXT_PLAIN); put1("bat", MIME_TEXT_PLAIN); put1("cmd", MIME_TEXT_PLAIN); put1("asc", MIME_TEXT_PLAIN); put1("rtf", 
MIME_TEXT_RTF); put1("rtx", MIME_TEXT_RICHTEXT); put1("html", MIME_TEXT_HTML); put1("htm", MIME_TEXT_HTML); put1("zip", MIME_APPLICATION_ZIP); put1("rar", MIME_APPLICATION_X_RAR_COMPRESSED); put1("gzip", MIME_APPLICATION_X_GZIP); put1("gz", MIME_APPLICATION_X_GZIP); put1("tgz", MIME_APPLICATION_TGZ); put1("tar", MIME_APPLICATION_X_TAR); put1("gif", MIME_IMAGE_GIF); put1("jpeg", MIME_IMAGE_JPEG); put1("jpg", MIME_IMAGE_JPEG); put1("jpe", MIME_IMAGE_JPEG); put1("tiff", MIME_IMAGE_TIFF); put1("tif", MIME_IMAGE_TIFF); put1("png", MIME_IMAGE_PNG); put1("au", MIME_AUDIO_BASIC); put1("snd", MIME_AUDIO_BASIC); put1("wav", MIME_AUDIO_X_WAV); put1("mov", MIME_VIDEO_QUICKTIME); put1("qt", MIME_VIDEO_QUICKTIME); put1("mpeg", MIME_VIDEO_MPEG); put1("mpg", MIME_VIDEO_MPEG); put1("mpe", MIME_VIDEO_MPEG); put1("abs", MIME_VIDEO_MPEG); put1("doc", MIME_APPLICATION_MSWORD); put1("docx", MIME_APPLICATION_MSWORD_2007); put1("odt", MIME_APPLICATION_VND_TEXT); put1("xls", MIME_APPLICATION_VND_MSEXCEL); put1("xlsx", MIME_APPLICATION_VND_MSEXCEL_2007); put1("ods", MIME_APPLICATION_VND_SPREADSHEET); put1("eps", MIME_APPLICATION_POSTSCRIPT); put1("ai", MIME_APPLICATION_POSTSCRIPT); put1("ps", MIME_APPLICATION_POSTSCRIPT); put1("pdf", MIME_APPLICATION_PDF); put1("exe", MIME_APPLICATION_OCTET_STREAM); put1("dll", MIME_APPLICATION_OCTET_STREAM); put1("class", MIME_APPLICATION_OCTET_STREAM); put1("jar", MIME_APPLICATION_JAVA_ARCHIVE); } }; } static { extMapping = new HashMap<String, String>(200) { private void put1(String key, String value) { if (put(key, value) != null) { throw new IllegalArgumentException("Duplicated Mimetype: " + key); } } { put1(MIME_APPLICATION_VND_MOZZILLA_XUL_XML, "xul"); put1(MIME_APPLICATION_JSON, "json"); put1(MIME_X_CONFERENCE_X_COOLTALK, "ice"); put1(MIME_VIDEO_X_SGI_MOVIE, "movie"); put1(MIME_VIDEO_X_MSVIDEO, "avi"); put1(MIME_VIDEO_X_MS_WMV, "wmv"); put1(MIME_VIDEO_VND_MPEGURL, "m4u"); put1(MIME_TEXT_X_COMPONENT, "htc"); put1(MIME_TEXT_X_SETEXT, "etx"); 
put1(MIME_TEXT_VND_WAP_WMLSCRIPT, "wmls"); put1(MIME_TEXT_VND_WAP_XML, "wml"); put1(MIME_TEXT_TAB_SEPARATED_VALUES, "tsv"); put1(MIME_TEXT_SGML, "sgml"); put1(MIME_TEXT_CSS, "css"); put1(MIME_TEXT_CALENDAR, "ics"); put1(MIME_MODEL_VRLM, "vrlm"); put1(MIME_MODEL_MESH, "mesh"); put1(MIME_MODEL_IGES, "iges"); put1(MIME_IMAGE_X_RGB, "rgb"); put1(MIME_IMAGE_X_PORTABLE_PIXMAP, "ppm"); put1(MIME_IMAGE_X_PORTABLE_GRAYMAP, "pgm"); put1(MIME_IMAGE_X_PORTABLE_BITMAP, "pbm"); put1(MIME_IMAGE_X_PORTABLE_ANYMAP, "pnm"); put1(MIME_IMAGE_X_ICON, "ico"); put1(MIME_IMAGE_X_CMU_RASTER, "ras"); put1(MIME_IMAGE_WAP_WBMP, "wbmp"); put1(MIME_IMAGE_VND_DJVU, "djvu"); put1(MIME_IMAGE_SVG_XML, "svg"); put1(MIME_IMAGE_IEF, "ief"); put1(MIME_IMAGE_CGM, "cgm"); put1(MIME_IMAGE_BMP, "bmp"); put1(MIME_CHEMICAL_X_XYZ, "xyz"); put1(MIME_CHEMICAL_X_PDB, "pdb"); put1(MIME_AUDIO_X_PN_REALAUDIO, "ra"); put1(MIME_AUDIO_X_MPEGURL, "m3u"); put1(MIME_AUDIO_X_AIFF, "aiff"); put1(MIME_AUDIO_MPEG, "mp3"); put1(MIME_AUDIO_MIDI, "midi"); put1(MIME_AUDIO_MP4, "m4a"); put1(MIME_APPLICATION_XML_DTD, "dtd"); put1(MIME_APPLICATION_XML, "xml"); put1(MIME_APPLICATION_XSLT_XML, "xslt"); put1(MIME_APPLICATION_XHTML_XML, "xhtml"); put1(MIME_APPLICATION_X_WAIS_SOURCE, "src"); put1(MIME_APPLICATION_X_USTAR, "ustar"); put1(MIME_APPLICATION_X_TROFF_MS, "ms"); put1(MIME_APPLICATION_X_TROFF_ME, "me"); put1(MIME_APPLICATION_X_TROFF_MAN, "man"); put1(MIME_APPLICATION_X_TROFF, "roff"); put1(MIME_APPLICATION_X_TEXINFO, "texi"); put1(MIME_APPLICATION_X_TEX, "tex"); put1(MIME_APPLICATION_X_TCL, "tcl"); put1(MIME_APPLICATION_X_SV4CRC, "sv4crc"); put1(MIME_APPLICATION_X_SV4CPIO, "sv4cpio"); put1(MIME_APPLICATION_X_STUFFIT, "sit"); put1(MIME_APPLICATION_X_SHOCKWAVE_FLASH, "swf"); put1(MIME_APPLICATION_X_SHAR, "shar"); put1(MIME_APPLICATION_X_SH, "sh"); put1(MIME_APPLICATION_X_NETCDF, "cdf"); put1(MIME_APPLICATION_X_LATEX, "latex"); put1(MIME_APPLICATION_X_KOAN, "skm"); put1(MIME_APPLICATION_X_JAVASCRIPT, "js"); 
put1(MIME_APPLICATION_X_HDF, "hdf"); put1(MIME_APPLICATION_X_GTAR, "gtar"); put1(MIME_APPLICATION_X_FUTURESPLASH, "spl"); put1(MIME_APPLICATION_X_DVI, "dvi"); put1(MIME_APPLICATION_X_DIRECTOR, "dir"); put1(MIME_APPLICATION_X_CSH, "csh"); put1(MIME_APPLICATION_X_CPIO, "cpio"); put1(MIME_APPLICATION_X_CHESS_PGN, "pgn"); put1(MIME_APPLICATION_X_CDLINK, "vcd"); put1(MIME_APPLICATION_X_BCPIO, "bcpio"); put1(MIME_APPLICATION_VND_RNREALMEDIA, "rm"); put1(MIME_APPLICATION_VND_MSPOWERPOINT, "ppt"); put1(MIME_APPLICATION_VND_MIF, "mif"); put1(MIME_APPLICATION_SRGS_XML, "grxml"); put1(MIME_APPLICATION_SRGS, "gram"); put1(MIME_APPLICATION_RDF_SMIL, "smil"); put1(MIME_APPLICATION_RDF_XML, "rdf"); put1(MIME_APPLICATION_X_OGG, "ogg"); put1(MIME_APPLICATION_ODA, "oda"); put1(MIME_APPLICATION_MATHML_XML, "mathml"); put1(MIME_APPLICATION_MAC_COMPACTPRO, "cpt"); put1(MIME_APPLICATION_MAC_BINHEX40, "hqx"); put1(MIME_APPLICATION_JNLP, "jnlp"); put1(MIME_APPLICATION_ANDREW_INSET, "ez"); put1(MIME_TEXT_PLAIN, "txt"); put1(MIME_TEXT_RTF, "rtf"); put1(MIME_TEXT_RICHTEXT, "rtx"); put1(MIME_TEXT_HTML, "html"); put1(MIME_APPLICATION_ZIP, "zip"); put1(MIME_APPLICATION_X_RAR_COMPRESSED, "rar"); put1(MIME_APPLICATION_X_GZIP, "gzip"); put1(MIME_APPLICATION_TGZ, "tgz"); put1(MIME_APPLICATION_X_TAR, "tar"); put1(MIME_IMAGE_GIF, "gif"); put1(MIME_IMAGE_JPEG, "jpg"); put1(MIME_IMAGE_TIFF, "tiff"); put1(MIME_IMAGE_PNG, "png"); put1(MIME_AUDIO_BASIC, "au"); put1(MIME_AUDIO_X_WAV, "wav"); put1(MIME_VIDEO_QUICKTIME, "mov"); put1(MIME_VIDEO_MPEG, "mpg"); put1(MIME_APPLICATION_MSWORD, "doc"); put1(MIME_APPLICATION_MSWORD_2007, "docx"); put1(MIME_APPLICATION_VND_TEXT, "odt"); put1(MIME_APPLICATION_VND_MSEXCEL, "xls"); put1(MIME_APPLICATION_VND_SPREADSHEET, "ods"); put1(MIME_APPLICATION_POSTSCRIPT, "ps"); put1(MIME_APPLICATION_PDF, "pdf"); put1(MIME_APPLICATION_OCTET_STREAM, "exe"); put1(MIME_APPLICATION_JAVA_ARCHIVE, "jar"); put1(MIME_VIDEO_MP4, "mp4"); } }; } /** * Registers MIME type for provided 
extension. Existing extension type will be overriden. */ public static void registerMimeType(String ext, String mimeType) { mimeTypeMapping.put(ext, mimeType); } /** * Returns the corresponding MIME type to the given extension. * If no MIME type was found it returns 'application/octet-stream' type. */ public static String getMimeType(String ext) { String mimeType = lookupMimeType(ext); if (mimeType == null) { mimeType = MIME_APPLICATION_OCTET_STREAM; } return mimeType; } /** * Simply returns MIME type or <code>null</code> if no type is found. */ public static String lookupMimeType(String ext) { return mimeTypeMapping.get(ext.toLowerCase()); } /** * Simply returns Ext or <code>null</code> if no Mimetype is found. */ public static String lookupExt(String mimeType) { return extMapping.get(mimeType.toLowerCase()); } /** * Returns the default Ext to the given MimeType. * If no MIME type was found it returns 'unknown' ext. */ public static String getDefaultExt(String mimeType) { String ext = lookupExt(mimeType); if (ext == null) { ext = "unknown"; } return ext; } }
chat-sdk/chat-sdk-android
chat-sdk-core/src/main/java/sdk/chat/core/utils/MimeTypesMap.java
214,418
/** * A package for doing Chinese word segmentation. * <p> * This package makes use of the CRFClassifier class (a conditional random * field sequence classifier) to do Chinese word segmentation. * </p> * <p> * On the Stanford NLP machines, usable properties files can be found at: * <code> /u/nlp/data/chinese-segmenter/Sighan2005/prop </code> * </p> * <p> * Usage: For simplified Chinese: * </p> * <blockquote><code> * java -mx200m edu.stanford.nlp.ie.crf.CRFClassifier -sighanCorporaDict $CH_SEG/data -NormalizationTable $CH_SEG/data/norm.simp.utf8 -normTableEncoding UTF-8 -loadClassifier $CH_SEG/data/ctb.gz -testFile $file -inputEncoding $enc * </code></blockquote> * * @author Pi-Chuan Chang * @author Huihsin Tseng * @author Galen Andrew */ package edu.stanford.nlp.wordseg;
stanfordnlp/CoreNLP
src/edu/stanford/nlp/wordseg/package-info.java
214,419
package com.baeldung.accessparamsjs;

import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.servlet.ModelAndView;

import java.util.Map;

/**
 * REST controller backing the tutorial article
 * "Access Spring MVC Model object in JavaScript".
 *
 * @author Andrew Shcherbakov
 */
@RestController
public class Controller {

    /**
     * Populates the shared model with two attributes — an integer and a
     * string — and resolves the Thymeleaf index template.
     *
     * @param model attribute map exposed to the view layer
     * @return the "thymeleaf/index" view
     */
    @RequestMapping("/index")
    public ModelAndView thymeleafView(Map<String, Object> model) {
        final ModelAndView view = new ModelAndView("thymeleaf/index");
        model.put("number", 1234);
        model.put("message", "Hello from Spring MVC");
        return view;
    }
}
eugenp/tutorials
spring-web-modules/spring-mvc-java/src/main/java/com/baeldung/accessparamsjs/Controller.java
214,421
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.NoSuchElementException; import java.util.StringTokenizer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Shell; import com.google.common.annotations.VisibleForTesting; /** * Wrapper for the Unix stat(1) command. Used to workaround the lack of * lstat(2) in Java 6. 
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Stat extends Shell {

  // Path exactly as the caller supplied it; only used in error messages.
  private final Path original;
  // Fully qualified path (scheme/authority restored); reported in the
  // resulting FileStatus.
  private final Path qualified;
  // Bare path (no scheme, authority, or URI fragment) passed to stat(1).
  private final Path path;
  // Block size to report in the FileStatus; stat(1) does not provide one.
  private final long blockSize;
  // Whether to follow symlinks (adds -L to the stat invocation).
  private final boolean dereference;

  // Result of the most recent run(); null until parseExecResult succeeds.
  private FileStatus stat;

  /**
   * Builds a Stat command for the given path.
   *
   * @param path      path to stat
   * @param blockSize block size to report in the resulting FileStatus
   * @param deref     true to dereference (follow) symlinks
   * @param fs        filesystem used to qualify the path
   * @throws IOException on qualification failure
   */
  public Stat(Path path, long blockSize, boolean deref, FileSystem fs)
      throws IOException {
    super(0L, true);
    // Original path
    this.original = path;
    // Qualify the original and strip out URI fragment via toUri().getPath()
    Path stripped = new Path(
        original.makeQualified(fs.getUri(), fs.getWorkingDirectory())
            .toUri().getPath());
    // Re-qualify the bare stripped path and store it
    this.qualified =
        stripped.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    // Strip back down to a plain path
    this.path = new Path(qualified.toUri().getPath());
    this.blockSize = blockSize;
    this.dereference = deref;
    // LANG = C setting — forces stat(1) to emit untranslated (English)
    // messages/fields so parseExecResult's string matching is reliable.
    Map<String, String> env = new HashMap<String, String>();
    env.put("LANG", "C");
    setEnvironment(env);
  }

  /**
   * Runs the stat command and returns the parsed result.
   *
   * @return FileStatus produced by the most recent run
   * @throws IOException if the command fails or its output cannot be parsed
   */
  public FileStatus getFileStatus() throws IOException {
    run();
    return stat;
  }

  /**
   * Whether Stat is supported on the current platform.
   *
   * @return true on Linux, FreeBSD and Mac (the platforms whose stat(1)
   *         output formats getExecString knows how to request)
   */
  public static boolean isAvailable() {
    if (Shell.LINUX || Shell.FREEBSD || Shell.MAC) {
      return true;
    }
    return false;
  }

  // Exposes the last parsed status without re-running the command.
  @VisibleForTesting
  FileStatus getFileStatusForTesting() {
    return stat;
  }

  /**
   * Builds the platform-specific stat(1) command line. Both format strings
   * request the same comma-separated fields that parseExecResult expects:
   * size, type, mtime, atime, octal perms, owner, group, name/link target.
   */
  @Override
  protected String[] getExecString() {
    String derefFlag = "-";
    if (dereference) {
      derefFlag = "-L";
    }
    if (Shell.LINUX) {
      return new String[] {
          "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N",
          path.toString() };
    } else if (Shell.FREEBSD || Shell.MAC) {
      return new String[] {
          "stat", derefFlag + "f",
          "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'",
          path.toString() };
    } else {
      throw new UnsupportedOperationException(
          "stat is not supported on this platform");
    }
  }

  /**
   * Parses one line of stat(1) output (format requested by getExecString)
   * into {@link #stat}. Error text is recognized by suffix matching on the
   * C-locale messages guaranteed by the LANG=C environment set in the
   * constructor.
   *
   * @param lines stdout of the stat command
   * @throws FileNotFoundException if stat reported a missing path
   * @throws IOException on any other failure or unparseable output
   */
  @Override
  protected void parseExecResult(BufferedReader lines) throws IOException {
    // Reset stat
    stat = null;
    String line = lines.readLine();
    if (line == null) {
      throw new IOException("Unable to stat path: " + original);
    }
    if (line.endsWith("No such file or directory") ||
        line.endsWith("Not a directory")) {
      throw new FileNotFoundException("File " + original + " does not exist");
    }
    if (line.endsWith("Too many levels of symbolic links")) {
      throw new IOException("Possible cyclic loop while following symbolic" +
          " link " + original);
    }
    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target'
    // OR
    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,'link' -> 'target'
    StringTokenizer tokens = new StringTokenizer(line, ",");
    try {
      long length = Long.parseLong(tokens.nextToken());
      boolean isDir =
          tokens.nextToken().equalsIgnoreCase("directory") ? true : false;
      // Convert from seconds to milliseconds
      long modTime = Long.parseLong(tokens.nextToken())*1000;
      long accessTime = Long.parseLong(tokens.nextToken())*1000;
      String octalPerms = tokens.nextToken();
      // FreeBSD has extra digits beyond 4, truncate them
      if (octalPerms.length() > 4) {
        int len = octalPerms.length();
        octalPerms = octalPerms.substring(len-4, len);
      }
      FsPermission perms = new FsPermission(Short.parseShort(octalPerms, 8));
      String owner = tokens.nextToken();
      String group = tokens.nextToken();
      String symStr = tokens.nextToken();
      // The last field takes one of three shapes (quote style varies by
      // stat implementation):
      // 'notalink'
      // `link' -> `target' OR 'link' -> 'target'
      // '' -> ''
      Path symlink = null;
      String parts[] = symStr.split(" -> ");
      try {
        // Strip the surrounding quote characters from the target.
        String target = parts[1];
        target = target.substring(1, target.length()-1);
        if (!target.isEmpty()) {
          symlink = new Path(target);
        }
      } catch (ArrayIndexOutOfBoundsException e) {
        // null if not a symlink
      }
      // Set stat
      stat = new FileStatus(length, isDir, 1, blockSize, modTime, accessTime,
          perms, owner, group, symlink, qualified);
    } catch (NumberFormatException e) {
      throw new IOException("Unexpected stat output: " + line, e);
    } catch (NoSuchElementException e) {
      throw new IOException("Unexpected stat output: " + line, e);
    }
  }
}
apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
214,423
/**
 * Copyright © 2016-2024 The Thingsboard Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.thingsboard.server.actors.device;

import lombok.Data;
import org.thingsboard.server.gen.transport.TransportProtos.SessionType;

/**
 * Immutable value holder pairing a transport session's type with the node it
 * lives on. Lombok's {@code @Data} generates the getters, equals/hashCode,
 * toString and the all-args constructor for the two final fields.
 *
 * @author Andrew Shvayka
 */
@Data
public class SessionInfo {
    // Session type as defined by TransportProtos.SessionType.
    private final SessionType type;
    // NOTE(review): presumably the id of the cluster node hosting the
    // session — confirm against the device actor that constructs this.
    private final String nodeId;
}
thingsboard/thingsboard
application/src/main/java/org/thingsboard/server/actors/device/SessionInfo.java
214,425
/* * Copyright (C) 2012 Andrew Neal * Copyright (C) 2014 The CyanogenMod Project * Licensed under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law * or agreed to in writing, software distributed under the License is * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the specific language * governing permissions and limitations under the License. */ package com.naman14.timber.utils; import android.provider.MediaStore; /** * Holds all of the sort orders for each list type. * * @author Andrew Neal ([email protected]) */ public final class SortOrder { /** * This class is never instantiated */ public SortOrder() { } /** * Artist sort order entries. */ public interface ArtistSortOrder { /* Artist sort order A-Z */ String ARTIST_A_Z = MediaStore.Audio.Artists.DEFAULT_SORT_ORDER; /* Artist sort order Z-A */ String ARTIST_Z_A = ARTIST_A_Z + " DESC"; /* Artist sort order number of songs */ String ARTIST_NUMBER_OF_SONGS = MediaStore.Audio.Artists.NUMBER_OF_TRACKS + " DESC"; /* Artist sort order number of albums */ String ARTIST_NUMBER_OF_ALBUMS = MediaStore.Audio.Artists.NUMBER_OF_ALBUMS + " DESC"; } /** * Album sort order entries. */ public interface AlbumSortOrder { /* Album sort order A-Z */ String ALBUM_A_Z = MediaStore.Audio.Albums.DEFAULT_SORT_ORDER; /* Album sort order Z-A */ String ALBUM_Z_A = ALBUM_A_Z + " DESC"; /* Album sort order songs */ String ALBUM_NUMBER_OF_SONGS = MediaStore.Audio.Albums.NUMBER_OF_SONGS + " DESC"; /* Album sort order artist */ String ALBUM_ARTIST = MediaStore.Audio.Albums.ARTIST; /* Album sort order year */ String ALBUM_YEAR = MediaStore.Audio.Albums.FIRST_YEAR + " DESC"; } /** * Song sort order entries. 
*/ public interface SongSortOrder { /* Song sort order A-Z */ String SONG_A_Z = MediaStore.Audio.Media.DEFAULT_SORT_ORDER; /* Song sort order Z-A */ String SONG_Z_A = SONG_A_Z + " DESC"; /* Song sort order artist */ String SONG_ARTIST = MediaStore.Audio.Media.ARTIST; /* Song sort order album */ String SONG_ALBUM = MediaStore.Audio.Media.ALBUM; /* Song sort order year */ String SONG_YEAR = MediaStore.Audio.Media.YEAR + " DESC"; /* Song sort order duration */ String SONG_DURATION = MediaStore.Audio.Media.DURATION + " DESC"; /* Song sort order date */ String SONG_DATE = MediaStore.Audio.Media.DATE_ADDED + " DESC"; /* Song sort order filename */ String SONG_FILENAME = MediaStore.Audio.Media.DATA; } /** * Album song sort order entries. */ public interface AlbumSongSortOrder { /* Album song sort order A-Z */ String SONG_A_Z = MediaStore.Audio.Media.DEFAULT_SORT_ORDER; /* Album song sort order Z-A */ String SONG_Z_A = SONG_A_Z + " DESC"; /* Album song sort order track list */ String SONG_TRACK_LIST = MediaStore.Audio.Media.TRACK + ", " + MediaStore.Audio.Media.DEFAULT_SORT_ORDER; /* Album song sort order duration */ String SONG_DURATION = SongSortOrder.SONG_DURATION; /* Album Song sort order year */ String SONG_YEAR = MediaStore.Audio.Media.YEAR + " DESC"; /* Album song sort order filename */ String SONG_FILENAME = SongSortOrder.SONG_FILENAME; } /** * Artist song sort order entries. 
*/ public interface ArtistSongSortOrder { /* Artist song sort order A-Z */ String SONG_A_Z = MediaStore.Audio.Media.DEFAULT_SORT_ORDER; /* Artist song sort order Z-A */ String SONG_Z_A = SONG_A_Z + " DESC"; /* Artist song sort order album */ String SONG_ALBUM = MediaStore.Audio.Media.ALBUM; /* Artist song sort order year */ String SONG_YEAR = MediaStore.Audio.Media.YEAR + " DESC"; /* Artist song sort order duration */ String SONG_DURATION = MediaStore.Audio.Media.DURATION + " DESC"; /* Artist song sort order date */ String SONG_DATE = MediaStore.Audio.Media.DATE_ADDED + " DESC"; /* Artist song sort order filename */ String SONG_FILENAME = SongSortOrder.SONG_FILENAME; } /** * Artist album sort order entries. */ public interface ArtistAlbumSortOrder { /* Artist album sort order A-Z */ String ALBUM_A_Z = MediaStore.Audio.Albums.DEFAULT_SORT_ORDER; /* Artist album sort order Z-A */ String ALBUM_Z_A = ALBUM_A_Z + " DESC"; /* Artist album sort order songs */ String ALBUM_NUMBER_OF_SONGS = MediaStore.Audio.Artists.Albums.NUMBER_OF_SONGS + " DESC"; /* Artist album sort order year */ String ALBUM_YEAR = MediaStore.Audio.Artists.Albums.FIRST_YEAR + " DESC"; } }
naman14/Timber
app/src/main/java/com/naman14/timber/utils/SortOrder.java
214,427
404: Not Found
Twitter4J/Twitter4J
twitter4j-core/src/http/java/twitter4j/HttpResponseListener.java
214,428
/** * Copyright 2013 Dennis Ippel * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package org.rajawali3d.postprocessing.passes; /** * Code heavily referenced from Three.js post processing framework. */ import android.opengl.GLES20; import org.rajawali3d.postprocessing.APass; import org.rajawali3d.primitives.ScreenQuad; import org.rajawali3d.renderer.Renderer; import org.rajawali3d.renderer.RenderTarget; import org.rajawali3d.scene.Scene; /** * Masked render pass for drawing to stencil buffer. * @author Andrew Jo ([email protected] / www.andrewjo.com) */ public class MaskPass extends APass { protected Scene mScene; protected boolean mInverse; public MaskPass(Scene scene) { mPassType = PassType.MASK; mScene = scene; mEnabled = true; mClear = true; mNeedsSwap = false; mInverse = false; } /** * Returns whether the stencil is inverted. * @return True if inverted, false otherwise. */ public boolean isInverse() { return mInverse; } /** * Sets whether to invert the stencil buffer. * @param inverse True to invert, false otherwise. */ public void setInverse(boolean inverse) { mInverse = inverse; } @Override public void render(Scene scene, Renderer render, ScreenQuad screenQuad, RenderTarget writeBuffer, RenderTarget readBuffer, long elapsedTime, double deltaTime) { // Do not update color or depth. GLES20.glColorMask(false, false, false, false); GLES20.glDepthMask(false); // Set up stencil. 
int writeValue, clearValue; if (mInverse) { writeValue = 0; clearValue = 1; } else { writeValue = 1; clearValue = 0; } GLES20.glEnable(GLES20.GL_STENCIL_TEST); GLES20.glStencilOp(GLES20.GL_REPLACE, GLES20.GL_REPLACE, GLES20.GL_REPLACE); GLES20.glStencilFunc(GLES20.GL_ALWAYS, writeValue, 0xffffffff); GLES20.glClearStencil(clearValue); // Draw into the stencil buffer. mScene.render(elapsedTime, deltaTime, readBuffer); mScene.render(elapsedTime, deltaTime, writeBuffer); // Re-enable color and depth. GLES20.glColorMask(true, true, true, true); GLES20.glDepthMask(true); // Only render where stencil is set to 1. GLES20.glStencilFunc(GLES20.GL_EQUAL, 1, 0xffffffff); GLES20.glStencilOp(GLES20.GL_KEEP, GLES20.GL_KEEP, GLES20.GL_KEEP); } }
Rajawali/Rajawali
rajawali/src/main/java/org/rajawali3d/postprocessing/passes/MaskPass.java
214,429
/*
 * Copyright 2019 The Error Prone Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.errorprone.bugpatterns.javadoc;

import static com.google.errorprone.BugPattern.LinkType.CUSTOM;
import static com.google.errorprone.BugPattern.SeverityLevel.WARNING;
import static com.google.errorprone.bugpatterns.javadoc.Utils.diagnosticPosition;

import com.google.errorprone.BugPattern;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.bugpatterns.BugChecker.ClassTreeMatcher;
import com.google.errorprone.bugpatterns.BugChecker.MethodTreeMatcher;
import com.google.errorprone.bugpatterns.BugChecker.VariableTreeMatcher;
import com.google.errorprone.fixes.SuggestedFix;
import com.google.errorprone.matchers.Description;
import com.sun.source.doctree.BlockTagTree;
import com.sun.source.doctree.DeprecatedTree;
import com.sun.source.doctree.DocTree;
import com.sun.source.doctree.ParamTree;
import com.sun.source.doctree.ReturnTree;
import com.sun.source.doctree.ThrowsTree;
import com.sun.source.tree.ClassTree;
import com.sun.source.tree.MethodTree;
import com.sun.source.tree.VariableTree;
import com.sun.source.util.DocTreePath;
import com.sun.source.util.DocTreePathScanner;
import java.util.List;

/**
 * Matches block tags ({@literal @}param, {@literal @}return, {@literal @}throws,
 * {@literal @}deprecated) with an empty description.
 *
 * @author [email protected] (Andrew Ash)
 */
@BugPattern(
    summary =
        "A block tag (@param, @return, @throws, @deprecated) has an empty description. Block tags"
            + " without descriptions don't add much value for future readers of the code; consider"
            + " removing the tag entirely or adding a description.",
    severity = WARNING,
    linkType = CUSTOM,
    link = "https://google.github.io/styleguide/javaguide.html#s7.1.3-javadoc-block-tags",
    documentSuppression = false)
public final class EmptyBlockTag extends BugChecker
    implements ClassTreeMatcher, MethodTreeMatcher, VariableTreeMatcher {

  @Override
  public Description matchClass(ClassTree classTree, VisitorState state) {
    // The scanner reports matches directly via the state; nothing to return here.
    checkForEmptyBlockTags(state);
    return Description.NO_MATCH;
  }

  @Override
  public Description matchMethod(MethodTree methodTree, VisitorState state) {
    checkForEmptyBlockTags(state);
    return Description.NO_MATCH;
  }

  @Override
  public Description matchVariable(VariableTree variableTree, VisitorState state) {
    checkForEmptyBlockTags(state);
    return Description.NO_MATCH;
  }

  /** Scans the javadoc attached to the current element, if any, for empty block tags. */
  private void checkForEmptyBlockTags(VisitorState state) {
    DocTreePath path = Utils.getDocTreePath(state);
    if (path == null) {
      // Element has no javadoc; nothing to check.
      return;
    }
    new EmptyDescriptionScanner(state).scan(path, null);
  }

  /** Visits each supported block tag and reports those whose description is empty. */
  private final class EmptyDescriptionScanner extends DocTreePathScanner<Void, Void> {
    private final VisitorState state;

    private EmptyDescriptionScanner(VisitorState state) {
      this.state = state;
    }

    @Override
    public Void visitParam(ParamTree paramTree, Void unused) {
      reportMatchIfEmpty(paramTree, paramTree.getDescription());
      return super.visitParam(paramTree, null);
    }

    @Override
    public Void visitReturn(ReturnTree returnTree, Void unused) {
      reportMatchIfEmpty(returnTree, returnTree.getDescription());
      return super.visitReturn(returnTree, null);
    }

    @Override
    public Void visitThrows(ThrowsTree throwsTree, Void unused) {
      reportMatchIfEmpty(throwsTree, throwsTree.getDescription());
      return super.visitThrows(throwsTree, null);
    }

    @Override
    public Void visitDeprecated(DeprecatedTree deprecatedTree, Void unused) {
      reportMatchIfEmpty(deprecatedTree, deprecatedTree.getBody());
      return super.visitDeprecated(deprecatedTree, null);
    }

    private void reportMatchIfEmpty(
        BlockTagTree blockTagTree, List<? extends DocTree> description) {
      if (!description.isEmpty()) {
        return;
      }
      // Don't generate a fix for deprecated; this will be annoying in conjunction with
      // the check which requires a @deprecated tag for @Deprecated elements.
      SuggestedFix fix =
          blockTagTree.getTagName().equals("deprecated")
              ? SuggestedFix.emptyFix()
              : Utils.replace(blockTagTree, "", state);
      state.reportMatch(describeMatch(diagnosticPosition(getCurrentPath(), state), fix));
    }
  }
}
google/error-prone
core/src/main/java/com/google/errorprone/bugpatterns/javadoc/EmptyBlockTag.java
214,432
package org.apereo.cas.authentication.principal.merger;

import java.util.List;
import java.util.Map;

/**
 * Merger which implements accumulation of Map entries such that entries once
 * established are individually immutable.
 *
 * <p>An attribute from {@code toConsider} is copied into {@code toModify} only
 * when {@code toModify} does not already contain that key; existing entries
 * (including entries mapped to {@code null}) are never overwritten.
 *
 * @author [email protected]
 * @since 7.1.0
 */
public class NoncollidingAttributeAdder extends BaseAdditiveAttributeMerger {

    @Override
    protected Map<String, List<Object>> mergePersonAttributes(final Map<String, List<Object>> toModify,
                                                              final Map<String, List<Object>> toConsider) {
        // containsKey (rather than putIfAbsent) keeps keys mapped to null intact.
        toConsider.forEach((attributeName, attributeValues) -> {
            if (!toModify.containsKey(attributeName)) {
                toModify.put(attributeName, attributeValues);
            }
        });
        return toModify;
    }
}
apereo/cas
core/cas-server-core-authentication-api/src/main/java/org/apereo/cas/authentication/principal/merger/NoncollidingAttributeAdder.java
214,434
/*
 * Copyright 2002-2023 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.web.servlet.tags;

import java.io.IOException;
import java.nio.charset.UnsupportedCharsetException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import jakarta.servlet.ServletRequest;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import jakarta.servlet.jsp.JspException;
import jakarta.servlet.jsp.PageContext;

import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import org.springframework.web.servlet.support.RequestDataValueProcessor;
import org.springframework.web.util.JavaScriptUtils;
import org.springframework.web.util.TagUtils;
import org.springframework.web.util.UriUtils;

/**
 * The {@code <url>} tag creates URLs. Modeled after the JSTL {@code c:url} tag with
 * backwards compatibility in mind.
 *
 * <p>Enhancements to the JSTL functionality include:
 * <ul>
 * <li>URL encoded template URI variables</li>
 * <li>HTML/XML escaping of URLs</li>
 * <li>JavaScript escaping of URLs</li>
 * </ul>
 *
 * <p>Template URI variables are indicated in the {@link #setValue(String) 'value'}
 * attribute and marked by braces '{variableName}'. The braces and attribute name are
 * replaced by the URL encoded value of a parameter defined with the spring:param tag
 * in the body of the url tag. If no parameter is available the literal value is
 * passed through. Params matched to template variables will not be added to the query
 * string.
 *
 * <p>Use of the spring:param tag for URI template variables is strongly recommended
 * over direct EL substitution as the values are URL encoded. Failure to properly
 * encode URL can leave an application vulnerable to XSS and other injection attacks.
 *
 * <p>URLs can be HTML/XML escaped by setting the {@link #setHtmlEscape(boolean)
 * 'htmlEscape'} attribute to 'true'. Detects an HTML escaping setting, either on
 * this tag instance, the page level, or the {@code web.xml} level. The default
 * is 'false'. When setting the URL value into a variable, escaping is not recommended.
 *
 * <p>Example usage:
 * <pre class="code">&lt;spring:url value="/url/path/{variableName}"&gt;
 *   &lt;spring:param name="variableName" value="more than JSTL c:url" /&gt;
 * &lt;/spring:url&gt;</pre>
 *
 * <p>The above results in:
 * {@code /currentApplicationContext/url/path/more%20than%20JSTL%20c%3Aurl}
 *
 * <table>
 * <caption>Attribute Summary</caption>
 * <thead>
 * <tr>
 * <th>Attribute</th>
 * <th>Required?</th>
 * <th>Runtime Expression?</th>
 * <th>Description</th>
 * </tr>
 * </thead>
 * <tbody>
 * <tr>
 * <td>value</td>
 * <td>true</td>
 * <td>true</td>
 * <td>The URL to build. This value can include template {placeholders} that are
 * replaced with the URL encoded value of the named parameter. Parameters
 * must be defined using the param tag inside the body of this tag.</td>
 * </tr>
 * <tr>
 * <td>context</td>
 * <td>false</td>
 * <td>true</td>
 * <td>Specifies a remote application context path.
 * The default is the current application context path.</td>
 * </tr>
 * <tr>
 * <td>var</td>
 * <td>false</td>
 * <td>true</td>
 * <td>The name of the variable to export the URL value to.
 * If not specified the URL is written as output.</td>
 * </tr>
 * <tr>
 * <td>scope</td>
 * <td>false</td>
 * <td>true</td>
 * <td>The scope for the var. 'application', 'session', 'request' and 'page'
 * scopes are supported. Defaults to page scope. This attribute has no
 * effect unless the var attribute is also defined.</td>
 * </tr>
 * <tr>
 * <td>htmlEscape</td>
 * <td>false</td>
 * <td>true</td>
 * <td>Set HTML escaping for this tag, as a boolean value. Overrides the
 * default HTML escaping setting for the current page.</td>
 * </tr>
 * <tr>
 * <td>javaScriptEscape</td>
 * <td>false</td>
 * <td>true</td>
 * <td>Set JavaScript escaping for this tag, as a boolean value.
 * Default is {@code false}.</td>
 * </tr>
 * </tbody>
 * </table>
 *
 * @author Scott Andrews
 * @since 3.0
 * @see ParamTag
 */
@SuppressWarnings("serial")
public class UrlTag extends HtmlEscapingAwareTag implements ParamAware {

	private static final String URL_TEMPLATE_DELIMITER_PREFIX = "{";

	private static final String URL_TEMPLATE_DELIMITER_SUFFIX = "}";

	private static final String URL_TYPE_ABSOLUTE = "://";


	// Params collected from nested spring:param tags; (re)initialized per
	// invocation in doStartTagInternal (the field default is an immutable
	// placeholder that is never mutated).
	private List<Param> params = Collections.emptyList();

	// Names of params that were consumed as URI template variables and must
	// therefore be excluded from the query string.
	private Set<String> templateParams = Collections.emptySet();

	// Classification of 'value' (absolute / context-relative / relative),
	// derived in setValue.
	@Nullable
	private UrlType type;

	@Nullable
	private String value;

	@Nullable
	private String context;

	@Nullable
	private String var;

	private int scope = PageContext.PAGE_SCOPE;

	private boolean javaScriptEscape = false;


	/**
	 * Set the value of the URL.
	 * Also classifies the URL: values containing "://" are absolute,
	 * values starting with "/" are context-relative, everything else is relative.
	 */
	public void setValue(String value) {
		if (value.contains(URL_TYPE_ABSOLUTE)) {
			this.type = UrlType.ABSOLUTE;
			this.value = value;
		}
		else if (value.startsWith("/")) {
			this.type = UrlType.CONTEXT_RELATIVE;
			this.value = value;
		}
		else {
			this.type = UrlType.RELATIVE;
			this.value = value;
		}
	}

	/**
	 * Set the context path for the URL.
	 * Defaults to the current context. A missing leading slash is added.
	 */
	public void setContext(String context) {
		if (context.startsWith("/")) {
			this.context = context;
		}
		else {
			this.context = "/" + context;
		}
	}

	/**
	 * Set the variable name to expose the URL under. Defaults to rendering the
	 * URL to the current JspWriter.
	 */
	public void setVar(String var) {
		this.var = var;
	}

	/**
	 * Set the scope to export the URL variable to. This attribute has no
	 * meaning unless var is also defined.
	 */
	public void setScope(String scope) {
		this.scope = TagUtils.getScope(scope);
	}

	/**
	 * Set JavaScript escaping for this tag, as boolean value.
	 * Default is "false".
	 */
	public void setJavaScriptEscape(boolean javaScriptEscape) throws JspException {
		this.javaScriptEscape = javaScriptEscape;
	}

	/**
	 * Callback invoked by nested {@code spring:param} tags to register a parameter.
	 */
	@Override
	public void addParam(Param param) {
		this.params.add(param);
	}

	@Override
	public int doStartTagInternal() throws JspException {
		// Fresh, mutable collections per tag invocation (the tag instance may
		// be pooled and reused by the JSP container).
		this.params = new ArrayList<>();
		this.templateParams = new HashSet<>();
		return EVAL_BODY_INCLUDE;
	}

	@Override
	public int doEndTag() throws JspException {
		String url = createUrl();

		// Give a registered RequestDataValueProcessor (e.g. for CSRF tokens)
		// a chance to post-process the URL.
		RequestDataValueProcessor processor = getRequestContext().getRequestDataValueProcessor();
		ServletRequest request = this.pageContext.getRequest();
		if ((processor != null) && (request instanceof HttpServletRequest httpServletRequest)) {
			url = processor.processUrl(httpServletRequest, url);
		}

		if (this.var == null) {
			// print the url to the writer
			try {
				this.pageContext.getOut().print(url);
			}
			catch (IOException ex) {
				throw new JspException(ex);
			}
		}
		else {
			// store the url as a variable
			this.pageContext.setAttribute(this.var, url, this.scope);
		}
		return EVAL_PAGE;
	}


	/**
	 * Build the URL for the tag from the tag attributes and parameters.
	 * @return the URL value as a String
	 */
	String createUrl() throws JspException {
		Assert.state(this.value != null, "No value set");
		HttpServletRequest request = (HttpServletRequest) this.pageContext.getRequest();
		HttpServletResponse response = (HttpServletResponse) this.pageContext.getResponse();

		StringBuilder url = new StringBuilder();
		if (this.type == UrlType.CONTEXT_RELATIVE) {
			// add application context to url
			if (this.context == null) {
				url.append(request.getContextPath());
			}
			else {
				// strip a trailing slash so the context joins cleanly with 'value'
				if (this.context.endsWith("/")) {
					url.append(this.context, 0, this.context.length() - 1);
				}
				else {
					url.append(this.context);
				}
			}
		}
		if (this.type != UrlType.RELATIVE && this.type != UrlType.ABSOLUTE && !this.value.startsWith("/")) {
			url.append('/');
		}
		url.append(replaceUriTemplateParams(this.value, this.params, this.templateParams));
		// Only prepend '?' if the URL does not already contain a query string.
		url.append(createQueryString(this.params, this.templateParams, (url.indexOf("?") == -1)));

		String urlStr = url.toString();
		if (this.type != UrlType.ABSOLUTE) {
			// Add the session identifier if needed
			// (Do not embed the session identifier in a remote link!)
			urlStr = response.encodeURL(urlStr);
		}

		// HTML and/or JavaScript escape, if demanded.
		urlStr = htmlEscape(urlStr);
		urlStr = (this.javaScriptEscape ? JavaScriptUtils.javaScriptEscape(urlStr) : urlStr);

		return urlStr;
	}

	/**
	 * Build the query string from available parameters that have not already
	 * been applied as template params.
	 * <p>The names and values of parameters are URL encoded.
	 * @param params the parameters to build the query string from
	 * @param usedParams set of parameter names that have been applied as
	 * template params
	 * @param includeQueryStringDelimiter true if the query string should start
	 * with a '?' instead of '&amp;'
	 * @return the query string
	 */
	protected String createQueryString(List<Param> params, Set<String> usedParams, boolean includeQueryStringDelimiter)
			throws JspException {

		String encoding = this.pageContext.getResponse().getCharacterEncoding();
		StringBuilder qs = new StringBuilder();
		for (Param param : params) {
			// Skip params already consumed as template variables, and nameless params.
			if (!usedParams.contains(param.getName()) && StringUtils.hasLength(param.getName())) {
				if (includeQueryStringDelimiter && qs.length() == 0) {
					qs.append('?');
				}
				else {
					qs.append('&');
				}
				try {
					qs.append(UriUtils.encodeQueryParam(param.getName(), encoding));
					if (param.getValue() != null) {
						qs.append('=');
						qs.append(UriUtils.encodeQueryParam(param.getValue(), encoding));
					}
				}
				catch (UnsupportedCharsetException ex) {
					throw new JspException(ex);
				}
			}
		}
		return qs.toString();
	}

	/**
	 * Replace template markers in the URL matching available parameters. The
	 * name of matched parameters are added to the used parameters set.
	 * <p>Parameter values are URL encoded.
	 * @param uri the URL with template parameters to replace
	 * @param params parameters used to replace template markers
	 * @param usedParams set of template parameter names that have been replaced
	 * @return the URL with template parameters replaced
	 */
	protected String replaceUriTemplateParams(String uri, List<Param> params, Set<String> usedParams)
			throws JspException {

		String encoding = this.pageContext.getResponse().getCharacterEncoding();
		for (Param param : params) {
			// Try the plain "{name}" form first: encoded as a full path.
			String template = URL_TEMPLATE_DELIMITER_PREFIX + param.getName() + URL_TEMPLATE_DELIMITER_SUFFIX;
			if (uri.contains(template)) {
				usedParams.add(param.getName());
				String value = param.getValue();
				try {
					uri = StringUtils.replace(uri, template,
							(value != null ? UriUtils.encodePath(value, encoding) : ""));
				}
				catch (UnsupportedCharsetException ex) {
					throw new JspException(ex);
				}
			}
			else {
				// Then the "{/name}" form: encoded as a single path segment
				// (slashes in the value are escaped too).
				template = URL_TEMPLATE_DELIMITER_PREFIX + '/' + param.getName() + URL_TEMPLATE_DELIMITER_SUFFIX;
				if (uri.contains(template)) {
					usedParams.add(param.getName());
					String value = param.getValue();
					try {
						uri = StringUtils.replace(uri, template,
								(value != null ? UriUtils.encodePathSegment(value, encoding) : ""));
					}
					catch (UnsupportedCharsetException ex) {
						throw new JspException(ex);
					}
				}
			}
		}
		return uri;
	}


	/**
	 * Internal enum that classifies URLs by type.
	 */
	private enum UrlType {

		CONTEXT_RELATIVE, RELATIVE, ABSOLUTE
	}

}
spring-projects/spring-framework
spring-webmvc/src/main/java/org/springframework/web/servlet/tags/UrlTag.java
214,436
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.streams.processor.internals; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.internals.KafkaFutureImpl; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.errors.TopologyException; import org.apache.kafka.streams.errors.UnknownTopologyException; import org.apache.kafka.streams.internals.StreamsConfigUtils; import org.apache.kafka.streams.internals.StreamsConfigUtils.ProcessingMode; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.TaskId; import org.apache.kafka.streams.processor.internals.InternalTopologyBuilder.TopicsInfo; import org.apache.kafka.streams.TopologyConfig.TaskConfig; import org.apache.kafka.streams.processor.internals.namedtopology.NamedTopology; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import 
java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static java.util.Collections.emptySet; public class TopologyMetadata { private Logger log; // the "__" (double underscore) string is not allowed for topology names, so it's safe to use to indicate // that it's not a named topology public static final String UNNAMED_TOPOLOGY = "__UNNAMED_TOPOLOGY__"; private static final Pattern EMPTY_ZERO_LENGTH_PATTERN = Pattern.compile(""); private final StreamsConfig config; private final ProcessingMode processingMode; private final TopologyVersion version; private final TaskExecutionMetadata taskExecutionMetadata; private final Set<String> pausedTopologies; private final ConcurrentNavigableMap<String, InternalTopologyBuilder> builders; // Keep sorted by topology name for readability private ProcessorTopology globalTopology; private final Map<String, StateStore> globalStateStores = new HashMap<>(); private final Set<String> allInputTopics = new HashSet<>(); private final Map<String, Long> threadVersions = new ConcurrentHashMap<>(); public static class TopologyVersion { public AtomicLong topologyVersion = new AtomicLong(0L); // the local topology version public ReentrantLock topologyLock = new ReentrantLock(); public Condition topologyCV = topologyLock.newCondition(); public List<TopologyVersionListener> activeTopologyUpdateListeners = new LinkedList<>(); } public static class TopologyVersionListener { final long topologyVersion; // the 
(minimum) version to wait for these threads to cross final KafkaFutureImpl<Void> future; // the future waiting on all threads to be updated public TopologyVersionListener(final long topologyVersion, final KafkaFutureImpl<Void> future) { this.topologyVersion = topologyVersion; this.future = future; } } public TopologyMetadata(final InternalTopologyBuilder builder, final StreamsConfig config) { this.version = new TopologyVersion(); this.processingMode = StreamsConfigUtils.processingMode(config); this.config = config; this.log = LoggerFactory.getLogger(getClass()); this.pausedTopologies = ConcurrentHashMap.newKeySet(); builders = new ConcurrentSkipListMap<>(); if (builder.hasNamedTopology()) { builders.put(builder.topologyName(), builder); } else { builders.put(UNNAMED_TOPOLOGY, builder); } this.taskExecutionMetadata = new TaskExecutionMetadata(builders.keySet(), pausedTopologies, processingMode); } public TopologyMetadata(final ConcurrentNavigableMap<String, InternalTopologyBuilder> builders, final StreamsConfig config) { this.version = new TopologyVersion(); this.processingMode = StreamsConfigUtils.processingMode(config); this.config = config; this.log = LoggerFactory.getLogger(getClass()); this.pausedTopologies = ConcurrentHashMap.newKeySet(); this.builders = builders; if (builders.isEmpty()) { log.info("Created an empty KafkaStreams app with no topology"); } this.taskExecutionMetadata = new TaskExecutionMetadata(builders.keySet(), pausedTopologies, processingMode); } // Need to (re)set the log here to pick up the `processId` part of the clientId in the prefix public void setLog(final LogContext logContext) { log = logContext.logger(getClass()); } public ProcessingMode processingMode() { return processingMode; } public long topologyVersion() { return version.topologyVersion.get(); } private void lock() { version.topologyLock.lock(); } private void unlock() { version.topologyLock.unlock(); } public Collection<String> sourceTopicsForTopology(final String name) { 
return builders.get(name).fullSourceTopicNames(); } public boolean needsUpdate(final String threadName) { return threadVersions.get(threadName) < topologyVersion(); } public void registerThread(final String threadName) { threadVersions.put(threadName, 0L); } public void unregisterThread(final String threadName) { threadVersions.remove(threadName); maybeNotifyTopologyVersionListeners(); } public TaskExecutionMetadata taskExecutionMetadata() { return taskExecutionMetadata; } public void executeTopologyUpdatesAndBumpThreadVersion(final Consumer<Set<String>> handleTopologyAdditions, final Consumer<Set<String>> handleTopologyRemovals) { try { version.topologyLock.lock(); final long latestTopologyVersion = topologyVersion(); handleTopologyAdditions.accept(namedTopologiesView()); handleTopologyRemovals.accept(namedTopologiesView()); threadVersions.put(Thread.currentThread().getName(), latestTopologyVersion); } finally { version.topologyLock.unlock(); } } public void maybeNotifyTopologyVersionListeners() { try { lock(); final long minThreadVersion = getMinimumThreadVersion(); final Iterator<TopologyVersionListener> iterator = version.activeTopologyUpdateListeners.listIterator(); TopologyVersionListener topologyVersionListener; while (iterator.hasNext()) { topologyVersionListener = iterator.next(); final long topologyVersionWaitersVersion = topologyVersionListener.topologyVersion; if (minThreadVersion >= topologyVersionWaitersVersion) { topologyVersionListener.future.complete(null); iterator.remove(); log.info("All threads are now on topology version {}", topologyVersionListener.topologyVersion); } } } finally { unlock(); } } // Return the minimum version across all live threads, or Long.MAX_VALUE if there are no threads running private long getMinimumThreadVersion() { final Optional<Long> minVersion = threadVersions.values().stream().min(Long::compare); return minVersion.orElse(Long.MAX_VALUE); } public void wakeupThreads() { try { lock(); version.topologyCV.signalAll(); } 
finally { unlock(); } } public void maybeWaitForNonEmptyTopology(final Supplier<StreamThread.State> threadState) { if (isEmpty() && threadState.get().isAlive()) { try { lock(); while (isEmpty() && threadState.get().isAlive()) { try { log.debug("Detected that the topology is currently empty, waiting for something to process"); version.topologyCV.await(); } catch (final InterruptedException e) { log.error("StreamThread was interrupted while waiting on empty topology", e); } } } finally { unlock(); } } } /** * Adds the topology and registers a future that listens for all threads on the older version to see the update */ public void registerAndBuildNewTopology(final KafkaFutureImpl<Void> future, final InternalTopologyBuilder newTopologyBuilder) { try { lock(); buildAndVerifyTopology(newTopologyBuilder); log.info("New NamedTopology {} passed validation and will be added, old topology version is {}", newTopologyBuilder.topologyName(), version.topologyVersion.get()); version.topologyVersion.incrementAndGet(); version.activeTopologyUpdateListeners.add(new TopologyVersionListener(topologyVersion(), future)); builders.put(newTopologyBuilder.topologyName(), newTopologyBuilder); wakeupThreads(); log.info("Added NamedTopology {} and updated topology version to {}", newTopologyBuilder.topologyName(), version.topologyVersion.get()); } catch (final Throwable throwable) { log.error("Failed to add NamedTopology {}, please retry the operation.", newTopologyBuilder.topologyName()); future.completeExceptionally(throwable); } finally { unlock(); } } /** * Pauses a topology by name * @param topologyName Name of the topology to pause */ public void pauseTopology(final String topologyName) { pausedTopologies.add(topologyName); } /** * Checks if a given topology is paused. * @param topologyName If null, assume that we are checking the `UNNAMED_TOPOLOGY`. * @return A boolean indicating if the topology is paused. 
*/
    public boolean isPaused(final String topologyName) {
        // A null name refers to the single un-named topology of a regular (non-named) app
        if (topologyName == null) {
            return pausedTopologies.contains(UNNAMED_TOPOLOGY);
        } else {
            return pausedTopologies.contains(topologyName);
        }
    }

    /**
     * Resumes a topology by name
     * @param topologyName Name of the topology to resume
     */
    public void resumeTopology(final String topologyName) {
        pausedTopologies.remove(topologyName);
    }

    /**
     * Removes the topology and registers a future that listens for all threads on the older version to see the update
     */
    public KafkaFuture<Void> unregisterTopology(final KafkaFutureImpl<Void> removeTopologyFuture,
                                                final String topologyName) {
        try {
            lock();
            log.info("Beginning removal of NamedTopology {}, old topology version is {}", topologyName, version.topologyVersion.get());
            // Bump the version first so threads can observe that an update is in progress
            version.topologyVersion.incrementAndGet();
            version.activeTopologyUpdateListeners.add(new TopologyVersionListener(topologyVersion(), removeTopologyFuture));
            final InternalTopologyBuilder removedBuilder = builders.remove(topologyName);
            // Release this topology's topics/patterns so a future topology may claim them
            removedBuilder.fullSourceTopicNames().forEach(allInputTopics::remove);
            removedBuilder.allSourcePatternStrings().forEach(allInputTopics::remove);
            log.info("Finished removing NamedTopology {}, topology version was updated to {}", topologyName, version.topologyVersion.get());
        } catch (final Throwable throwable) {
            log.error("Failed to remove NamedTopology {}, please retry.", topologyName);
            removeTopologyFuture.completeExceptionally(throwable);
        } finally {
            unlock();
        }
        return removeTopologyFuture;
    }

    public TaskConfig getTaskConfigFor(final TaskId taskId) {
        final InternalTopologyBuilder builder = lookupBuilderForTask(taskId);
        return builder.topologyConfigs().getTaskConfig();
    }

    public void buildAndRewriteTopology() {
        applyToEachBuilder(this::buildAndVerifyTopology);
    }

    // Rewrites and builds one topology, verifying that its input topics/patterns do not
    // overlap with any other topology's and that global state rules are respected.
    private void buildAndVerifyTopology(final InternalTopologyBuilder builder) {
        builder.rewriteTopology(config);
        builder.buildTopology();

        final Set<String> allInputTopicsCopy = new HashSet<>(allInputTopics);
        // As we go, check each topology for overlap in the set of input topics/patterns
        final int numInputTopics = allInputTopicsCopy.size();
        final List<String> inputTopics = builder.fullSourceTopicNames();
        final Collection<String> inputPatterns = builder.allSourcePatternStrings();

        final Set<String> newInputTopics = new HashSet<>(inputTopics);
        newInputTopics.addAll(inputPatterns);
        final int numNewInputTopics = newInputTopics.size();
        allInputTopicsCopy.addAll(newInputTopics);

        // If the union is smaller than the sum, some topic/pattern already belonged to another topology
        if (allInputTopicsCopy.size() != numInputTopics + numNewInputTopics) {
            inputTopics.retainAll(allInputTopicsCopy);
            inputPatterns.retainAll(allInputTopicsCopy);
            log.error("Tried to add the NamedTopology {} but it had overlap with other input topics {} or patterns {}",
                      builder.topologyName(), inputTopics, inputPatterns);
            throw new TopologyException("Named Topologies may not subscribe to the same input topics or patterns");
        }

        final ProcessorTopology globalTopology = builder.buildGlobalStateTopology();
        if (globalTopology != null) {
            if (builder.topologyName() != null) {
                throw new IllegalStateException("Global state stores are not supported with Named Topologies");
            } else if (this.globalTopology != null) {
                throw new TopologyException("Topology builder had global state, but global topology has already been set");
            } else {
                this.globalTopology = globalTopology;
                globalStateStores.putAll(builder.globalStateStores());
            }
        }
        // Only commit the new topics into the shared set once all verification has passed
        allInputTopics.addAll(newInputTopics);
    }

    public int getNumStreamThreads(final StreamsConfig config) {
        final int configuredNumStreamThreads = config.getInt(StreamsConfig.NUM_STREAM_THREADS_CONFIG);

        // If there are named topologies but some are empty, this indicates a bug in user code
        if (hasNamedTopologies()) {
            if (hasNoLocalTopology()) {
                log.error("Detected a named topology with no input topics, a named topology may not be empty.");
                throw new TopologyException("Topology has no stream threads and no global threads, " +
                                                "must subscribe to at least one source topic or pattern.");
            }
        } else {
            // If both the global and non-global topologies are empty, this indicates a bug in user code
            if (hasNoLocalTopology() && !hasGlobalTopology()) {
                log.error("Topology with no input topics will create no stream threads and no global thread.");
                throw new TopologyException("Topology has no stream threads and no global threads, " +
                                                "must subscribe to at least one source topic or global table.");
            }
        }

        // Lastly we check for an empty non-global topology and override the threads to zero if set otherwise
        if (configuredNumStreamThreads != 0 && hasNoLocalTopology()) {
            log.info("Overriding number of StreamThreads to zero for global-only topology");
            return 0;
        }

        return configuredNumStreamThreads;
    }

    /**
     * @return true iff the app is using named topologies, or was started up with no topology at all
     */
    public boolean hasNamedTopologies() {
        return !builders.containsKey(UNNAMED_TOPOLOGY);
    }

    public Set<String> namedTopologiesView() {
        return hasNamedTopologies() ? Collections.unmodifiableSet(builders.keySet()) : emptySet();
    }

    /**
     * @return true iff any of the topologies have a global topology
     */
    public boolean hasGlobalTopology() {
        return evaluateConditionIsTrueForAnyBuilders(InternalTopologyBuilder::hasGlobalStores);
    }

    /**
     * @return true iff any of the topologies have no local (aka non-global) topology
     */
    public boolean hasNoLocalTopology() {
        return evaluateConditionIsTrueForAnyBuilders(InternalTopologyBuilder::hasNoLocalTopology);
    }

    public boolean hasPersistentStores() {
        // If the app is using named topologies, there may not be any persistent state when it first starts up
        // but a new NamedTopology may introduce it later, so we must return true
        if (hasNamedTopologies()) {
            return true;
        }
        return evaluateConditionIsTrueForAnyBuilders(InternalTopologyBuilder::hasPersistentStores);
    }

    public boolean hasStore(final String name) {
        return evaluateConditionIsTrueForAnyBuilders(b -> b.hasStore(name));
    }

    public boolean hasOffsetResetOverrides() {
        // Return true if using named topologies, as there may be named topologies added later which do have overrides
        return hasNamedTopologies() || evaluateConditionIsTrueForAnyBuilders(InternalTopologyBuilder::hasOffsetResetOverrides);
    }

    public OffsetResetStrategy offsetResetStrategy(final String topic) {
        // Search every topology for one that subscribes to this topic
        for (final InternalTopologyBuilder builder : builders.values()) {
            if (builder.containsTopic(topic)) {
                return builder.offsetResetStrategy(topic);
            }
        }
        log.warn("Unable to look up offset reset strategy for topic {} " +
                     "as this topic does not appear in the sources of any of the current topologies: {}\n " +
                     "This may be due to natural race condition when removing a topology but it should not " +
                     "persist or appear frequently.",
                 topic, namedTopologiesView()
        );
        return null;
    }

    public Collection<String> fullSourceTopicNamesForTopology(final String topologyName) {
        Objects.requireNonNull(topologyName, "topology name must not be null");
        return lookupBuilderForNamedTopology(topologyName).fullSourceTopicNames();
    }

    public Collection<String> allFullSourceTopicNames() {
        final List<String> sourceTopics = new ArrayList<>();
        applyToEachBuilder(b -> sourceTopics.addAll(b.fullSourceTopicNames()));
        return sourceTopics;
    }

    // Combines every topology's source pattern into a single alternation ("|") pattern
    Pattern sourceTopicPattern() {
        final StringBuilder patternBuilder = new StringBuilder();

        applyToEachBuilder(b -> {
            final String patternString = b.sourceTopicPatternString();
            if (patternString.length() > 0) {
                patternBuilder.append(patternString).append("|");
            }
        });

        if (patternBuilder.length() > 0) {
            // Drop the trailing "|" before compiling
            patternBuilder.setLength(patternBuilder.length() - 1);
            return Pattern.compile(patternBuilder.toString());
        } else {
            return EMPTY_ZERO_LENGTH_PATTERN;
        }
    }

    public boolean usesPatternSubscription() {
        return evaluateConditionIsTrueForAnyBuilders(InternalTopologyBuilder::usesPatternSubscription);
    }

    // Can be empty if app is started up with no Named Topologies, in order to add them on later
    public boolean isEmpty() {
        return builders.isEmpty();
    }

    public String topologyDescriptionString() {
        if (isEmpty()) {
            return "";
        }

        final StringBuilder sb = new StringBuilder();

        applyToEachBuilder(b -> sb.append(b.describe().toString()));

        return sb.toString();
    }

    /**
     * @return the {@link ProcessorTopology subtopology} built for this task, guaranteed to be non-null
     *
     * @throws UnknownTopologyException  if the task is from a named topology that this client isn't aware of
     */
    public ProcessorTopology buildSubtopology(final TaskId task) {
        final InternalTopologyBuilder builder = lookupBuilderForTask(task);
        return builder.buildSubtopology(task.subtopology());
    }

    public ProcessorTopology globalTaskTopology() {
        if (hasNamedTopologies()) {
            throw new IllegalStateException("Global state stores are not supported with Named Topologies");
        }
        return globalTopology;
    }

    public Map<String, StateStore> globalStateStores() {
        return globalStateStores;
    }

    public Map<String, List<String>> stateStoreNameToSourceTopicsForTopology(final String topologyName) {
        return lookupBuilderForNamedTopology(topologyName).stateStoreNameToFullSourceTopicNames();
    }

    public Map<String, List<String>> stateStoreNameToSourceTopics() {
        final Map<String, List<String>> stateStoreNameToSourceTopics = new HashMap<>();
        applyToEachBuilder(b -> stateStoreNameToSourceTopics.putAll(b.stateStoreNameToFullSourceTopicNames()));
        return stateStoreNameToSourceTopics;
    }

    public String getStoreForChangelogTopic(final String topicName) {
        for (final InternalTopologyBuilder builder : builders.values()) {
            final String store = builder.getStoreForChangelogTopic(topicName);
            if (store != null) {
                return store;
            }
        }
        log.warn("Unable to locate any store for topic {}", topicName);
        return "";
    }

    /**
     * @param storeName       the name of the state store
     * @param topologyName    the name of the topology to search for stores within
     * @return topics subscribed from source processors that are connected to these state stores
     */
    public Collection<String> sourceTopicsForStore(final String storeName, final String topologyName) {
        return lookupBuilderForNamedTopology(topologyName).sourceTopicsForStore(storeName);
    }

    public static String getTopologyNameOrElseUnnamed(final String topologyName) {
        return topologyName == null ? UNNAMED_TOPOLOGY : topologyName;
    }

    /**
     * @param topologiesToExclude the names of any topologies to exclude from the returned topic groups,
     *                            eg because they have missing source topics and can't be processed yet
     *
     * @return                    flattened map of all subtopologies (from all topologies) to topics info
     */
    public Map<Subtopology, TopicsInfo> subtopologyTopicsInfoMapExcluding(final Set<String> topologiesToExclude) {
        final Map<Subtopology, TopicsInfo> subtopologyTopicsInfo = new HashMap<>();
        applyToEachBuilder(b -> {
            if (!topologiesToExclude.contains(b.topologyName())) {
                subtopologyTopicsInfo.putAll(b.subtopologyToTopicsInfo());
            }
        });
        return subtopologyTopicsInfo;
    }

    /**
     * @return    map from topology to its subtopologies and their topics info
     */
    public Map<String, Map<Subtopology, TopicsInfo>> topologyToSubtopologyTopicsInfoMap() {
        final Map<String, Map<Subtopology, TopicsInfo>> topologyToSubtopologyTopicsInfoMap = new HashMap<>();
        applyToEachBuilder(b -> topologyToSubtopologyTopicsInfoMap.put(b.topologyName(), b.subtopologyToTopicsInfo()));
        return topologyToSubtopologyTopicsInfoMap;
    }

    public Map<String, List<String>> nodeToSourceTopics(final TaskId task) {
        return lookupBuilderForTask(task).nodeToSourceTopics();
    }

    void addSubscribedTopicsFromMetadata(final Set<String> topics, final String logPrefix) {
        applyToEachBuilder(b -> b.addSubscribedTopicsFromMetadata(topics, logPrefix));
    }

    void addSubscribedTopicsFromAssignment(final Set<TopicPartition> partitions, final String logPrefix) {
        applyToEachBuilder(b -> b.addSubscribedTopicsFromAssignment(partitions, logPrefix));
    }

    public Collection<Set<String>> copartitionGroups() {
        final List<Set<String>> copartitionGroups = new ArrayList<>();
        applyToEachBuilder(b -> copartitionGroups.addAll(b.copartitionGroups()));
        return copartitionGroups;
    }

    /**
     * @return the {@link InternalTopologyBuilder} for this task's topology, guaranteed to be non-null
     *
     * @throws UnknownTopologyException  if the task is from a named topology that this client isn't aware of
     */
    private InternalTopologyBuilder lookupBuilderForTask(final TaskId task) {
        final InternalTopologyBuilder builder = task.topologyName() == null ?
            builders.get(UNNAMED_TOPOLOGY) :
            builders.get(task.topologyName());
        if (builder == null) {
            throw new UnknownTopologyException("Unable to locate topology builder", task.topologyName());
        } else {
            return builder;
        }
    }

    public Collection<NamedTopology> getAllNamedTopologies() {
        return builders.values()
            .stream()
            .map(InternalTopologyBuilder::namedTopology)
            .collect(Collectors.toSet());
    }

    /**
     * @return the InternalTopologyBuilder for the NamedTopology with the given {@code topologyName}
     *         or the builder for a regular Topology if {@code topologyName} is {@code null},
     *         else returns {@code null} if {@code topologyName} is non-null but no such NamedTopology exists
     */
    public InternalTopologyBuilder lookupBuilderForNamedTopology(final String topologyName) {
        if (topologyName == null) {
            return builders.get(UNNAMED_TOPOLOGY);
        } else {
            return builders.get(topologyName);
        }
    }

    private boolean evaluateConditionIsTrueForAnyBuilders(final Function<InternalTopologyBuilder, Boolean> condition) {
        for (final InternalTopologyBuilder builder : builders.values()) {
            if (condition.apply(builder)) {
                return true;
            }
        }
        return false;
    }

    private void applyToEachBuilder(final Consumer<InternalTopologyBuilder> function) {
        for (final InternalTopologyBuilder builder : builders.values()) {
            function.accept(builder);
        }
    }

    // Identifies a subtopology by its node group id plus the (possibly null) name of its topology
    public static class Subtopology implements Comparable<Subtopology> {
        final int nodeGroupId;
        final String namedTopology;

        public Subtopology(final int nodeGroupId, final String namedTopology) {
            this.nodeGroupId = nodeGroupId;
            this.namedTopology = namedTopology;
        }

        @Override
        public boolean equals(final Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            final Subtopology that = (Subtopology) o;
            return nodeGroupId == that.nodeGroupId &&
                Objects.equals(namedTopology, that.namedTopology);
        }

        @Override
        public int hashCode() {
            return Objects.hash(nodeGroupId, namedTopology);
        }

        @Override
        public int compareTo(final Subtopology other) {
            if (nodeGroupId != other.nodeGroupId) {
                return Integer.compare(nodeGroupId, other.nodeGroupId);
            }
            // Sort a null topology name before any non-null name
            if (namedTopology == null) {
                return other.namedTopology == null ? 0 : -1;
            }
            if (other.namedTopology == null) {
                return 1;
            }

            // Both not null
            return namedTopology.compareTo(other.namedTopology);
        }
    }
}
apache/kafka
streams/src/main/java/org/apache/kafka/streams/processor/internals/TopologyMetadata.java
214,440
/*
 * (C) Copyright 2008-2023, by Ilya Razenshteyn and Contributors.
 *
 * JGraphT : a free Java graph-theory library
 *
 * See the CONTRIBUTORS.md file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0, or the
 * GNU Lesser General Public License v2.1 or later
 * which is available at
 * http://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html.
 *
 * SPDX-License-Identifier: EPL-2.0 OR LGPL-2.1-or-later
 */
package org.jgrapht.alg.flow;

import org.jgrapht.*;
import org.jgrapht.alg.util.extension.*;

import java.util.*;

/**
 * This class computes a maximum flow in a
 * <a href = "http://en.wikipedia.org/wiki/Flow_network">flow network</a> using
 * <a href = "http://en.wikipedia.org/wiki/Edmonds-Karp_algorithm">Edmonds-Karp algorithm</a>. Given
 * is a weighted directed or undirected graph $G(V,E)$ with vertex set $V$ and edge set $E$. Each
 * edge $e\in E$ has an associated non-negative capacity $u_e$. The maximum flow problem involves
 * finding a feasible flow from a source vertex $s$ to a sink vertex $t$ which is maximum. The
 * amount of flow $f_e$ through any edge $e$ cannot exceed capacity $u_e$. Moreover, flow
 * conservation must hold: the sum of flows entering a node must equal the sum of flows exiting that
 * node, except for the source and the sink nodes.
 * <p>
 * Mathematically, the maximum flow problem is stated as follows: \[ \begin{align} \max~&amp;
 * \sum_{e\in \delta^+(s)}f_e &amp;\\ \mbox{s.t. }&amp;\sum_{e\in \delta^-(i)} f_e=\sum_{e\in
 * \delta^+(i)} f_e &amp; \forall i\in V\setminus\{s,t\}\\ &amp;0\leq f_e \leq u_e &amp; \forall
 * e\in E \end{align} \] Here $\delta^+(i)$ resp $\delta^-(i)$ denote resp the outgoing and incoming
 * edges of vertex $i$.
 * <p>
 * When the input graph is undirected, an edge $(i,j)$ is treated as two directed arcs: $(i,j)$ and
 * $(j,i)$. In such a case, there is the additional restriction that the flow can only go in one
 * direction: the flow either goes form $i$ to $j$, or from $j$ to $i$, but there cannot be a
 * positive flow on $(i,j)$ and $(j,i)$ simultaneously.
 * <p>
 * The runtime complexity of this class is $O(nm^2)$, where $n$ is the number of vertices and $m$
 * the number of edges in the graph. For a more efficient algorithm, consider using
 * {@link PushRelabelMFImpl} instead.
 *
 * <p>
 * This class can also compute minimum s-t cuts. Effectively, to compute a minimum s-t cut, the
 * implementation first computes a minimum s-t flow, after which a BFS is run on the residual graph.
 *
 * <p>
 * For more details see Andrew V. Goldberg's <i>Combinatorial Optimization (Lecture Notes)</i>.
 *
 * Note: even though the algorithm accepts any kind of graph, currently only Simple directed and
 * undirected graphs are supported (and tested!).
 *
 * @param <V> the graph vertex type
 * @param <E> the graph edge type
 *
 * @author Ilya Razensteyn
 */
public final class EdmondsKarpMFImpl<V, E>
    extends MaximumFlowAlgorithmBase<V, E>
{
    /* current source vertex */
    private VertexExtension currentSource;
    /* current sink vertex */
    private VertexExtension currentSink;

    // Factories used by the base class to attach per-vertex / per-edge algorithm state
    private final ExtensionFactory<VertexExtension> vertexExtensionsFactory;
    private final ExtensionFactory<AnnotatedFlowEdge> edgeExtensionsFactory;

    /**
     * Constructs {@code MaximumFlow} instance to work with <i>a copy of</i>
     * {@code network}. Current source and sink are set to {@code null}. If
     * {@code network} is weighted, then capacities are weights, otherwise all capacities are
     * equal to one. Doubles are compared using {@link #DEFAULT_EPSILON} tolerance.
     *
     * @param network network, where maximum flow will be calculated
     */
    public EdmondsKarpMFImpl(Graph<V, E> network)
    {
        this(network, DEFAULT_EPSILON);
    }

    /**
     * Constructs {@code MaximumFlow} instance to work with <i>a copy of</i>
     * {@code network}. Current source and sink are set to {@code null}. If
     * {@code network} is weighted, then capacities are weights, otherwise all capacities are
     * equal to one.
     *
     * @param network network, where maximum flow will be calculated
     * @param epsilon tolerance for comparing doubles
     */
    public EdmondsKarpMFImpl(Graph<V, E> network, double epsilon)
    {
        super(network, epsilon);
        this.vertexExtensionsFactory = () -> new VertexExtension();
        this.edgeExtensionsFactory = () -> new AnnotatedFlowEdge();

        // Validate arguments; note these run after the super() call, which may itself
        // reject a null network first.
        if (network == null) {
            throw new NullPointerException("network is null");
        }
        if (epsilon <= 0) {
            throw new IllegalArgumentException("invalid epsilon (must be positive)");
        }
        // All edge capacities must be non-negative (within the configured tolerance)
        for (E e : network.edgeSet()) {
            if (network.getEdgeWeight(e) < -epsilon) {
                throw new IllegalArgumentException("invalid capacity (must be non-negative)");
            }
        }
    }

    /**
     * Sets current source to {@code source}, current sink to {@code sink}, then
     * calculates maximum flow from {@code source} to {@code sink}. Note, that
     * {@code source} and {@code sink} must be vertices of the {@code network}
     * passed to the constructor, and they must be different.
     *
     * @param source source vertex
     * @param sink sink vertex
     *
     * @return a maximum flow
     */
    public MaximumFlow<E> getMaximumFlow(V source, V sink)
    {
        this.calculateMaximumFlow(source, sink);
        maxFlow = composeFlow();
        return new MaximumFlowImpl<>(maxFlowValue, maxFlow);
    }

    /**
     * Sets current source to {@code source}, current sink to {@code sink}, then
     * calculates maximum flow from {@code source} to {@code sink}. Note, that
     * {@code source} and {@code sink} must be vertices of the {@code network}
     * passed to the constructor, and they must be different. If desired, a flow map
     * can be queried afterwards; this will not require a new invocation of the algorithm.
     *
     * @param source source vertex
     * @param sink sink vertex
     *
     * @return the value of the maximum flow
     */
    public double calculateMaximumFlow(V source, V sink)
    {
        super.init(source, sink, vertexExtensionsFactory, edgeExtensionsFactory);

        if (!network.containsVertex(source)) {
            throw new IllegalArgumentException("invalid source (null or not from this network)");
        }
        if (!network.containsVertex(sink)) {
            throw new IllegalArgumentException("invalid sink (null or not from this network)");
        }

        if (source.equals(sink)) {
            throw new IllegalArgumentException("source is equal to sink");
        }

        currentSource = getVertexExtension(source);
        currentSink = getVertexExtension(sink);

        // Repeatedly find augmenting paths via BFS and push flow along them,
        // until the sink is no longer reachable in the residual graph.
        for (;;) {
            breadthFirstSearch();

            if (!currentSink.visited) {
                break;
            }

            maxFlowValue += augmentFlow();
        }

        return maxFlowValue;
    }

    /**
     * Method which finds a path from source to sink the in the residual graph. Note that this
     * method tries to find multiple paths at once. Once a single path has been discovered, no new
     * nodes are added to the queue, but nodes which are already in the queue are fully explored. As
     * such there's a chance that multiple paths are discovered.
     */
    private void breadthFirstSearch()
    {
        // Reset per-vertex BFS state before each search
        for (V v : network.vertexSet()) {
            getVertexExtension(v).visited = false;
            getVertexExtension(v).lastArcs = null;
        }

        Queue<VertexExtension> queue = new ArrayDeque<>();
        queue.offer(currentSource);

        currentSource.visited = true;
        currentSource.excess = Double.POSITIVE_INFINITY;

        currentSink.excess = 0.0;

        boolean seenSink = false;

        while (queue.size() != 0) {
            VertexExtension ux = queue.poll();

            for (AnnotatedFlowEdge ex : ux.getOutgoing()) {
                // Only follow arcs with remaining residual capacity
                if (comparator.compare(ex.flow, ex.capacity) < 0) {
                    VertexExtension vx = ex.getTarget();

                    if (vx == currentSink) {
                        // The sink may be reached via several arcs; record them all so
                        // augmentFlow() can push flow along multiple paths at once
                        vx.visited = true;

                        if (vx.lastArcs == null) {
                            vx.lastArcs = new ArrayList<>();
                        }

                        vx.lastArcs.add(ex);
                        vx.excess += Math.min(ux.excess, ex.capacity - ex.flow);

                        seenSink = true;
                    } else if (!vx.visited) {
                        vx.visited = true;
                        // Bottleneck capacity along the path so far
                        vx.excess = Math.min(ux.excess, ex.capacity - ex.flow);

                        vx.lastArcs = Collections.singletonList(ex);

                        // Once the sink has been seen, stop growing the frontier;
                        // nodes already queued are still fully explored
                        if (!seenSink) {
                            queue.add(vx);
                        }
                    }
                }
            }
        }
    }

    /**
     * For all paths which end in the sink. trace them back to the source and push flow through
     * them.
     *
     * @return total increase in flow from source to sink
     */
    private double augmentFlow()
    {
        double flowIncrease = 0;
        Set<VertexExtension> seen = new HashSet<>();

        for (AnnotatedFlowEdge ex : currentSink.lastArcs) {
            double deltaFlow = Math.min(ex.getSource().excess, ex.capacity - ex.flow);

            // Only push along this final arc if the rest of the path back to the
            // source could be augmented without revisiting a vertex
            if (augmentFlowAlongInternal(deltaFlow, ex.<VertexExtension> getSource(), seen)) {
                pushFlowThrough(ex, deltaFlow);
                flowIncrease += deltaFlow;
            }
        }
        return flowIncrease;
    }

    // Recursively walks lastArcs back toward the source, pushing deltaFlow through each
    // arc on success; `seen` prevents two paths from sharing an intermediate vertex.
    private boolean augmentFlowAlongInternal(
        double deltaFlow, VertexExtension node, Set<VertexExtension> seen)
    {
        if (node == currentSource) {
            return true;
        }
        if (seen.contains(node)) {
            return false;
        }

        seen.add(node);

        AnnotatedFlowEdge prev = node.lastArcs.get(0);
        if (augmentFlowAlongInternal(deltaFlow, prev.<VertexExtension> getSource(), seen)) {
            pushFlowThrough(prev, deltaFlow);
            return true;
        }

        return false;
    }

    private VertexExtension getVertexExtension(V v)
    {
        return (VertexExtension) vertexExtensionManager.getExtension(v);
    }

    // Per-vertex algorithm state attached via the extension mechanism
    class VertexExtension
        extends VertexExtensionBase
    {
        boolean visited; // this mark is used during BFS to mark visited nodes

        List<AnnotatedFlowEdge> lastArcs; // last arc(-s) in the shortest path used to reach this
                                          // vertex
    }
}
jgrapht/jgrapht
jgrapht-core/src/main/java/org/jgrapht/alg/flow/EdmondsKarpMFImpl.java
214,441
package edu.stanford.nlp.process;

import java.io.Serializable;
import java.util.List;
import java.util.Collection;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.trees.Tree;

/** An interface for segmenting strings into words
 *  (in languages whose text is not already segmented into words,
 *  i.e. "unwordsegmented" languages such as Chinese).
 *
 *  @author Galen Andrew
 */
public interface WordSegmenter extends Serializable {

  /** Prepares the segmenter for training.
   *  @param numTrees presumably the expected number of training trees — TODO confirm
   *                  why this is a double rather than an int
   */
  void initializeTraining(double numTrees);

  /** Trains the segmenter on a collection of parse trees. */
  void train(Collection<Tree> trees);

  /** Trains the segmenter on a single parse tree. */
  void train(Tree trees);

  /** Trains the segmenter on one tagged sentence. */
  void train(List<TaggedWord> sentence);

  /** Finalizes any training state; call after all train(...) calls. */
  void finishTraining();

  /** Loads a previously saved segmenter model from {@code filename}. */
  void loadSegmenter(String filename);

  /** Segments the string {@code s} into a list of words. */
  List<HasWord> segment(String s);

}
stanfordnlp/CoreNLP
src/edu/stanford/nlp/process/WordSegmenter.java
214,442
/* * Property.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.util; import java.lang.reflect.Method; /** * Gets a property of another object using introspection. 
* * @author Andrew Rambaut * @author Alexei Drummond * @version $Id: Property.java,v 1.19 2005/05/24 20:26:01 rambaut Exp $ */ public class Property implements Attribute { private Object object = null; private Method getter = null; private Object argument = null; private String name = null; public Property(Object object, String name) { this(object, name, null); } public Property(Object object, String name, Object argument) { this.name = name; this.argument = argument; this.object = object; StringBuffer getterName = new StringBuffer("get"); getterName.append(name.substring(0, 1).toUpperCase()); getterName.append(name.substring(1)); Class c = object.getClass(); //System.out.println(getterName + "(" + argument + ")"); try { if (argument != null) getter = c.getMethod(getterName.toString(), new Class[]{argument.getClass()}); else getter = c.getMethod(getterName.toString(), (Class[]) null); } catch (NoSuchMethodException e) { } } public Method getGetter() { return getter; } //public Object getObject() { return object; } public String getAttributeName() { if (argument == null) return name; return name + "." + argument; } public Object getAttributeValue() { if (object == null || getter == null) return null; Object result = null; Object[] args = null; if (argument != null) args = new Object[]{argument}; try { result = getter.invoke(object, args); } catch (Exception e) { e.printStackTrace(System.out); throw new RuntimeException(e.getMessage()); } return result; } public String getPropertyName() { return name; } public String toString() { return getAttributeValue().toString(); } }
maxbiostat/beast-mcmc
src/dr/util/Property.java
214,443
/* * Attribute.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.util; import java.io.Serializable; /** * An immutable attribute has a name and value. * * @author Alexei Drummond * @version $Id: Attribute.java,v 1.24 2005/05/24 20:26:01 rambaut Exp $ */ public interface Attribute<T> extends Serializable { public final static String ATTRIBUTE = "att"; public final static String NAME = "name"; public final static String VALUE = "value"; String getAttributeName(); T getAttributeValue(); public class Default<T> implements Attribute<T> { public Default(String name, T value) { this.name = name; this.value = value; } public String getAttributeName() { return name; } public T getAttributeValue() { return value; } public String toString() { return name + ": " + value; } private final String name; private final T value; } }
maxbiostat/beast-mcmc
src/dr/util/Attribute.java
214,444
/* * OSType.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.app.util; /** * @author Walter Xie */ public enum OSType { WINDOWS, MAC, UNIX_LINUX; static OSType detect() { if (os.indexOf("mac") >= 0) { return MAC; } if (os.indexOf("win") >= 0) { return WINDOWS; } if (os.indexOf( "nix") >=0 || os.indexOf( "nux") >=0) { return UNIX_LINUX; } return null; } public static boolean isWindows(){ //windows return (os.indexOf( "win" ) >= 0); } public static boolean isMac(){ //Mac return (os.indexOf( "mac" ) >= 0); } public static boolean isUnixOrLinux(){ //linux or unix return (os.indexOf( "nix") >=0 || os.indexOf( "nux") >=0); } public String toString() { return os; } public String version() { return System.getProperty("os.version"); } static final String os = System.getProperty("os.name").toLowerCase(); }
maxbiostat/beast-mcmc
src/dr/app/util/OSType.java
214,445
package org.qii.weiciyuan.support.utils; import org.qii.weiciyuan.bean.MessageBean; import android.content.IntentFilter; /** * User: qii * Date: 13-4-21 */ public class AppEventAction { //use ordered broadcast to decide to use which method to show new message notification, //Android notification bar or Weiciyuan activity if user has opened this app, //activity can interrupt this broadcast //Must equal AndroidManifest's .othercomponent.unreadnotification.UnreadMsgReceiver action name public static final String NEW_MSG_PRIORITY_BROADCAST = "org.qii.weiciyuan.newmsg.priority"; //mentions weibo, mentions comment, comments to me fragment use this broadcast to receive actual data public static final String NEW_MSG_BROADCAST = "org.qii.weiciyuan.newmsg"; public static IntentFilter getSystemMusicBroadcastFilterAction() { IntentFilter musicFilter = new IntentFilter(); musicFilter.addAction("com.android.music.metachanged"); musicFilter.addAction("com.android.music.playstatechanged"); musicFilter.addAction("com.android.music.playbackcomplete"); musicFilter.addAction("com.android.music.queuechanged"); musicFilter.addAction("com.htc.music.metachanged"); musicFilter.addAction("fm.last.android.metachanged"); musicFilter.addAction("com.sec.android.app.music.metachanged"); musicFilter.addAction("com.nullsoft.winamp.metachanged"); musicFilter.addAction("com.amazon.mp3.metachanged"); musicFilter.addAction("com.miui.player.metachanged"); musicFilter.addAction("com.real.IMP.metachanged"); musicFilter.addAction("com.sonyericsson.music.metachanged"); musicFilter.addAction("com.rdio.android.metachanged"); musicFilter.addAction("com.samsung.sec.android.MusicPlayer.metachanged"); musicFilter.addAction("com.andrew.apollo.metachanged"); return musicFilter; } public static final String SLIDING_MENU_CLOSED_BROADCAST = "org.qii.weiciyuan.slidingmenu_closed"; private static final String SEND_COMMENT_OR_REPLY_SUCCESSFULLY = "org.qii.weiciyuan.SEND.COMMENT.COMPLETED"; private static final String 
SEND_REPOST_SUCCESSFULLY = "org.qii.weiciyuan.SEND.REPOST.COMPLETED"; public static String buildSendCommentOrReplySuccessfullyAction(MessageBean oriMsg) { return SEND_COMMENT_OR_REPLY_SUCCESSFULLY + oriMsg.getId(); } public static String buildSendRepostSuccessfullyAction(MessageBean oriMsg) { return SEND_REPOST_SUCCESSFULLY + oriMsg.getId(); } }
qii/weiciyuan
src/org/qii/weiciyuan/support/utils/AppEventAction.java
214,446
// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE package org.bytedeco.opencv.global; import org.bytedeco.opencv.opencv_text.*; import org.bytedeco.javacpp.annotation.Index; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; import static org.bytedeco.javacpp.presets.javacpp.*; import static org.bytedeco.openblas.global.openblas_nolapack.*; import static org.bytedeco.openblas.global.openblas.*; import org.bytedeco.opencv.opencv_core.*; import static org.bytedeco.opencv.global.opencv_core.*; import org.bytedeco.opencv.opencv_imgproc.*; import static org.bytedeco.opencv.global.opencv_imgproc.*; import org.bytedeco.opencv.opencv_dnn.*; import static org.bytedeco.opencv.global.opencv_dnn.*; import static org.bytedeco.opencv.global.opencv_imgcodecs.*; import org.bytedeco.opencv.opencv_videoio.*; import static org.bytedeco.opencv.global.opencv_videoio.*; import org.bytedeco.opencv.opencv_highgui.*; import static org.bytedeco.opencv.global.opencv_highgui.*; import org.bytedeco.opencv.opencv_flann.*; import static org.bytedeco.opencv.global.opencv_flann.*; import org.bytedeco.opencv.opencv_features2d.*; import static org.bytedeco.opencv.global.opencv_features2d.*; import org.bytedeco.opencv.opencv_ml.*; import static org.bytedeco.opencv.global.opencv_ml.*; public class opencv_text extends org.bytedeco.opencv.presets.opencv_text { static { Loader.load(); } // Targeting ../opencv_text/IntDeque.java // Targeting ../opencv_text/ERStatVector.java // Targeting ../opencv_text/ERStatVectorVector.java // Targeting ../opencv_text/IntVector.java // Targeting ../opencv_text/FloatVector.java // Targeting ../opencv_text/DoubleVector.java // Parsed from <opencv2/text.hpp> /* By downloading, copying, installing or using the software you agree to this license. If you do not agree to this license, do not download, install, copy or use the software. 
License Agreement For Open Source Computer Vision Library (3-clause BSD License) Copyright (C) 2013, OpenCV Foundation, all rights reserved. Third party copyrights are property of their respective owners. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of the copyright holders nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall copyright holders or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. 
*/ // #ifndef __OPENCV_TEXT_HPP__ // #define __OPENCV_TEXT_HPP__ // #include "opencv2/text/erfilter.hpp" // #include "opencv2/text/ocr.hpp" // #include "opencv2/text/textDetector.hpp" // #include "opencv2/text/swt_text_detection.hpp" /** \defgroup text Scene Text Detection and Recognition <p> The opencv_text module provides different algorithms for text detection and recognition in natural scene images. <p> \{ \defgroup text_detect Scene Text Detection <p> Class-specific Extremal Regions for Scene Text Detection -------------------------------------------------------- <p> The scene text detection algorithm described below has been initially proposed by Lukás Neumann & Jiri Matas \cite Neumann11. The main idea behind Class-specific Extremal Regions is similar to the MSER in that suitable Extremal Regions (ERs) are selected from the whole component tree of the image. However, this technique differs from MSER in that selection of suitable ERs is done by a sequential classifier trained for character detection, i.e. dropping the stability requirement of MSERs and selecting class-specific (not necessarily stable) regions. <p> The component tree of an image is constructed by thresholding by an increasing value step-by-step from 0 to 255 and then linking the obtained connected components from successive levels in a hierarchy by their inclusion relation: <p> ![image](pics/component_tree.png) <p> The component tree may contain a huge number of regions even for a very simple image as shown in the previous image. This number can easily reach the order of 1 x 10\^6 regions for an average 1 Megapixel image. In order to efficiently select suitable regions among all the ERs the algorithm make use of a sequential classifier with two differentiated stages. 
<p> In the first stage incrementally computable descriptors (area, perimeter, bounding box, and Euler's number) are computed (in O(1)) for each region r and used as features for a classifier which estimates the class-conditional probability p(r|character). Only the ERs which correspond to local maximum of the probability p(r|character) are selected (if their probability is above a global limit p_min and the difference between local maximum and local minimum is greater than a delta_min value). <p> In the second stage, the ERs that passed the first stage are classified into character and non-character classes using more informative but also more computationally expensive features. (Hole area ratio, convex hull ratio, and the number of outer boundary inflexion points). <p> This ER filtering process is done in different single-channel projections of the input image in order to increase the character localization recall. <p> After the ER filtering is done on each input channel, character candidates must be grouped in high-level text blocks (i.e. words, text lines, paragraphs, ...). The opencv_text module implements two different grouping algorithms: the Exhaustive Search algorithm proposed in \cite Neumann12 for grouping horizontally aligned text, and the method proposed by Lluis Gomez and Dimosthenis Karatzas in \cite Gomez13 \cite Gomez14 for grouping arbitrary oriented text (see erGrouping). <p> To see the text detector at work, have a look at the textdetection demo: <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/textdetection.cpp> <p> \defgroup text_recognize Scene Text Recognition \} */ // #endif // Parsed from <opencv2/text/erfilter.hpp> /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. 
// If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ // #ifndef __OPENCV_TEXT_ERFILTER_HPP__ // #define __OPENCV_TEXT_ERFILTER_HPP__ // #include "opencv2/core.hpp" // #include <vector> // #include <deque> // #include <string> // Targeting ../opencv_text/ERStat.java // Targeting ../opencv_text/ERFilter.java /** \brief Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm \cite Neumann12. <p> @param cb : Callback with the classifier. Default classifier can be implicitly load with function loadClassifierNM1, e.g. from file in samples/cpp/trained_classifierNM1.xml @param thresholdDelta : Threshold step in subsequent thresholds when extracting the component tree @param minArea : The minimum area (% of image size) allowed for retreived ER's @param maxArea : The maximum area (% of image size) allowed for retreived ER's @param minProbability : The minimum probability P(er|character) allowed for retreived ER's @param nonMaxSuppression : Whenever non-maximum suppression is done over the branch probabilities @param minProbabilityDiff : The minimum probability difference between local maxima and local minima ERs <p> The component tree of the image is extracted by a threshold increased step by step from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness, number of holes, and number of horizontal crossings) are computed for each ER and used as features for a classifier which estimates the class-conditional probability P(er|character). The value of P(er|character) is tracked using the inclusion relation of ER across all thresholds and only the ERs which correspond to local maximum of the probability P(er|character) are selected (if the local maximum of the probability is above a global limit pmin and the difference between local maximum and local minimum is greater than minProbabilityDiff). 
*/ @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM1(@Ptr ERFilter.Callback cb, int thresholdDelta/*=1*/, float minArea/*=(float)0.00025*/, float maxArea/*=(float)0.13*/, float minProbability/*=(float)0.4*/, @Cast("bool") boolean nonMaxSuppression/*=true*/, float minProbabilityDiff/*=(float)0.1*/); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM1(@Ptr ERFilter.Callback cb); /** \brief Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm \cite Neumann12. <p> @param cb : Callback with the classifier. Default classifier can be implicitly load with function loadClassifierNM2, e.g. from file in samples/cpp/trained_classifierNM2.xml @param minProbability : The minimum probability P(er|character) allowed for retreived ER's <p> In the second stage, the ERs that passed the first stage are classified into character and non-character classes using more informative but also more computationally expensive features. The classifier uses all the features calculated in the first stage and the following additional features: hole area ratio, convex hull ratio, and number of outer inflexion points. */ @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM2(@Ptr ERFilter.Callback cb, float minProbability/*=(float)0.3*/); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM2(@Ptr ERFilter.Callback cb); /** \brief Reads an Extremal Region Filter for the 1st stage classifier of N&M algorithm from the provided path e.g. 
/path/to/cpp/trained_classifierNM1.xml <p> \overload */ @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM1(@Str BytePointer filename, int thresholdDelta/*=1*/, float minArea/*=(float)0.00025*/, float maxArea/*=(float)0.13*/, float minProbability/*=(float)0.4*/, @Cast("bool") boolean nonMaxSuppression/*=true*/, float minProbabilityDiff/*=(float)0.1*/); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM1(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM1(@Str String filename, int thresholdDelta/*=1*/, float minArea/*=(float)0.00025*/, float maxArea/*=(float)0.13*/, float minProbability/*=(float)0.4*/, @Cast("bool") boolean nonMaxSuppression/*=true*/, float minProbabilityDiff/*=(float)0.1*/); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM1(@Str String filename); /** \brief Reads an Extremal Region Filter for the 2nd stage classifier of N&M algorithm from the provided path e.g. /path/to/cpp/trained_classifierNM2.xml <p> \overload */ @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM2(@Str BytePointer filename, float minProbability/*=(float)0.3*/); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM2(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM2(@Str String filename, float minProbability/*=(float)0.3*/); @Namespace("cv::text") public static native @Ptr ERFilter createERFilterNM2(@Str String filename); /** \brief Allow to implicitly load the default classifier when creating an ERFilter object. <p> @param filename The XML or YAML file with the classifier model (e.g. trained_classifierNM1.xml) <p> returns a pointer to ERFilter::Callback. 
*/ @Namespace("cv::text") public static native @Ptr ERFilter.Callback loadClassifierNM1(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr ERFilter.Callback loadClassifierNM1(@Str String filename); /** \brief Allow to implicitly load the default classifier when creating an ERFilter object. <p> @param filename The XML or YAML file with the classifier model (e.g. trained_classifierNM2.xml) <p> returns a pointer to ERFilter::Callback. */ @Namespace("cv::text") public static native @Ptr ERFilter.Callback loadClassifierNM2(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr ERFilter.Callback loadClassifierNM2(@Str String filename); /** computeNMChannels operation modes */ /** enum cv::text:: */ public static final int ERFILTER_NM_RGBLGrad = 0, ERFILTER_NM_IHSGrad = 1; /** \brief Compute the different channels to be processed independently in the N&M algorithm \cite Neumann12. <p> @param _src Source image. Must be RGB CV_8UC3. <p> @param _channels Output vector\<Mat\> where computed channels are stored. <p> @param _mode Mode of operation. Currently the only available options are: **ERFILTER_NM_RGBLGrad** (used by default) and **ERFILTER_NM_IHSGrad**. <p> In N&M algorithm, the combination of intensity (I), hue (H), saturation (S), and gradient magnitude channels (Grad) are used in order to obtain high localization recall. This implementation also provides an alternative combination of red (R), green (G), blue (B), lightness (L), and gradient magnitude (Grad). 
*/ @Namespace("cv::text") public static native void computeNMChannels(@ByVal Mat _src, @ByVal MatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal Mat _src, @ByVal MatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal Mat _src, @ByVal UMatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal Mat _src, @ByVal UMatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal Mat _src, @ByVal GpuMatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal Mat _src, @ByVal GpuMatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal UMat _src, @ByVal MatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal UMat _src, @ByVal MatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal UMat _src, @ByVal UMatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal UMat _src, @ByVal UMatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal UMat _src, @ByVal GpuMatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal UMat _src, @ByVal GpuMatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal GpuMat _src, @ByVal MatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal GpuMat _src, @ByVal MatVector _channels); @Namespace("cv::text") public static native void 
computeNMChannels(@ByVal GpuMat _src, @ByVal UMatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal GpuMat _src, @ByVal UMatVector _channels); @Namespace("cv::text") public static native void computeNMChannels(@ByVal GpuMat _src, @ByVal GpuMatVector _channels, int _mode/*=cv::text::ERFILTER_NM_RGBLGrad*/); @Namespace("cv::text") public static native void computeNMChannels(@ByVal GpuMat _src, @ByVal GpuMatVector _channels); /** text::erGrouping operation modes */ /** enum cv::text::erGrouping_Modes */ public static final int /** Exhaustive Search algorithm proposed in \cite Neumann11 for grouping horizontally aligned text. The algorithm models a verification function for all the possible ER sequences. The verification fuction for ER pairs consists in a set of threshold-based pairwise rules which compare measurements of two regions (height ratio, centroid angle, and region distance). The verification function for ER triplets creates a word text line estimate using Least Median-Squares fitting for a given triplet and then verifies that the estimate is valid (based on thresholds created during training). Verification functions for sequences larger than 3 are approximated by verifying that the text line parameters of all (sub)sequences of length 3 are consistent. */ ERGROUPING_ORIENTATION_HORIZ = 0, /** Text grouping method proposed in \cite Gomez13 \cite Gomez14 for grouping arbitrary oriented text. Regions are agglomerated by Single Linkage Clustering in a weighted feature space that combines proximity (x,y coordinates) and similarity measures (color, size, gradient magnitude, stroke width, etc.). SLC provides a dendrogram where each node represents a text group hypothesis. 
Then the algorithm finds the branches corresponding to text groups by traversing this dendrogram with a stopping rule that combines the output of a rotation invariant text group classifier and a probabilistic measure for hierarchical clustering validity assessment. <p> \note This mode is not supported due NFA code removal ( https://github.com/opencv/opencv_contrib/issues/2235 ) */ ERGROUPING_ORIENTATION_ANY = 1; /** \brief Find groups of Extremal Regions that are organized as text blocks. <p> @param img Original RGB or Greyscale image from wich the regions were extracted. <p> @param channels Vector of single channel images CV_8UC1 from wich the regions were extracted. <p> @param regions Vector of ER's retrieved from the ERFilter algorithm from each channel. <p> @param groups The output of the algorithm is stored in this parameter as set of lists of indexes to provided regions. <p> @param groups_rects The output of the algorithm are stored in this parameter as list of rectangles. <p> @param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY. <p> @param filename The XML or YAML file with the classifier model (e.g. samples/trained_classifier_erGrouping.xml). Only to use when grouping method is ERGROUPING_ORIENTATION_ANY. <p> @param minProbablity The minimum probability for accepting a group. Only to use when grouping method is ERGROUPING_ORIENTATION_ANY. 
*/ @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, 
@ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, 
@Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef 
PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int 
method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal MatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal UMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString BytePointer filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat img, @ByVal GpuMatVector channels, @ByRef ERStatVectorVector regions, @Cast("std::vector<std::vector<cv::Vec2i> >*") @ByRef PointVectorVector groups, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @StdString String filename/*=std::string()*/, float minProbablity/*=0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat image, @ByVal Mat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str BytePointer filename/*=cv::String()*/, float minProbablity/*=(float)0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat image, @ByVal Mat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal Mat image, @ByVal Mat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str String 
filename/*=cv::String()*/, float minProbablity/*=(float)0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat image, @ByVal UMat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str String filename/*=cv::String()*/, float minProbablity/*=(float)0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat image, @ByVal UMat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal UMat image, @ByVal UMat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str BytePointer filename/*=cv::String()*/, float minProbablity/*=(float)0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat image, @ByVal GpuMat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str BytePointer filename/*=cv::String()*/, float minProbablity/*=(float)0.5*/); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat image, @ByVal GpuMat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void erGrouping(@ByVal GpuMat image, @ByVal GpuMat channel, @ByVal PointVectorVector regions, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str String filename/*=cv::String()*/, float minProbablity/*=(float)0.5*/); /** \brief Converts MSER contours (vector\<Point\>) to ERStat regions. <p> @param image Source image CV_8UC1 from which the MSERs where extracted. <p> @param contours Input vector with all the contours (vector\<Point\>). <p> @param regions Output where the ERStat regions are stored. 
<p> It takes as input the contours provided by the OpenCV MSER feature detector and returns as output two vectors of ERStats. This is because MSER() output contains both MSER+ and MSER- regions in a single vector\<Point\>, the function separates them in two different vectors (this is as if the ERStats where extracted from two different channels). <p> An example of MSERsToERStats in use can be found in the text detection webcam_demo: <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/webcam_demo.cpp> */ @Namespace("cv::text") public static native void MSERsToERStats(@ByVal Mat image, @ByRef PointVectorVector contours, @ByRef ERStatVectorVector regions); @Namespace("cv::text") public static native void MSERsToERStats(@ByVal UMat image, @ByRef PointVectorVector contours, @ByRef ERStatVectorVector regions); @Namespace("cv::text") public static native void MSERsToERStats(@ByVal GpuMat image, @ByRef PointVectorVector contours, @ByRef ERStatVectorVector regions); // Utility funtion for scripting @Namespace("cv::text") public static native void detectRegions(@ByVal Mat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef PointVectorVector regions); @Namespace("cv::text") public static native void detectRegions(@ByVal UMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef PointVectorVector regions); @Namespace("cv::text") public static native void detectRegions(@ByVal GpuMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef PointVectorVector regions); /** \brief Extracts text regions from image. <p> @param image Source image where text blocks needs to be extracted from. Should be CV_8UC3 (color). 
@param er_filter1 Extremal Region Filter for the 1st stage classifier of N&M algorithm \cite Neumann12 @param er_filter2 Extremal Region Filter for the 2nd stage classifier of N&M algorithm \cite Neumann12 @param groups_rects Output list of rectangle blocks with text @param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY. @param filename The XML or YAML file with the classifier model (e.g. samples/trained_classifier_erGrouping.xml). Only to use when grouping method is ERGROUPING_ORIENTATION_ANY. @param minProbability The minimum probability for accepting a group. Only to use when grouping method is ERGROUPING_ORIENTATION_ANY. <p> <p> */ @Namespace("cv::text") public static native void detectRegions(@ByVal Mat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str BytePointer filename/*=cv::String()*/, float minProbability/*=(float)0.5*/); @Namespace("cv::text") public static native void detectRegions(@ByVal Mat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void detectRegions(@ByVal Mat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str String filename/*=cv::String()*/, float minProbability/*=(float)0.5*/); @Namespace("cv::text") public static native void detectRegions(@ByVal UMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str String filename/*=cv::String()*/, float minProbability/*=(float)0.5*/); @Namespace("cv::text") public static native void detectRegions(@ByVal UMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void 
detectRegions(@ByVal UMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str BytePointer filename/*=cv::String()*/, float minProbability/*=(float)0.5*/); @Namespace("cv::text") public static native void detectRegions(@ByVal GpuMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str BytePointer filename/*=cv::String()*/, float minProbability/*=(float)0.5*/); @Namespace("cv::text") public static native void detectRegions(@ByVal GpuMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects); @Namespace("cv::text") public static native void detectRegions(@ByVal GpuMat image, @Ptr ERFilter er_filter1, @Ptr ERFilter er_filter2, @ByRef RectVector groups_rects, int method/*=cv::text::ERGROUPING_ORIENTATION_HORIZ*/, @Str String filename/*=cv::String()*/, float minProbability/*=(float)0.5*/); /** \} */ // #endif // _OPENCV_TEXT_ERFILTER_HPP_ // Parsed from <opencv2/text/ocr.hpp> /*M////////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 2013, OpenCV Foundation, all rights reserved. // Third party copyrights are property of their respective owners. 
// // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ // #ifndef __OPENCV_TEXT_OCR_HPP__ // #define __OPENCV_TEXT_OCR_HPP__ // #include <opencv2/core.hpp> // #include <vector> // #include <string> /** \addtogroup text_recognize * \{ */ /** enum cv::text:: */ public static final int OCR_LEVEL_WORD = 0, OCR_LEVEL_TEXTLINE = 1; /** Tesseract.PageSegMode Enumeration */ /** enum cv::text::page_seg_mode */ public static final int PSM_OSD_ONLY = 0, PSM_AUTO_OSD = 1, PSM_AUTO_ONLY = 2, PSM_AUTO = 3, PSM_SINGLE_COLUMN = 4, PSM_SINGLE_BLOCK_VERT_TEXT = 5, PSM_SINGLE_BLOCK = 6, PSM_SINGLE_LINE = 7, PSM_SINGLE_WORD = 8, PSM_CIRCLE_WORD = 9, PSM_SINGLE_CHAR = 10; /** Tesseract.OcrEngineMode Enumeration */ /** enum cv::text::ocr_engine_mode */ public static final int OEM_TESSERACT_ONLY = 0, OEM_CUBE_ONLY = 1, OEM_TESSERACT_CUBE_COMBINED = 2, OEM_DEFAULT = 3; // Targeting ../opencv_text/BaseOCR.java // Targeting ../opencv_text/OCRTesseract.java /* OCR HMM Decoder */ /** enum cv::text::decoder_mode */ public static final int OCR_DECODER_VITERBI = 0; // Other algorithms may be added /* OCR classifier type*/ /** enum cv::text::classifier_type */ public static final int OCR_KNN_CLASSIFIER = 0, OCR_CNN_CLASSIFIER = 1; // Targeting ../opencv_text/OCRHMMDecoder.java /** \brief Allow to implicitly load the default character classifier when creating an OCRHMMDecoder object. <p> @param filename The XML or YAML file with the classifier model (e.g. OCRHMM_knn_model_data.xml) <p> The KNN default classifier is based in the scene text recognition method proposed by Lukás Neumann & Jiri Matas in [Neumann11b]. Basically, the region (contour) in the input image is normalized to a fixed size, while retaining the centroid and aspect ratio, in order to extract a feature vector based on gradient orientations along the chain-code of its perimeter. Then, the region is classified using a KNN model trained with synthetic data of rendered characters with different standard font types. 
<p> @deprecated loadOCRHMMClassifier instead */ @Namespace("cv::text") public static native @Ptr OCRHMMDecoder.ClassifierCallback loadOCRHMMClassifierNM(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr OCRHMMDecoder.ClassifierCallback loadOCRHMMClassifierNM(@Str String filename); /** \brief Allow to implicitly load the default character classifier when creating an OCRHMMDecoder object. <p> @param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz) <p> The CNN default classifier is based in the scene text recognition method proposed by Adam Coates & Andrew NG in [Coates11a]. The character classifier consists in a Single Layer Convolutional Neural Network and a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions at each window location. <p> @deprecated use loadOCRHMMClassifier instead */ @Namespace("cv::text") public static native @Ptr OCRHMMDecoder.ClassifierCallback loadOCRHMMClassifierCNN(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr OCRHMMDecoder.ClassifierCallback loadOCRHMMClassifierCNN(@Str String filename); /** \brief Allow to implicitly load the default character classifier when creating an OCRHMMDecoder object. <p> @param filename The XML or YAML file with the classifier model (e.g. OCRBeamSearch_CNN_model_data.xml.gz) <p> @param classifier Can be one of classifier_type enum values. <p> */ @Namespace("cv::text") public static native @Ptr OCRHMMDecoder.ClassifierCallback loadOCRHMMClassifier(@Str BytePointer filename, int classifier); @Namespace("cv::text") public static native @Ptr OCRHMMDecoder.ClassifierCallback loadOCRHMMClassifier(@Str String filename, int classifier); /** \} <p> /** \brief Utility function to create a tailored language model transitions table from a given list of words (lexicon). * * @param vocabulary The language vocabulary (chars when ASCII English text). 
* * @param lexicon The list of words that are expected to be found in a particular image. * * @param transition_probabilities_table Output table with transition probabilities between character pairs. cols == rows == vocabulary.size(). * * The function calculate frequency statistics of character pairs from the given lexicon and fills the output transition_probabilities_table with them. The transition_probabilities_table can be used as input in the OCRHMMDecoder::create() and OCRBeamSearchDecoder::create() methods. * \note * - (C++) An alternative would be to load the default generic language transition table provided in the text module samples folder (created from ispell 42869 english words list) : * <https://github.com/opencv/opencv_contrib/blob/master/modules/text/samples/OCRHMM_transitions_table.xml> **/ @Namespace("cv::text") public static native void createOCRHMMTransitionsTable(@StdString @ByRef BytePointer vocabulary, @ByRef StringVector lexicon, @ByVal Mat transition_probabilities_table); @Namespace("cv::text") public static native void createOCRHMMTransitionsTable(@StdString @ByRef BytePointer vocabulary, @ByRef StringVector lexicon, @ByVal UMat transition_probabilities_table); @Namespace("cv::text") public static native void createOCRHMMTransitionsTable(@StdString @ByRef BytePointer vocabulary, @ByRef StringVector lexicon, @ByVal GpuMat transition_probabilities_table); @Namespace("cv::text") public static native @ByVal Mat createOCRHMMTransitionsTable(@Str BytePointer vocabulary, @ByRef StringVector lexicon); @Namespace("cv::text") public static native @ByVal Mat createOCRHMMTransitionsTable(@Str String vocabulary, @ByRef StringVector lexicon); // Targeting ../opencv_text/OCRBeamSearchDecoder.java /** \brief Allow to implicitly load the default character classifier when creating an OCRBeamSearchDecoder object. <p> @param filename The XML or YAML file with the classifier model (e.g. 
OCRBeamSearch_CNN_model_data.xml.gz) <p> The CNN default classifier is based in the scene text recognition method proposed by Adam Coates & Andrew NG in [Coates11a]. The character classifier consists in a Single Layer Convolutional Neural Network and a linear classifier. It is applied to the input image in a sliding window fashion, providing a set of recognitions at each window location. */ @Namespace("cv::text") public static native @Ptr OCRBeamSearchDecoder.ClassifierCallback loadOCRBeamSearchClassifierCNN(@Str BytePointer filename); @Namespace("cv::text") public static native @Ptr OCRBeamSearchDecoder.ClassifierCallback loadOCRBeamSearchClassifierCNN(@Str String filename); // Targeting ../opencv_text/OCRHolisticWordRecognizer.java /** \} */ // cv::text:: // #endif // _OPENCV_TEXT_OCR_HPP_ // Parsed from opencv2/text/textDetector.hpp // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. // #ifndef __OPENCV_TEXT_TEXTDETECTOR_HPP__ // #define __OPENCV_TEXT_TEXTDETECTOR_HPP__ // #include "ocr.hpp" // Targeting ../opencv_text/TextDetector.java // Targeting ../opencv_text/TextDetectorCNN.java /** \} */ //namespace text //namespace cv // #endif // _OPENCV_TEXT_OCR_HPP_ }
bytedeco/javacpp-presets
opencv/src/gen/java/org/bytedeco/opencv/global/opencv_text.java
214,447
package edu.stanford.nlp.process;

import java.io.Reader;
import java.io.Serializable;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.util.Maps;
import edu.stanford.nlp.util.StringUtils;

/** A tokenizer that works by calling a WordSegmenter.
 *  Each whitespace-delimited token from an underlying tokenizer is passed
 *  through the segmenter, and the resulting words are returned one at a time.
 *  This is used for Chinese and Arabic.
 *
 *  @author Galen Andrew
 *  @author Spence Green
 */
public class WordSegmentingTokenizer extends AbstractTokenizer<HasWord> {

  // Words produced by segmenting the current raw token; null until first refill.
  private Iterator<HasWord> segmentedWords;
  // Supplies the raw whitespace-delimited tokens that get segmented.
  private Tokenizer<CoreLabel> whitespaceTokenizer;
  // Splits a raw token's text into individual words.
  private WordSegmenter wordSegmenter;

  /** Tokenize the given reader, segmenting each whitespace-delimited token. */
  public WordSegmentingTokenizer(WordSegmenter segmenter, Reader r) {
    this(segmenter, WhitespaceTokenizer.newCoreLabelWhitespaceTokenizer(r));
  }

  /** Segment the output of an arbitrary CoreLabel tokenizer. */
  public WordSegmentingTokenizer(WordSegmenter segmenter, Tokenizer<CoreLabel> tokenizer) {
    wordSegmenter = segmenter;
    whitespaceTokenizer = tokenizer;
  }

  /** Returns the next segmented word, or null when the underlying tokenizer
   *  is exhausted (or yields a token with a null word).
   */
  @Override
  protected HasWord getNext() {
    // Keep refilling the per-token word iterator until it can hand something out.
    while (segmentedWords == null || ! segmentedWords.hasNext()) {
      if ( ! whitespaceTokenizer.hasNext()) {
        return null;
      }
      CoreLabel rawToken = whitespaceTokenizer.next();
      String text = rawToken.word();
      if (text == null) {
        return null;
      }
      // If newlines were significant, we should make sure to return
      // them when we see them: pass the newline token through unsegmented.
      List<HasWord> pieces = text.equals(WhitespaceLexer.NEWLINE)
          ? Collections.<HasWord>singletonList(rawToken)
          : wordSegmenter.segment(text);
      segmentedWords = pieces.iterator();
    }
    return segmentedWords.next();
  }

  /** Returns a serializable factory producing tokenizers backed by the given segmenter. */
  public static TokenizerFactory<HasWord> factory(WordSegmenter wordSegmenter) {
    return new WordSegmentingTokenizerFactory(wordSegmenter);
  }

  private static class WordSegmentingTokenizerFactory implements TokenizerFactory<HasWord>, Serializable {

    private static final long serialVersionUID = -4697961121607489828L;

    // Whether newline tokens are preserved by the whitespace tokenizer.
    boolean tokenizeNLs = false;
    private WordSegmenter segmenter;

    public WordSegmentingTokenizerFactory(WordSegmenter wordSegmenter) {
      segmenter = wordSegmenter;
    }

    public Iterator<HasWord> getIterator(Reader r) {
      return getTokenizer(r);
    }

    public Tokenizer<HasWord> getTokenizer(Reader r) {
      return getTokenizer(r, null);
    }

    public Tokenizer<HasWord> getTokenizer(Reader r, String extraOptions) {
      // Per-call override of the tokenizeNLs setting via an options string.
      boolean keepNewlines = this.tokenizeNLs;
      if (extraOptions != null) {
        LinkedHashMap<String, String> parsed = StringUtils.stringToPropertiesMap(extraOptions);
        keepNewlines = Maps.getBool(parsed, "tokenizeNLs", this.tokenizeNLs);
      }
      return new WordSegmentingTokenizer(segmenter,
          WhitespaceTokenizer.newCoreLabelWhitespaceTokenizer(r, keepNewlines));
    }

    public void setOptions(String options) {
      LinkedHashMap<String, String> parsed = StringUtils.stringToPropertiesMap(options);
      tokenizeNLs = Maps.getBool(parsed, "tokenizeNLs", tokenizeNLs);
    }

  }

}
stanfordnlp/CoreNLP
src/edu/stanford/nlp/process/WordSegmentingTokenizer.java
214,448
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hive.ql; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.security.AccessControlException; import java.io.FileNotFoundException; import java.text.MessageFormat; import java.util.HashMap; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * List of all error messages. * This list contains both compile time and run-time errors. * * This class supports parametrized messages such as (@link #TRUNCATE_FOR_NON_MANAGED_TABLE}. These are * preferable over un-parametrized ones where arbitrary String is appended to the end of the message, * for example {@link #getMsg(String)} and {@link #INVALID_TABLE}. */ public enum ErrorMsg { // The error codes are Hive-specific and partitioned into the following ranges: // 10000 to 19999: Errors occurring during semantic analysis and compilation of the query. // 20000 to 29999: Runtime errors where Hive believes that retries are unlikely to succeed. 
// 30000 to 39999: Runtime errors which Hive thinks may be transient and retrying may succeed. // 40000 to 49999: Errors where Hive is unable to advise about retries. // In addition to the error code, ErrorMsg also has a SQLState field. // SQLStates are taken from Section 22.1 of ISO-9075. // See http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt // Most will just rollup to the generic syntax error state of 42000, but // specific errors can override the that state. // See this page for how MySQL uses SQLState codes: // http://dev.mysql.com/doc/refman/5.0/en/connector-j-reference-error-sqlstates.html GENERIC_ERROR(40000, "Exception while processing"), //========================== 10000 range starts here ========================// INVALID_TABLE(10001, "Table not found", "42S02"), INVALID_COLUMN(10002, "Invalid column reference"), INVALID_TABLE_OR_COLUMN(10004, "Invalid table alias or column reference"), AMBIGUOUS_TABLE_OR_COLUMN(10005, "Ambiguous table alias or column reference"), INVALID_PARTITION(10006, "Partition not found"), AMBIGUOUS_COLUMN(10007, "Ambiguous column reference"), AMBIGUOUS_TABLE_ALIAS(10008, "Ambiguous table alias"), INVALID_TABLE_ALIAS(10009, "Invalid table alias"), NO_TABLE_ALIAS(10010, "No table alias"), INVALID_FUNCTION(10011, "Invalid function"), INVALID_FUNCTION_SIGNATURE(10012, "Function argument type mismatch"), INVALID_OPERATOR_SIGNATURE(10013, "Operator argument type mismatch"), INVALID_ARGUMENT(10014, "Wrong arguments"), INVALID_ARGUMENT_LENGTH(10015, "Arguments length mismatch", "21000"), INVALID_ARGUMENT_TYPE(10016, "Argument type mismatch"), @Deprecated INVALID_JOIN_CONDITION_1(10017, "Both left and right aliases encountered in JOIN"), @Deprecated INVALID_JOIN_CONDITION_2(10018, "Neither left nor right aliases encountered in JOIN"), @Deprecated INVALID_JOIN_CONDITION_3(10019, "OR not supported in JOIN currently"), INVALID_TRANSFORM(10020, "TRANSFORM with other SELECT columns not supported"), 
UNSUPPORTED_MULTIPLE_DISTINCTS(10022, "DISTINCT on different columns not supported" + " with skew in data"), NO_SUBQUERY_ALIAS(10023, "No alias for subquery"), NO_INSERT_INSUBQUERY(10024, "Cannot insert in a subquery. Inserting to table "), NON_KEY_EXPR_IN_GROUPBY(10025, "Expression not in GROUP BY key"), INVALID_XPATH(10026, "General . and [] operators are not supported"), INVALID_PATH(10027, "Invalid path"), ILLEGAL_PATH(10028, "Path is not legal"), INVALID_NUMERICAL_CONSTANT(10029, "Invalid numerical constant"), INVALID_ARRAYINDEX_TYPE(10030, "Not proper type for index of ARRAY. Currently, only integer type is supported"), INVALID_MAPINDEX_CONSTANT(10031, "Non-constant expression for map indexes not supported"), INVALID_MAPINDEX_TYPE(10032, "MAP key type does not match index expression type"), NON_COLLECTION_TYPE(10033, "[] not valid on non-collection types"), @Deprecated SELECT_DISTINCT_WITH_GROUPBY(10034, "SELECT DISTINCT and GROUP BY can not be in the same query"), COLUMN_REPEATED_IN_PARTITIONING_COLS(10035, "Column repeated in partitioning columns"), DUPLICATE_COLUMN_NAMES(10036, "Duplicate column name:"), INVALID_BUCKET_NUMBER(10037, "Bucket number should be bigger than zero"), COLUMN_REPEATED_IN_CLUSTER_SORT(10038, "Same column cannot appear in CLUSTER BY and SORT BY"), SAMPLE_RESTRICTION(10039, "Cannot SAMPLE on more than two columns"), SAMPLE_COLUMN_NOT_FOUND(10040, "SAMPLE column not found"), NO_PARTITION_PREDICATE(10041, "No partition predicate found"), INVALID_DOT(10042, ". 
Operator is only supported on struct or list of struct types"), INVALID_TBL_DDL_SERDE(10043, "Either list of columns or a custom serializer should be specified"), TARGET_TABLE_COLUMN_MISMATCH(10044, "Cannot insert into target table because column number/types are different"), TABLE_ALIAS_NOT_ALLOWED(10045, "Table alias not allowed in sampling clause"), CLUSTERBY_DISTRIBUTEBY_CONFLICT(10046, "Cannot have both CLUSTER BY and DISTRIBUTE BY clauses"), ORDERBY_DISTRIBUTEBY_CONFLICT(10047, "Cannot have both ORDER BY and DISTRIBUTE BY clauses"), CLUSTERBY_SORTBY_CONFLICT(10048, "Cannot have both CLUSTER BY and SORT BY clauses"), ORDERBY_SORTBY_CONFLICT(10049, "Cannot have both ORDER BY and SORT BY clauses"), CLUSTERBY_ORDERBY_CONFLICT(10050, "Cannot have both CLUSTER BY and ORDER BY clauses"), NO_LIMIT_WITH_ORDERBY(10051, "In strict mode, if ORDER BY is specified, " + "LIMIT must also be specified"), UNION_NOTIN_SUBQ(10053, "Top level UNION is not supported currently; " + "use a subquery for the UNION"), INVALID_INPUT_FORMAT_TYPE(10054, "Input format must implement InputFormat"), INVALID_OUTPUT_FORMAT_TYPE(10055, "Output Format must implement HiveOutputFormat, " + "otherwise it should be either IgnoreKeyTextOutputFormat or SequenceFileOutputFormat"), NO_VALID_PARTN(10056, HiveConf.StrictChecks.NO_PARTITIONLESS_MSG), NO_OUTER_MAPJOIN(10057, "MAPJOIN cannot be performed with OUTER JOIN"), INVALID_MAPJOIN_HINT(10058, "All tables are specified as map-table for join"), INVALID_MAPJOIN_TABLE(10059, "Result of a union cannot be a map table"), NON_BUCKETED_TABLE(10060, "Sampling expression needed for non-bucketed table"), BUCKETED_NUMERATOR_BIGGER_DENOMINATOR(10061, "Numerator should not be bigger than " + "denominator in sample clause for table"), NEED_PARTITION_ERROR(10062, "Need to specify partition columns because the destination " + "table is partitioned"), CTAS_CTLT_COEXISTENCE(10063, "Create table command does not allow LIKE and AS-SELECT in " + "the same command"), 
LINES_TERMINATED_BY_NON_NEWLINE(10064, "LINES TERMINATED BY only supports " + "newline '\\n' right now"), CTAS_COLLST_COEXISTENCE(10065, "CREATE TABLE AS SELECT command cannot specify " + "the list of columns " + "for the target table"), CTLT_COLLST_COEXISTENCE(10066, "CREATE TABLE LIKE command cannot specify the list of columns for " + "the target table"), INVALID_SELECT_SCHEMA(10067, "Cannot derive schema from the select-clause"), CTAS_PARCOL_COEXISTENCE(10068, "CREATE-TABLE-AS-SELECT does not support " + "partitioning in the target table "), CTAS_MULTI_LOADFILE(10069, "CREATE-TABLE-AS-SELECT results in multiple file load"), CTAS_EXTTBL_COEXISTENCE(10070, "CREATE-TABLE-AS-SELECT cannot create external table"), INSERT_EXTERNAL_TABLE(10071, "Inserting into a external table is not allowed"), DATABASE_NOT_EXISTS(10072, "Database does not exist:"), TABLE_ALREADY_EXISTS(10073, "Table already exists:", "42S02"), COLUMN_ALIAS_ALREADY_EXISTS(10074, "Column alias already exists:", "42S02"), UDTF_MULTIPLE_EXPR(10075, "Only a single expression in the SELECT clause is " + "supported with UDTF's"), @Deprecated UDTF_REQUIRE_AS(10076, "UDTF's require an AS clause"), UDTF_NO_GROUP_BY(10077, "GROUP BY is not supported with a UDTF in the SELECT clause"), UDTF_NO_SORT_BY(10078, "SORT BY is not supported with a UDTF in the SELECT clause"), UDTF_NO_CLUSTER_BY(10079, "CLUSTER BY is not supported with a UDTF in the SELECT clause"), UDTF_NO_DISTRIBUTE_BY(10080, "DISTRIBUTE BY is not supported with a UDTF in the SELECT clause"), UDTF_INVALID_LOCATION(10081, "UDTF's are not supported outside the SELECT clause, nor nested " + "in expressions"), UDTF_LATERAL_VIEW(10082, "UDTF's cannot be in a select expression when there is a lateral view"), UDTF_ALIAS_MISMATCH(10083, "The number of aliases supplied in the AS clause does not match the " + "number of columns output by the UDTF"), UDF_STATEFUL_INVALID_LOCATION(10084, "Stateful UDF's can only be invoked in the SELECT list"), 
LATERAL_VIEW_WITH_JOIN(10085, "JOIN with a LATERAL VIEW is not supported"), LATERAL_VIEW_INVALID_CHILD(10086, "LATERAL VIEW AST with invalid child"), OUTPUT_SPECIFIED_MULTIPLE_TIMES(10087, "The same output cannot be present multiple times: "), INVALID_AS(10088, "AS clause has an invalid number of aliases"), VIEW_COL_MISMATCH(10089, "The number of columns produced by the SELECT clause does not match the " + "number of column names specified by CREATE VIEW"), DML_AGAINST_VIEW(10090, "A view cannot be used as target table for LOAD or INSERT"), ANALYZE_VIEW(10091, "ANALYZE is not supported for views"), VIEW_PARTITION_TOTAL(10092, "At least one non-partitioning column must be present in view"), VIEW_PARTITION_MISMATCH(10093, "Rightmost columns in view output do not match " + "PARTITIONED ON clause"), PARTITION_DYN_STA_ORDER(10094, "Dynamic partition cannot be the parent of a static partition"), DYNAMIC_PARTITION_DISABLED(10095, "Dynamic partition is disabled. Either enable it by setting " + "hive.exec.dynamic.partition=true or specify partition column values"), DYNAMIC_PARTITION_STRICT_MODE(10096, "Dynamic partition strict mode requires at least one " + "static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict"), NONEXISTPARTCOL(10098, "Non-Partition column appears in the partition specification: "), UNSUPPORTED_TYPE(10099, "DATETIME type isn't supported yet. 
Please use " + "DATE or TIMESTAMP instead"), CREATE_NON_NATIVE_AS(10100, "CREATE TABLE AS SELECT cannot be used for a non-native table"), LOAD_INTO_NON_NATIVE(10101, "A non-native table cannot be used as target for LOAD"), LOCKMGR_NOT_SPECIFIED(10102, "Lock manager not specified correctly, set hive.lock.manager"), LOCKMGR_NOT_INITIALIZED(10103, "Lock manager could not be initialized, check hive.lock.manager "), LOCK_CANNOT_BE_ACQUIRED(10104, "Locks on the underlying objects cannot be acquired, " + "retry after some time."), ZOOKEEPER_CLIENT_COULD_NOT_BE_INITIALIZED(10105, "Check hive.zookeeper.quorum " + "and hive.zookeeper.client.port"), OVERWRITE_ARCHIVED_PART(10106, "Cannot overwrite an archived partition. " + "Unarchive before running this command"), ARCHIVE_METHODS_DISABLED(10107, "Archiving methods are currently disabled. " + "Please see the Hive wiki for more information about enabling archiving"), ARCHIVE_ON_MULI_PARTS(10108, "ARCHIVE can only be run on a single partition"), UNARCHIVE_ON_MULI_PARTS(10109, "ARCHIVE can only be run on a single partition"), ARCHIVE_ON_TABLE(10110, "ARCHIVE can only be run on partitions"), RESERVED_PART_VAL(10111, "Partition value contains a reserved substring"), OFFLINE_TABLE_OR_PARTITION(10113, "Query against an offline table or partition"), NEED_PARTITION_SPECIFICATION(10115, "Table is partitioned and partition specification is needed"), INVALID_METADATA(10116, "The metadata file could not be parsed "), NEED_TABLE_SPECIFICATION(10117, "Table name could be determined; It should be specified "), PARTITION_EXISTS(10118, "Partition already exists"), TABLE_DATA_EXISTS(10119, "Table exists and contains data files"), INCOMPATIBLE_SCHEMA(10120, "The existing table is not compatible with the Export/Import spec. 
"), EXIM_FOR_NON_NATIVE(10121, "Export/Import cannot be done for a non-native table."), INSERT_INTO_BUCKETIZED_TABLE(10122, "Bucketized tables do not support INSERT INTO:"), PARTSPEC_DIFFER_FROM_SCHEMA(10125, "Partition columns in partition specification are " + "not the same as that defined in the table schema. " + "The names and orders have to be exactly the same."), PARTITION_COLUMN_NON_PRIMITIVE(10126, "Partition column must be of primitive type."), INSERT_INTO_DYNAMICPARTITION_IFNOTEXISTS(10127, "Dynamic partitions do not support IF NOT EXISTS. Specified partitions with value :"), UDAF_INVALID_LOCATION(10128, "Not yet supported place for UDAF"), DROP_PARTITION_NON_STRING_PARTCOLS_NONEQUALITY(10129, "Drop partitions for a non-string partition column is only allowed using equality"), ALTER_COMMAND_FOR_VIEWS(10131, "To alter a view you need to use the ALTER VIEW command."), ALTER_COMMAND_FOR_TABLES(10132, "To alter a base table you need to use the ALTER TABLE command."), ALTER_VIEW_DISALLOWED_OP(10133, "Cannot use this form of ALTER on a view"), ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for {0} to a non-native table {1}", true), SORTMERGE_MAPJOIN_FAILED(10135, "Sort merge bucketed join could not be performed. " + "If you really want to perform the operation, either set " + "hive.optimize.bucketmapjoin.sortedmerge=false, or set " + "hive.enforce.sortmergebucketmapjoin=false."), BUCKET_MAPJOIN_NOT_POSSIBLE(10136, "Bucketed mapjoin cannot be performed. " + "This can be due to multiple reasons: " + " . Join columns don't match bucketed columns. " + " . Number of buckets are not a multiple of each other. " + "If you really want to perform the operation, either remove the " + "mapjoin hint from your query or set hive.enforce.bucketmapjoin to false."), BUCKETED_TABLE_METADATA_INCORRECT(10141, "Bucketed table metadata is not correct. 
" + "Fix the metadata or don't use bucketed mapjoin, by setting " + "hive.enforce.bucketmapjoin to false."), JOINNODE_OUTERJOIN_MORETHAN_16(10142, "Single join node containing outer join(s) " + "cannot have more than 16 aliases"), INVALID_JDO_FILTER_EXPRESSION(10143, "Invalid expression for JDO filter"), ALTER_BUCKETNUM_NONBUCKETIZED_TBL(10145, "Table is not bucketized."), TRUNCATE_FOR_NON_MANAGED_TABLE(10146, "Cannot truncate non-managed table {0}.", true), TRUNCATE_FOR_NON_NATIVE_TABLE(10147, "Cannot truncate non-native table {0}.", true), PARTSPEC_FOR_NON_PARTITIONED_TABLE(10148, "Partition spec for non partitioned table {0}.", true), INVALID_TABLE_IN_ON_CLAUSE_OF_MERGE(10149, "No columns from target table ''{0}'' found in ON " + "clause ''{1}'' of MERGE statement.", true), LOAD_INTO_STORED_AS_DIR(10195, "A stored-as-directories table cannot be used as target for LOAD"), ALTER_TBL_STOREDASDIR_NOT_SKEWED(10196, "This operation is only valid on skewed table."), ALTER_TBL_SKEWED_LOC_NO_LOC(10197, "Alter table skewed location doesn't have locations."), ALTER_TBL_SKEWED_LOC_NO_MAP(10198, "Alter table skewed location doesn't have location map."), SKEWED_TABLE_NO_COLUMN_NAME(10200, "No skewed column name."), SKEWED_TABLE_NO_COLUMN_VALUE(10201, "No skewed values."), SKEWED_TABLE_DUPLICATE_COLUMN_NAMES(10202, "Duplicate skewed column name:"), SKEWED_TABLE_INVALID_COLUMN(10203, "Invalid skewed column name:"), SKEWED_TABLE_SKEWED_COL_NAME_VALUE_MISMATCH_1(10204, "Skewed column name is empty but skewed value is not."), SKEWED_TABLE_SKEWED_COL_NAME_VALUE_MISMATCH_2(10205, "Skewed column value is empty but skewed name is not."), SKEWED_TABLE_SKEWED_COL_NAME_VALUE_MISMATCH_3(10206, "The number of skewed column names and the number of " + "skewed column values are different: "), ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN(10207, " is a skewed column. 
It's not allowed to rename skewed column" + " or change skewed column type."), HIVE_GROUPING_SETS_AGGR_NOMAPAGGR(10209, "Grouping sets aggregations (with rollups or cubes) are not allowed if map-side " + " aggregation is turned off. Set hive.map.aggr=true if you want to use grouping sets"), HIVE_GROUPING_SETS_AGGR_EXPRESSION_INVALID(10210, "Grouping sets aggregations (with rollups or cubes) are not allowed if aggregation function " + "parameters overlap with the aggregation functions columns"), HIVE_GROUPING_SETS_EMPTY(10211, "Empty grouping sets not allowed"), HIVE_UNION_REMOVE_OPTIMIZATION_NEEDS_SUBDIRECTORIES(10212, "In order to use hive.optimize.union.remove, the hadoop version that you are using " + "should support sub-directories for tables/partitions. If that is true, set " + "hive.hadoop.supports.subdirectories to true. Otherwise, set hive.optimize.union.remove " + "to false"), HIVE_GROUPING_SETS_EXPR_NOT_IN_GROUPBY(10213, "Grouping sets expression is not in GROUP BY key"), INVALID_PARTITION_SPEC(10214, "Invalid partition spec specified"), ALTER_TBL_UNSET_NON_EXIST_PROPERTY(10215, "Please use the following syntax if not sure " + "whether the property existed or not:\n" + "ALTER TABLE tableName UNSET TBLPROPERTIES IF EXISTS (key1, key2, ...)\n"), ALTER_VIEW_AS_SELECT_NOT_EXIST(10216, "Cannot ALTER VIEW AS SELECT if view currently does not exist\n"), REPLACE_VIEW_WITH_PARTITION(10217, "Cannot replace a view with CREATE VIEW or REPLACE VIEW or " + "ALTER VIEW AS SELECT if the view has partitions\n"), EXISTING_TABLE_IS_NOT_VIEW(10218, "Existing table is not a view\n"), NO_SUPPORTED_ORDERBY_ALLCOLREF_POS(10219, "Position in ORDER BY is not supported when using SELECT *"), INVALID_POSITION_ALIAS_IN_GROUPBY(10220, "Invalid position alias in Group By\n"), INVALID_POSITION_ALIAS_IN_ORDERBY(10221, "Invalid position alias in Order By\n"), HIVE_GROUPING_SETS_THRESHOLD_NOT_ALLOWED_WITH_SKEW(10225, "An additional MR job is introduced since the number of rows created per 
input row " + "due to grouping sets is more than hive.new.job.grouping.set.cardinality. There is no need " + "to handle skew separately. set hive.groupby.skewindata to false."), HIVE_GROUPING_SETS_THRESHOLD_NOT_ALLOWED_WITH_DISTINCTS(10226, "An additional MR job is introduced since the cardinality of grouping sets " + "is more than hive.new.job.grouping.set.cardinality. This functionality is not supported " + "with distincts. Either set hive.new.job.grouping.set.cardinality to a high number " + "(higher than the number of rows per input row due to grouping sets in the query), or " + "rewrite the query to not use distincts."), OPERATOR_NOT_ALLOWED_WITH_MAPJOIN(10227, "Not all clauses are supported with mapjoin hint. Please remove mapjoin hint."), ANALYZE_TABLE_NOSCAN_NON_NATIVE(10228, "ANALYZE TABLE NOSCAN cannot be used for " + "a non-native table"), PARTITION_VALUE_NOT_CONTINUOUS(10234, "Partition values specified are not continuous." + " A subpartition value is specified without specifying the parent partition's value"), TABLES_INCOMPATIBLE_SCHEMAS(10235, "Tables have incompatible schemas and their partitions " + " cannot be exchanged."), EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES(10236, "Exchange partition is not allowed with " + "transactional tables. Alternatively, shall use load data or insert overwrite to move partitions."), TRUNCATE_COLUMN_NOT_RC(10237, "Only RCFileFormat supports column truncation."), TRUNCATE_COLUMN_ARCHIVED(10238, "Column truncation cannot be performed on archived partitions."), TRUNCATE_BUCKETED_COLUMN(10239, "A column on which a partition/table is bucketed cannot be truncated."), TRUNCATE_LIST_BUCKETED_COLUMN(10240, "A column on which a partition/table is list bucketed cannot be truncated."), TABLE_NOT_PARTITIONED(10241, "Table {0} is not a partitioned table", true), DATABASE_ALREADY_EXISTS(10242, "Database {0} already exists", true), CANNOT_REPLACE_COLUMNS(10243, "Replace columns is not supported for table {0}. 
SerDe may be incompatible.", true), BAD_LOCATION_VALUE(10244, "{0} is not absolute. Please specify a complete absolute uri.", true), UNSUPPORTED_ALTER_TBL_OP(10245, "{0} alter table options is not supported", true), INVALID_BIGTABLE_MAPJOIN(10246, "{0} table chosen for streaming is not valid", true), MISSING_OVER_CLAUSE(10247, "Missing over clause for function : "), PARTITION_SPEC_TYPE_MISMATCH(10248, "Cannot add partition column {0} of type {1} as it cannot be converted to type {2}", true), UNSUPPORTED_SUBQUERY_EXPRESSION(10249, "Unsupported SubQuery Expression"), INVALID_SUBQUERY_EXPRESSION(10250, "Invalid SubQuery expression"), INVALID_HDFS_URI(10251, "{0} is not a hdfs uri", true), INVALID_DIR(10252, "{0} is not a directory", true), NO_VALID_LOCATIONS(10253, "Could not find any valid location to place the jars. " + "Please update hive.jar.directory or hive.user.install.directory with a valid location", false), UNSUPPORTED_AUTHORIZATION_PRINCIPAL_TYPE_GROUP(10254, "Principal type GROUP is not supported in this authorization setting", "28000"), INVALID_TABLE_NAME(10255, "Invalid table name {0}", true), INSERT_INTO_IMMUTABLE_TABLE(10256, "Inserting into a non-empty immutable table is not allowed"), UNSUPPORTED_AUTHORIZATION_RESOURCE_TYPE_GLOBAL(10257, "Resource type GLOBAL is not supported in this authorization setting", "28000"), UNSUPPORTED_AUTHORIZATION_RESOURCE_TYPE_COLUMN(10258, "Resource type COLUMN is not supported in this authorization setting", "28000"), TXNMGR_NOT_SPECIFIED(10260, "Transaction manager not specified correctly, " + "set hive.txn.manager"), TXNMGR_NOT_INSTANTIATED(10261, "Transaction manager could not be " + "instantiated, check hive.txn.manager"), TXN_NO_SUCH_TRANSACTION(10262, "No record of transaction {0} could be found, " + "may have timed out", true), TXN_ABORTED(10263, "Transaction manager has aborted the transaction {0}. 
Reason: {1}", true), DBTXNMGR_REQUIRES_CONCURRENCY(10264, "To use DbTxnManager you must set hive.support.concurrency=true"), TXNMGR_NOT_ACID(10265, "This command is not allowed on an ACID table {0}.{1} with a non-ACID transaction manager", true), LOCK_NO_SUCH_LOCK(10270, "No record of lock {0} could be found, " + "may have timed out", true), LOCK_REQUEST_UNSUPPORTED(10271, "Current transaction manager does not " + "support explicit lock requests. Transaction manager: "), METASTORE_COMMUNICATION_FAILED(10280, "Error communicating with the " + "metastore"), METASTORE_COULD_NOT_INITIATE(10281, "Unable to initiate connection to the " + "metastore."), INVALID_COMPACTION_TYPE(10282, "Invalid compaction type, supported values are 'major' and " + "'minor'"), NO_COMPACTION_PARTITION(10283, "You must specify a partition to compact for partitioned tables"), TOO_MANY_COMPACTION_PARTITIONS(10284, "Compaction can only be requested on one partition at a " + "time."), DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not support in current context"), NONACID_COMPACTION_NOT_SUPPORTED(10286, "Compaction is not allowed on non-ACID table {0}.{1}", true), MASKING_FILTERING_ON_ACID_NOT_SUPPORTED(10287, "Detected {0}.{1} has row masking/column filtering enabled, " + "which is not supported for query involving ACID operations", true), MASKING_FILTERING_ON_MATERIALIZED_VIEWS_SOURCES(10288, "Querying directly materialized view contents is not supported since we detected {0}.{1} " + "used by materialized view has row masking/column filtering enabled", true), MASKING_COMPLEX_TYPE_NOT_SUPPORTED(10289, "Masking complex types is not supported, found a masking expression {0} over column {1}:{2}", true), UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing rewritten merge/update or " + "delete query"), UPDATE_CANNOT_UPDATE_PART_VALUE(10292, "Updating values of partition columns is not supported"), INSERT_CANNOT_CREATE_TEMP_FILE(10293, "Unable to create temp file for insert 
values "), ACID_OP_ON_NONACID_TXNMGR(10294, "Attempt to do update or delete using transaction manager that" + " does not support these operations."), VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296, "Values clause with table constructor not yet supported"), ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} that is " + "not transactional", true), ACID_NO_SORTED_BUCKETS(10298, "ACID insert, update, delete not supported on tables that are " + "sorted, table {0}", true), ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED(10299, "Alter table partition type {0} does not allow partial partition spec", true), ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED(10300, "Alter table partition type {0} does not support cascade", true), DROP_NATIVE_FUNCTION(10301, "Cannot drop native function"), UPDATE_CANNOT_UPDATE_BUCKET_VALUE(10302, "Updating values of bucketing columns is not supported. Column {0}.", true), IMPORT_INTO_STRICT_REPL_TABLE(10303,"Non-repl import disallowed against table that is a destination of replication."), CTAS_LOCATION_NONEMPTY(10304, "CREATE-TABLE-AS-SELECT cannot create table with location to a non-empty directory."), CTAS_CREATES_VOID_TYPE(10305, "CREATE-TABLE-AS-SELECT creates a VOID type, please use CAST to specify the type, near field: "), TBL_SORTED_NOT_BUCKETED(10306, "Destination table {0} found to be sorted but not bucketed.", true), //{2} should be lockid LOCK_ACQUIRE_TIMEDOUT(10307, "Lock acquisition for {0} timed out after {1}ms. {2}", true), COMPILE_LOCK_TIMED_OUT(10308, "Attempt to acquire compile lock timed out.", true), CANNOT_CHANGE_SERDE(10309, "Changing SerDe (from {0}) is not supported for table {1}. File format may be incompatible", true), CANNOT_CHANGE_FILEFORMAT(10310, "Changing file format (from {0}) is not supported for table {1}", true), CANNOT_REORDER_COLUMNS(10311, "Reordering columns is not supported for table {0}. 
SerDe may be incompatible", true), CANNOT_CHANGE_COLUMN_TYPE(10312, "Changing from type {0} to {1} is not supported for column {2}. SerDe may be incompatible", true), REPLACE_CANNOT_DROP_COLUMNS(10313, "Replacing columns cannot drop columns for table {0}. SerDe may be incompatible", true), REPLACE_UNSUPPORTED_TYPE_CONVERSION(10314, "Replacing columns with unsupported type conversion (from {0} to {1}) for column {2}. SerDe may be incompatible", true), HIVE_GROUPING_SETS_AGGR_NOMAPAGGR_MULTIGBY(10315, "Grouping sets aggregations (with rollups or cubes) are not allowed when " + "HIVE_MULTI_GROUPBY_SINGLE_REDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"), CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table metadata"), INVALID_AST_TREE(10318, "Internal error : Invalid AST"), ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"), IO_ERROR(10320, "Error while performing IO operation "), ERROR_SERIALIZE_METADATA(10321, "Error while serializing the metadata"), INVALID_LOAD_TABLE_FILE_WORK(10322, "Invalid Load Table Work or Load File Work"), CLASSPATH_ERROR(10323, "Classpath error"), IMPORT_SEMANTIC_ERROR(10324, "Import Semantic Analyzer Error"), INVALID_FK_SYNTAX(10325, "Invalid Foreign Key syntax"), INVALID_CSTR_SYNTAX(10326, "Invalid Constraint syntax"), ACID_NOT_ENOUGH_HISTORY(10327, "Not enough history available for ({0},{1}). " + "Oldest available base: {2}", true), INVALID_COLUMN_NAME(10328, "Invalid column name"), UNSUPPORTED_SET_OPERATOR(10329, "Unsupported set operator"), LOCK_ACQUIRE_CANCELLED(10330, "Query was cancelled while acquiring locks on the underlying objects. 
"), NOT_RECOGNIZED_CONSTRAINT(10331, "Constraint not recognized"), INVALID_CONSTRAINT(10332, "Invalid constraint definition"), @Deprecated // kept for backwards reference REPLACE_VIEW_WITH_MATERIALIZED(10400, "Attempt to replace view {0} with materialized view", true), REPLACE_MATERIALIZED_WITH_VIEW(10401, "Attempt to replace materialized view {0} with view", true), UPDATE_DELETE_VIEW(10402, "You cannot update or delete records in a view"), MATERIALIZED_VIEW_DEF_EMPTY(10403, "Query for the materialized view rebuild could not be retrieved"), MERGE_PREDIACTE_REQUIRED(10404, "MERGE statement with both UPDATE and DELETE clauses " + "requires \"AND <boolean>\" on the 1st WHEN MATCHED clause of <{0}>", true), MERGE_TOO_MANY_DELETE(10405, "MERGE statement can have at most 1 WHEN MATCHED ... DELETE clause: <{0}>", true), MERGE_TOO_MANY_UPDATE(10406, "MERGE statement can have at most 1 WHEN MATCHED ... UPDATE clause: <{0}>", true), INVALID_JOIN_CONDITION(10407, "Error parsing condition in join"), INVALID_TARGET_COLUMN_IN_SET_CLAUSE(10408, "Target column \"{0}\" of set clause is not found in table \"{1}\".", true), HIVE_GROUPING_FUNCTION_EXPR_NOT_IN_GROUPBY(10409, "Expression in GROUPING function not present in GROUP BY"), ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED(10410, "Alter table with non-partitioned table does not support cascade"), HIVE_GROUPING_SETS_SIZE_LIMIT(10411, "Grouping sets size cannot be greater than 64"), REBUILD_NO_MATERIALIZED_VIEW(10412, "Rebuild command only valid for materialized views"), LOAD_DATA_ACID_FILE(10413, "\"{0}\" was created by Acid write - it cannot be loaded into anther Acid table", true), ACID_OP_ON_INSERTONLYTRAN_TABLE(10414, "Attempt to do update or delete on table {0} that is " + "insert-only transactional", true), LOAD_DATA_LAUNCH_JOB_PARSE_ERROR(10416, "Encountered parse error while parsing rewritten load data into insert query"), RESOURCE_PLAN_ALREADY_EXISTS(10417, "Resource plan {0} already exists", true), 
RESOURCE_PLAN_NOT_EXISTS(10418, "Resource plan {0} does not exist", true), INCOMPATIBLE_STRUCT(10419, "Incompatible structs.", true), OBJECTNAME_CONTAINS_DOT(10420, "Table or database name may not contain dot(.) character", true), WITHIN_GROUP_NOT_ALLOWED(10421, "Not an ordered-set aggregate function: {0}. WITHIN GROUP clause is not allowed.", true), WITHIN_GROUP_PARAMETER_MISMATCH(10422, "The number of hypothetical direct arguments ({0}) must match the number of ordering columns ({1})", true), AMBIGUOUS_STRUCT_ATTRIBUTE(10423, "Attribute \"{0}\" specified more than once in structured type.", true), OFFSET_NOT_SUPPORTED_IN_SUBQUERY(10424, "OFFSET is not supported in subquery of exists", true), WITH_COL_LIST_NUM_OVERFLOW(10425, "WITH-clause query {0} returns {1} columns, but {2} labels were specified. The number of column labels must be smaller or equal to the number of expressions returned by the query.", true), NULL_TREATMENT_NOT_SUPPORTED(10426, "Function {0} does not support null treatment.", true), DATACONNECTOR_ALREADY_EXISTS(10427, "Dataconnector {0} already exists", true), DATACONNECTOR_NOT_EXISTS(10428, "Dataconnector does not exist:"), TIME_TRAVEL_NOT_ALLOWED(10429, "Time travel is not allowed for {0}. 
Please choose a storage format which supports the feature.", true), INVALID_METADATA_TABLE_NAME(10430, "Invalid metadata table name {0}.", true), TABLE_META_REF_NOT_SUPPORTED(10431, "Table Meta Ref extension is not supported for table {0}.", true), COMPACTION_REFUSED(10432, "Compaction request for {0}.{1}{2} is refused, details: {3}.", true), COMPACTION_PARTITION_EVOLUTION(10438, "Compaction for {0}.{1} on partition level is not allowed on a table that has undergone partition evolution", true), COMPACTION_NON_IDENTITY_PARTITION_SPEC(10439, "Compaction for {0}.{1} is not supported on the table with non-identity partition spec", true), CBO_IS_REQUIRED(10433, "The following functionality requires CBO (" + HiveConf.ConfVars.HIVE_CBO_ENABLED.varname + "): {0}", true), CTLF_UNSUPPORTED_FORMAT(10434, "CREATE TABLE LIKE FILE is not supported by the ''{0}'' file format", true), NON_NATIVE_ACID_UPDATE(10435, "Update and Merge to a non-native ACID table in \"merge-on-read\" mode is only supported when \"" + HiveConf.ConfVars.SPLIT_UPDATE.varname + "\"=\"true\""), READ_ONLY_DATABASE(10436, "Database {0} is read-only", true), UNEXPECTED_PARTITION_TRANSFORM_SPEC(10437, "Partition transforms are only supported by Iceberg storage handler", true), //========================== 20000 range starts here ========================// SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."), SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. " + "It may have crashed with an error."), SCRIPT_GENERIC_ERROR(20002, "Hive encountered some unknown error while " + "running your custom script."), SCRIPT_CLOSING_ERROR(20003, "An error occurred when trying to close the Operator " + "running your custom script."), DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR(20004, "Fatal error occurred when node " + "tried to create too many dynamic partitions. 
The maximum number of dynamic partitions " + "is controlled by hive.exec.max.dynamic.partitions and hive.exec.max.dynamic.partitions.pernode. "), /** * {1} is the transaction id; * use {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} to format */ OP_NOT_ALLOWED_IN_IMPLICIT_TXN(20006, "Operation {0} is not allowed in an implicit transaction ({1}).", true), /** * {1} is the transaction id; * use {@link org.apache.hadoop.hive.common.JavaUtils#txnIdToString(long)} to format */ OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction ({1},queryId={2}).", true), OP_NOT_ALLOWED_WITHOUT_TXN(20008, "Operation {0} is not allowed without an active transaction", true), ACCESS_DENIED(20009, "Access denied: {0}", "42000", true), QUOTA_EXCEEDED(20010, "Quota exceeded: {0}", "64000", true), UNRESOLVED_PATH(20011, "Unresolved path: {0}", "64000", true), FILE_NOT_FOUND(20012, "File not found: {0}", "64000", true), WRONG_FILE_FORMAT(20013, "Wrong file format. Please check the file's format.", "64000", true), REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH(20016, "File is missing from both source and cm path."), REPL_EXTERNAL_SERVICE_CONNECTION_ERROR(20017, "Failed to connect to {0} service. 
Error code {1}.",true), CLIENT_POLLING_OPSTATUS_INTERRUPTED(20018, "Interrupted while polling on the operation status", "70100"), CTLF_FAILED_INFERENCE(20019, "Failed to infer schema:"), CTLF_CLASS_NOT_FOUND(20020, "Failed to find SerDe class ({0}) for ''{1}''", true), CTLF_MISSING_STORAGE_FORMAT_DESCRIPTOR(20021, "Failed to find StorageFormatDescriptor for file format ''{0}''", true), PARQUET_FOOTER_ERROR(20022, "Failed to read parquet footer:"), PARQUET_UNHANDLED_TYPE(20023, "Unhandled type {0}", true), ORC_FOOTER_ERROR(20024, "Failed to read orc footer:"), // An exception from runtime that will show the full stack to client UNRESOLVED_RT_EXCEPTION(29999, "Runtime Error: {0}", "58004", true), //========================== 30000 range starts here ========================// STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " + "There was a error to retrieve the StatsPublisher, and retrying " + "might help. If you don't want the query to fail because accurate statistics " + "could not be collected, set hive.stats.reliable=false"), STATSPUBLISHER_INITIALIZATION_ERROR(30001, "StatsPublisher cannot be initialized. " + "There was a error in the initialization of StatsPublisher, and retrying " + "might help. If you don't want the query to fail because accurate statistics " + "could not be collected, set hive.stats.reliable=false"), STATSPUBLISHER_CONNECTION_ERROR(30002, "StatsPublisher cannot be connected to." + "There was a error while connecting to the StatsPublisher, and retrying " + "might help. If you don't want the query to fail because accurate statistics " + "could not be collected, set hive.stats.reliable=false"), STATSPUBLISHER_PUBLISHING_ERROR(30003, "Error in publishing stats. There was an " + "error in publishing stats via StatsPublisher, and retrying " + "might help. 
If you don't want the query to fail because accurate statistics " + "could not be collected, set hive.stats.reliable=false"), STATSPUBLISHER_CLOSING_ERROR(30004, "StatsPublisher cannot be closed." + "There was a error while closing the StatsPublisher, and retrying " + "might help. If you don't want the query to fail because accurate statistics " + "could not be collected, set hive.stats.reliable=false"), COLUMNSTATSCOLLECTOR_INVALID_PART_KEY(30005, "Invalid partitioning key specified in ANALYZE " + "statement"), COLUMNSTATSCOLLECTOR_INVALID_PARTITION(30007, "Invalid partitioning key/value specified in " + "ANALYZE statement"), COLUMNSTATSCOLLECTOR_PARSE_ERROR(30009, "Encountered parse error while parsing rewritten query"), DROP_COMMAND_NOT_ALLOWED_FOR_PARTITION(30011, "Partition protected from being dropped"), COLUMNSTATSCOLLECTOR_INVALID_COLUMN(30012, "Column statistics are not supported " + "for partition columns"), STATSAGGREGATOR_SOURCETASK_NULL(30014, "SourceTask of StatsTask should not be null"), STATSAGGREGATOR_CONNECTION_ERROR(30015, "Stats aggregator of type {0} cannot be connected to", true), STATS_SKIPPING_BY_ERROR(30017, "Skipping stats aggregation by error {0}", true), INVALID_FILE_FORMAT_IN_LOAD(30019, "The file that you are trying to load does not match the" + " file format of the destination table."), SCHEMA_REQUIRED_TO_READ_ACID_TABLES(30020, "Neither the configuration variables " + "schema.evolution.columns / schema.evolution.columns.types " + "nor the " + "columns / columns.types " + "are set. Table schema information is required to read ACID tables"), ACID_TABLES_MUST_BE_READ_WITH_ACID_READER(30021, "An ORC ACID reader required to read ACID tables"), ACID_TABLES_MUST_BE_READ_WITH_HIVEINPUTFORMAT(30022, "Must use HiveInputFormat to read ACID tables " + "(set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat)"), ACID_LOAD_DATA_INVALID_FILE_NAME(30023, "{0} file name is not valid in Load Data into Acid " + "table {1}. 
Examples of valid names are: 00000_0, 00000_0_copy_1", true), CONCATENATE_UNSUPPORTED_FILE_FORMAT(30030, "Concatenate/Merge only supported for RCFile and ORCFile formats"), CONCATENATE_UNSUPPORTED_TABLE_BUCKETED(30031, "Concatenate/Merge can not be performed on bucketed tables"), CONCATENATE_UNSUPPORTED_PARTITION_ARCHIVED(30032, "Concatenate/Merge can not be performed on archived partitions"), CONCATENATE_UNSUPPORTED_TABLE_NON_NATIVE(30033, "Concatenate/Merge can not be performed on non-native tables"), CONCATENATE_UNSUPPORTED_TABLE_NOT_MANAGED(30034, "Concatenate/Merge can only be performed on managed tables"), CONCATENATE_UNSUPPORTED_TABLE_TRANSACTIONAL(30035, "Concatenate/Merge can not be performed on transactional tables"), REPL_FILE_SYSTEM_OPERATION_RETRY(30047, "Replication file system operation retry expired. Error {0}", true), REPL_SOURCE_DATABASE_NOT_FOUND(30048, "Cannot dump database {0} as it does not exist", true), //========================== 40000 range starts here ========================// REPL_DATABASE_IS_TARGET_OF_REPLICATION(40003, "Cannot dump database as it is a Target of replication."), REPL_INVALID_DB_OR_TABLE_PATTERN(40005, "Invalid pattern for the DB or table name in the replication policy. 
" + "It should be a valid regex enclosed within single or double quotes."), //if the error message is changed for REPL_EVENTS_MISSING_IN_METASTORE, then need modification in getNextNotification //method in HiveMetaStoreClient REPL_EVENTS_MISSING_IN_METASTORE(40006, "Notification events are missing in the meta store."), REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(40007, "Load path {0} not valid as target database is bootstrapped " + "from some other path : {1}.", true), REPL_INVALID_CONFIG_FOR_SERVICE(40008, "Invalid config error : {0} for {1} service.", true), REPL_INVALID_INTERNAL_CONFIG_FOR_SERVICE(40009, "Invalid internal config error : {0} for {1} service.", true), REPL_RETRY_EXHAUSTED(40010, "Retry exhausted for retryable error code {0}.", true), REPL_FAILED_WITH_NON_RECOVERABLE_ERROR(40011, "Replication failed with non recoverable error. Needs manual intervention"), REPL_INVALID_ARGUMENTS(40012, "Invalid arguments error : {0}.", true), REPL_INVALID_ALTER_TABLE(40013, "{0}Unable to alter table{1}", true), REPL_PERMISSION_DENIED(40014, "{0}org.apache.hadoop.security.AccessControlException{1}", true), REPL_DISTCP_SNAPSHOT_EXCEPTION(40015, "SNAPSHOT_ERROR", true), RANGER_AUTHORIZATION_FAILED(40016, "Authorization Failure while communicating to Ranger admin", true), RANGER_AUTHENTICATION_FAILED(40017, "Authentication Failure while communicating to Ranger admin", true), REPL_INCOMPATIBLE_EXCEPTION(40018, "Cannot load into database {0} as it is replication incompatible.", true), REPL_FAILOVER_TARGET_MODIFIED(40019,"Database event id changed post table diff generation.") ; private int errorCode; private String mesg; private String sqlState; private MessageFormat format; private static final char SPACE = ' '; private static final Pattern ERROR_MESSAGE_PATTERN = Pattern.compile(".*Line [0-9]+:[0-9]+ (.*)"); private static final Pattern ERROR_CODE_PATTERN = Pattern.compile("HiveException:\\s+\\[Error ([0-9]+)\\]: (.*)"); private static Map<String, ErrorMsg> mesgToErrorMsgMap = 
new HashMap<String, ErrorMsg>(); private static Map<Pattern, ErrorMsg> formatToErrorMsgMap = new HashMap<Pattern, ErrorMsg>(); private static int minMesgLength = -1; static { for (ErrorMsg errorMsg : values()) { if (errorMsg.format != null) { String pattern = errorMsg.mesg.replaceAll("\\{[0-9]+\\}", ".*"); formatToErrorMsgMap.put(Pattern.compile("^" + pattern + "$", Pattern.DOTALL), errorMsg); } else { mesgToErrorMsgMap.put(errorMsg.getMsg().trim(), errorMsg); int length = errorMsg.getMsg().trim().length(); if (minMesgLength == -1 || length < minMesgLength) { minMesgLength = length; } } } } /** * Given a remote runtime exception, returns the ErrorMsg object associated with it. * @param e An exception * @return ErrorMsg */ public static ErrorMsg getErrorMsg(Exception e) { if (e instanceof AccessControlException) { return ACCESS_DENIED; } if (e instanceof NSQuotaExceededException) { return QUOTA_EXCEEDED; } if (e instanceof DSQuotaExceededException) { return QUOTA_EXCEEDED; } if (e instanceof UnresolvedPathException) { return UNRESOLVED_PATH; } if (e instanceof FileNotFoundException) { return FILE_NOT_FOUND; } return UNRESOLVED_RT_EXCEPTION; } /** * Given an error message string, returns the ErrorMsg object associated with it. 
* @param mesg An error message string * @return ErrorMsg */ public static ErrorMsg getErrorMsg(String mesg) { if (mesg == null) { return GENERIC_ERROR; } // first see if there is a direct match ErrorMsg errorMsg = mesgToErrorMsgMap.get(mesg); if (errorMsg != null) { return errorMsg; } for (Map.Entry<Pattern, ErrorMsg> entry : formatToErrorMsgMap.entrySet()) { if (entry.getKey().matcher(mesg).matches()) { return entry.getValue(); } } // if not see if the mesg follows type of format, which is typically the // case: // line 1:14 Table not found table_name String truncatedMesg = mesg.trim(); Matcher match = ERROR_MESSAGE_PATTERN.matcher(mesg); if (match.matches()) { truncatedMesg = match.group(1); } // appends might exist after the root message, so strip tokens off until we // match while (truncatedMesg.length() > minMesgLength) { errorMsg = mesgToErrorMsgMap.get(truncatedMesg.trim()); if (errorMsg != null) { return errorMsg; } int lastSpace = truncatedMesg.lastIndexOf(SPACE); if (lastSpace == -1) { break; } // hack off the last word and try again truncatedMesg = truncatedMesg.substring(0, lastSpace).trim(); } return GENERIC_ERROR; } /** * Given an error code, returns the ErrorMsg object associated with it. * @param errorCode An error code * @return ErrorMsg */ public static ErrorMsg getErrorMsg(int errorCode) { for (ErrorMsg errorMsg : values()) { if (errorMsg.getErrorCode() == errorCode) { return errorMsg; } } return null; } /** * For a given error message string, searches for a <code>ErrorMsg</code> enum * that appears to be a match. If a match is found, returns the * <code>SQLState</code> associated with the <code>ErrorMsg</code>. If a match * is not found or <code>ErrorMsg</code> has no <code>SQLState</code>, returns * the <code>SQLState</code> bound to the <code>GENERIC_ERROR</code> * <code>ErrorMsg</code>. 
* * @param mesg * An error message string * @return SQLState */ public static String findSQLState(String mesg) { ErrorMsg error = getErrorMsg(mesg); return error.getSQLState(); } private ErrorMsg(int errorCode, String mesg) { this(errorCode, mesg, "42000", false); } private ErrorMsg(int errorCode, String mesg, boolean format) { // 42000 is the generic SQLState for syntax error. this(errorCode, mesg, "42000", format); } private ErrorMsg(int errorCode, String mesg, String sqlState) { this(errorCode, mesg, sqlState, false); } private ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) { this.errorCode = errorCode; this.mesg = mesg; this.sqlState = sqlState; this.format = format ? new MessageFormat(mesg) : null; } public String getMsg(String reason) { return mesg + " " + reason; } public String format(String reason) { return format(new String[]{reason}); } /** * If the message is parametrized, this will fill the parameters with supplied * {@code reasons}, otherwise {@code reasons} are appended at the end of the * message. */ public String format(String... reasons) { /* Not all messages are parametrized even those that should have been, e.g {@link #INVALID_TABLE}. INVALID_TABLE is usually used with {@link #getMsg(String)}. This method can also be used with INVALID_TABLE and the like and will match getMsg(String) behavior. Another example: {@link #INVALID_PARTITION}. Ideally you want the message to have 2 parameters one for partition name one for table name. Since this is already defined w/o any parameters, one can still call {@code INVALID_PARTITION.format("<partName> <table Name>"}. This way the message text will be slightly different but at least the errorCode will match. Note this, should not be abused by adding anything other than what should have been parameter names to keep msg text standardized. 
*/ if(reasons == null || reasons.length == 0) { return getMsg(); } if(format != null) { return format.format(reasons); } if(reasons.length > 1) { StringBuilder sb = new StringBuilder(); for(String re : reasons) { if(re != null) { if(sb.length() > 0) { sb.append(" "); } sb.append(re); } } return getMsg(sb.toString()); } return getMsg(reasons[0]); } public String getErrorCodedMsg() { return "[Error " + errorCode + "]: " + mesg; } public String getErrorCodedMsg(String... reasons) { return "[Error " + errorCode + "]: " + format(reasons); } public static Pattern getErrorCodePattern() { return ERROR_CODE_PATTERN; } public String getMsg() { return mesg; } public String getSQLState() { return sqlState; } public int getErrorCode() { return errorCode; } }
apache/hive
common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
214,449
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2010-2024 DBeaver Corp and others
 * Copyright (C) 2019 Andrew Khitrin ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.model.exec.plan;

import com.google.gson.JsonObject;

/**
 * Contract for contributing driver-specific details when a query execution
 * plan is serialized to JSON.
 * <p>
 * NOTE(review): semantics below are inferred from member names and the
 * {@link JsonObject} parameter type — confirm against implementations.
 */
public interface DBCQueryPlannerSerialInfo {

    /**
     * Returns the version identifier of the serialized plan format,
     * presumably used to keep readers and writers compatible.
     */
    String version();

    /**
     * Writes the properties of {@code node} into {@code nodeJson} as part of
     * plan serialization.
     *
     * @param node     the plan node being serialized
     * @param nodeJson the JSON object receiving the node's properties
     */
    void addNodeProperties(DBCPlanNode node, JsonObject nodeJson);
}
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.model/src/org/jkiss/dbeaver/model/exec/plan/DBCQueryPlannerSerialInfo.java
214,450
/****************************************************************************** * Top contributors (to current version): * Mudathir Mohamed, Andrew Reynolds, Andres Noetzli * * This file is part of the cvc5 project. * * Copyright (c) 2009-2022 by the authors listed in the file AUTHORS * in the top-level source directory and their institutional affiliations. * All rights reserved. See the file COPYING in the top-level source * directory for licensing information. * **************************************************************************** * * A simple demonstration of the solving capabilities of the cvc5 * bit-vector solver. * */ import io.github.cvc5.*; public class Extract { public static void main(String args[]) throws CVC5ApiException { try (Solver slv = new Solver()) { slv.setLogic("QF_BV"); // Set the logic Sort bitvector32 = slv.mkBitVectorSort(32); Term x = slv.mkConst(bitvector32, "a"); Op ext_31_1 = slv.mkOp(Kind.BITVECTOR_EXTRACT, 31, 1); Term x_31_1 = slv.mkTerm(ext_31_1, x); Op ext_30_0 = slv.mkOp(Kind.BITVECTOR_EXTRACT, 30, 0); Term x_30_0 = slv.mkTerm(ext_30_0, x); Op ext_31_31 = slv.mkOp(Kind.BITVECTOR_EXTRACT, 31, 31); Term x_31_31 = slv.mkTerm(ext_31_31, x); Op ext_0_0 = slv.mkOp(Kind.BITVECTOR_EXTRACT, 0, 0); Term x_0_0 = slv.mkTerm(ext_0_0, x); Term eq = slv.mkTerm(Kind.EQUAL, x_31_1, x_30_0); System.out.println(" Asserting: " + eq); slv.assertFormula(eq); Term eq2 = slv.mkTerm(Kind.EQUAL, x_31_31, x_0_0); System.out.println(" Check entailment assuming: " + eq2); System.out.println(" Expect UNSAT. "); System.out.println(" cvc5: " + slv.checkSatAssuming(eq2.notTerm())); } } }
HanielB/cvc5
examples/api/java/Extract.java
214,451
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** A collection of labels, either for a multi-label problem (all labels are part of the same label dictionary), or a factorized labeling, (each label is part of a different dictionary). @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.types; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import com.google.errorprone.annotations.Var; /** Usually some distribution over possible labels for an instance. */ public class Labels implements AlphabetCarrying, Serializable { Label[] labels; public Labels (Label[] labels) { this.labels = new Label[labels.length]; System.arraycopy (labels, 0, this.labels, 0, labels.length); } // Number of factors public int size () { return labels.length; } public Label get (int i) { return labels[i]; } public void set (int i, Label l) { labels[i] = l; } public String toString () { @Var String ret = ""; for (int i = 0; i < labels.length; i++) { ret += labels[i].toString(); if (i < labels.length - 1) ret += " "; } return ret; } public Alphabet getAlphabet () { return labels[0].getAlphabet(); } public Alphabet[] getAlphabets () { return labels[0].getAlphabets(); } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.defaultWriteObject (); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = 
in.readInt (); in.defaultReadObject (); } }
MNCC/Mallet
src/cc/mallet/types/Labels.java
214,452
/*
 * Missing License Header, Copyright 2016 (C) Andrew Maitland <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
package pcgen.cdom.base;

/**
 * Marker (tag) interface identifying items that are not "Granted" and
 * therefore should not be able to grant other items. It declares no
 * members; its presence on a type is the entire signal.
 */
public interface Ungranted
{
	// Intentionally empty: this is a marker interface.
}
romen/pcgen
code/src/java/pcgen/cdom/base/Ungranted.java
214,453
// Tregex/Tsurgeon, FilePanel - a GUI for tree search and modification
// Copyright (c) 2007-2008 The Board of Trustees of
// The Leland Stanford Junior University. All Rights Reserved.
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
//
// This code is a GUI interface to Tregex and Tsurgeon (which were
// written by Roger Levy and Galen Andrew).
//
// For more information, bug reports, fixes, contact:
//    Christopher Manning
//    Dept of Computer Science, Gates 1A
//    Stanford CA 94305-9010
//    USA
//    Support/Questions: [email protected]
//    Licensing: [email protected]
//    http://www-nlp.stanford.edu/software/tregex.shtml

package edu.stanford.nlp.trees.tregex.gui;

import java.awt.BorderLayout;
import java.awt.Component;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.io.File;
import java.util.*;

import javax.swing.*;
import javax.swing.tree.TreeCellRenderer;
import javax.swing.tree.TreePath;

import edu.stanford.nlp.trees.TreeReaderFactory;

/**
 * Class representing the hierarchy of files in which trees may be searched and
 * allowing users to select whether to search a particular file or not.
 *
 * <p>Implemented as a lazily-created singleton ({@link #getInstance()}); the
 * panel wraps a {@link JTree} backed by a {@link FileTreeModel} and renders
 * each file node as a checkbox.
 *
 * @author Anna Rafferty
 */
public class FilePanel extends JPanel {

  private static final long serialVersionUID = -2229250395240163264L;

  // Singleton instance; created on first getInstance() call.
  private static FilePanel filePanel = null;

  private JTree tree;
  private FileTreeModel treeModel;

  /** Returns the shared panel instance, creating it on first use. */
  public static synchronized FilePanel getInstance() {
    if (filePanel == null) {
      filePanel = new FilePanel();
    }
    return filePanel;
  }

  // Private: construct only through getInstance().
  private FilePanel() {
    //data stuff
    FileTreeNode root = new FileTreeNode();
    treeModel = new FileTreeModel(root);
    tree = new JTree(treeModel);
    tree.setCellRenderer(new FileTreeCellRenderer());
    tree.setRootVisible(false);
    tree.setShowsRootHandles(true);
    // Clicking a node toggles whether its file is searched. When Tdiff mode
    // is on, activation is capped at TregexGUI.MAX_TDIFF_TREEBANKS, but an
    // already-active node may always be clicked off.
    tree.addMouseListener(new MouseAdapter() {
      @Override
      public void mouseClicked(MouseEvent e) {
        TreePath path = tree.getPathForLocation(e.getX(), e.getY());
        int nActiveTreebanks = getActiveTreebanks().size();
        //Tdiff
        boolean canActivate = (!TregexGUI.getInstance().isTdiffEnabled() || nActiveTreebanks < TregexGUI.MAX_TDIFF_TREEBANKS);
        if(path != null) {
          FileTreeNode node = (FileTreeNode) path.getLastPathComponent();
          if(canActivate || node.isActive())
            node.setActive(!node.isActive());
        }
      }
    });
    //layout/panel stuff
    this.setLayout(new BorderLayout());
    this.setBorder(BorderFactory.createTitledBorder(BorderFactory.createEmptyBorder(),"Tree files: "));
    JScrollPane scroller = new JScrollPane(tree);
    this.add(scroller, BorderLayout.CENTER);
  }

  /**
   * Sets a new tree reader factory for reading trees from files in this panel.
   * Since this may make some files with trees unable to be read, clearFiles
   * indicates if all current files should be removed from the panel.
   */
  public void setTreeReaderFactory(TreeReaderFactory trf) {
    treeModel.setTRF(trf);
  }

  /** Adds the given files/folders (subject to the filters) to the model. */
  public void loadFiles(EnumMap<TregexGUI.FilterType, String> filters, File[] files) {
    treeModel.addFileFolder(filters, files);
  }

  /**
   * Returns true if no files are loaded; false otherwise.
   */
  public boolean isEmpty() {
    return treeModel.isEmpty();
  }

  /**
   * Removes all files from the panel.
   */
  public void clearAll() {
    TreeReaderFactory oldTrf = treeModel.getTRF();//Preserve the current TRF when we refresh the tree file list
    FileTreeNode root = new FileTreeNode();
    treeModel = new FileTreeModel(root);
    setTreeReaderFactory(oldTrf);
    tree.setModel(treeModel);
    this.revalidate();
    this.repaint();
  }

  /**
   * Returns all treebanks corresponding to the files stored in the panel that
   * are selected.
   * @return active treebanks
   */
  public List<FileTreeNode> getActiveTreebanks() {
    List<FileTreeNode> active = new ArrayList<>();
    setActiveTreebanksFromParent(active, treeModel.getRoot());
    return active;
  }

  // Depth-first walk of the tree model accumulating active leaf nodes
  // (nodes that do not allow children) into `active`.
  private void setActiveTreebanksFromParent(List<FileTreeNode> active, FileTreeNode parent) {
    int numChildren = treeModel.getChildCount(parent);
    for(int i = 0; i < numChildren; i++) {
      FileTreeNode child = treeModel.getChild(parent, i);
      if(!child.getAllowsChildren()) {
        if(child.isActive())
          active.add(child);
      } else {
        setActiveTreebanksFromParent(active,child);
      }
    }
  }

  // Renders each tree cell using the node's own display component
  // (a checkbox supplied by FileTreeNode.getDisplay()).
  @SuppressWarnings("serial")
  private static class FileTreeCellRenderer extends JCheckBox implements TreeCellRenderer {

    public FileTreeCellRenderer() {
      setOpaque(true);
    }

    public Component getTreeCellRendererComponent(JTree t, Object value, boolean selected, boolean expanded, boolean leaf, int row, boolean hasFocus) {
      return ((FileTreeNode) value).getDisplay();
    }
  }

}
stanfordnlp/CoreNLP
src/edu/stanford/nlp/trees/tregex/gui/FilePanel.java
214,454
/** * StatsdClient.java * * (C) 2011 Meetup, Inc. * Author: Andrew Gwozdziewycz <[email protected]>, @apgwoz * * * * Example usage: * * StatsdClient client = new StatsdClient("statsd.example.com", 8125); * // increment by 1 * client.increment("foo.bar.baz"); * // increment by 10 * client.increment("foo.bar.baz", 10); * // sample rate * client.increment("foo.bar.baz", 10, .1); * // increment multiple keys by 1 * client.increment("foo.bar.baz", "foo.bar.boo", "foo.baz.bar"); * // increment multiple keys by 10 -- yeah, it's "backwards" * client.increment(10, "foo.bar.baz", "foo.bar.boo", "foo.baz.bar"); * // multiple keys with a sample rate * client.increment(10, .1, "foo.bar.baz", "foo.bar.boo", "foo.baz.bar"); * * // To enable multi metrics (aka more than 1 metric in a UDP packet) (disabled by default) * client.enableMultiMetrics(true); //disable by passing in false * // To fine-tune udp packet buffer size (default=1500) * client.setBufferSize((short) 1500); * // To force flush the buffer out (good idea to add to your shutdown path) * client.flush(); * * * Note: For best results, and greater availability, you'll probably want to * create a wrapper class which creates a static client and proxies to it. * * You know... the "Java way." 
*/ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.nio.channels.DatagramChannel; import java.util.Locale; import java.util.Random; import java.util.Timer; import java.util.TimerTask; import org.apache.log4j.Logger; public class StatsdClient extends TimerTask { private ByteBuffer sendBuffer; private Timer flushTimer; private boolean multi_metrics = false; private static final Random RNG = new Random(); private static final Logger log = Logger.getLogger(StatsdClient.class.getName()); private final InetSocketAddress _address; private final DatagramChannel _channel; public StatsdClient(String host, int port) throws UnknownHostException, IOException { this(InetAddress.getByName(host), port); } public StatsdClient(InetAddress host, int port) throws IOException { _address = new InetSocketAddress(host, port); _channel = DatagramChannel.open(); /* Put this in non-blocking mode so send does not block forever. */ _channel.configureBlocking(false); /* Increase the size of the output buffer so that the size is larger than our buffer size. 
*/ _channel.setOption(StandardSocketOptions.SO_SNDBUF, 4096); setBufferSize((short) 1500); } protected void finalize() { flush(); } public synchronized void setBufferSize(short packetBufferSize) { if(sendBuffer != null) { flush(); } sendBuffer = ByteBuffer.allocate(packetBufferSize); } public synchronized void enableMultiMetrics(boolean enable) { multi_metrics = enable; } public synchronized boolean startFlushTimer(long period) { if(flushTimer == null) { // period is in msecs if(period <= 0) { period = 2000; } flushTimer = new Timer(); // We pass this object in as the TimerTask (which calls run()) flushTimer.schedule((TimerTask)this, period, period); return true; } return false; } public synchronized void stopFlushTimer() { if(flushTimer != null) { flushTimer.cancel(); flushTimer = null; } } public void run() { // used by Timer, we're a Runnable TimerTask flush(); } public boolean timing(String key, int value) { return timing(key, value, 1.0); } public boolean timing(String key, int value, double sampleRate) { return send(sampleRate, String.format(Locale.ENGLISH, "%s:%d|ms", key, value)); } public boolean decrement(String key) { return increment(key, -1, 1.0); } public boolean decrement(String key, int magnitude) { return decrement(key, magnitude, 1.0); } public boolean decrement(String key, int magnitude, double sampleRate) { magnitude = magnitude < 0 ? magnitude : -magnitude; return increment(key, magnitude, sampleRate); } public boolean decrement(String... keys) { return increment(-1, 1.0, keys); } public boolean decrement(int magnitude, String... keys) { magnitude = magnitude < 0 ? magnitude : -magnitude; return increment(magnitude, 1.0, keys); } public boolean decrement(int magnitude, double sampleRate, String... keys) { magnitude = magnitude < 0 ? 
magnitude : -magnitude; return increment(magnitude, sampleRate, keys); } public boolean increment(String key) { return increment(key, 1, 1.0); } public boolean increment(String key, int magnitude) { return increment(key, magnitude, 1.0); } public boolean increment(String key, int magnitude, double sampleRate) { String stat = String.format(Locale.ENGLISH, "%s:%s|c", key, magnitude); return send(sampleRate, stat); } public boolean increment(int magnitude, double sampleRate, String... keys) { String[] stats = new String[keys.length]; for (int i = 0; i < keys.length; i++) { stats[i] = String.format(Locale.ENGLISH, "%s:%s|c", keys[i], magnitude); } return send(sampleRate, stats); } public boolean gauge(String key, double magnitude){ return gauge(key, magnitude, 1.0); } public boolean gauge(String key, double magnitude, double sampleRate){ final String stat = String.format(Locale.ENGLISH, "%s:%s|g", key, magnitude); return send(sampleRate, stat); } private boolean send(double sampleRate, String... stats) { boolean retval = false; // didn't send anything if (sampleRate < 1.0) { for (String stat : stats) { if (RNG.nextDouble() <= sampleRate) { stat = String.format(Locale.ENGLISH, "%s|@%f", stat, sampleRate); if (doSend(stat)) { retval = true; } } } } else { for (String stat : stats) { if (doSend(stat)) { retval = true; } } } return retval; } private synchronized boolean doSend(String stat) { try { final byte[] data = stat.getBytes("utf-8"); // If we're going to go past the threshold of the buffer then flush. // the +1 is for the potential '\n' in multi_metrics below if(sendBuffer.remaining() < (data.length + 1)) { flush(); } if(sendBuffer.position() > 0) { // multiple metrics are separated by '\n' sendBuffer.put( (byte) '\n'); } sendBuffer.put(data); // append the data if(! 
multi_metrics) { flush(); } return true; } catch (IOException e) { log.error( String.format("Could not send stat %s to host %s:%d", sendBuffer.toString(), _address.getHostName(), _address.getPort()), e); return false; } } public synchronized boolean flush() { try { final int sizeOfBuffer = sendBuffer.position(); if(sizeOfBuffer <= 0) { return false; } // empty buffer // send and reset the buffer sendBuffer.flip(); final int nbSentBytes = _channel.send(sendBuffer, _address); sendBuffer.limit(sendBuffer.capacity()); sendBuffer.rewind(); if (sizeOfBuffer == nbSentBytes) { return true; } else { log.error(String.format( "Could not send entirely stat %s to host %s:%d. Only sent %d bytes out of %d bytes", sendBuffer.toString(), _address.getHostName(), _address.getPort(), nbSentBytes, sizeOfBuffer)); return false; } } catch (IOException e) { /* This would be a good place to close the channel down and recreate it. */ log.error( String.format("Could not send stat %s to host %s:%d", sendBuffer.toString(), _address.getHostName(), _address.getPort()), e); return false; } } }
kadaan/statsd
examples/StatsdClient.java
214,455
/* * DBeaver - Universal Database Manager * Copyright (C) 2017 Andrew Khitrin ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.ui.locks.graph; import org.eclipse.gef.EditPart; import org.eclipse.gef.EditPartFactory; import org.jkiss.dbeaver.model.impl.admin.locks.LockGraph; import org.jkiss.dbeaver.model.impl.admin.locks.LockGraphEdge; import org.jkiss.dbeaver.model.impl.admin.locks.LockGraphNode; public class LockGraphEditPartFactory implements EditPartFactory { public EditPart createEditPart(EditPart context, Object model) { EditPart editPart = null; if (model instanceof LockGraph) { editPart = new LockGraphEditPart(); } else if (model instanceof LockGraphEdge) { editPart = new LockGraphEdgeEditPart(); } else if (model instanceof LockGraphNode) { editPart = new LockGraphNodeEditPart(); } if (editPart != null) { editPart.setModel(model); } return editPart; } }
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.ui.locks/src/org/jkiss/dbeaver/ext/ui/locks/graph/LockGraphEditPartFactory.java
214,456
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2024 DBeaver Corp and others * Copyright (C) 2017-2018 Andrew Khitrin ([email protected]) * Copyright (C) 2017-2018 Alexander Fedorov ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.debug; import org.eclipse.core.runtime.ListenerList; import org.jkiss.dbeaver.DBException; import org.jkiss.dbeaver.Log; import org.jkiss.dbeaver.debug.jdbc.DBGJDBCSession; import org.jkiss.dbeaver.model.DBPDataSourceContainer; import org.jkiss.dbeaver.model.messages.ModelMessages; import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor; import java.util.HashMap; import java.util.Map; public abstract class DBGBaseController implements DBGController { private static final Log log = Log.getLog(DBGBaseController.class); private final DBPDataSourceContainer dataSourceContainer; private final Map<String, Object> configuration; private ListenerList<DBGEventHandler> eventHandlers = new ListenerList<>(); protected DBGBaseController(DBPDataSourceContainer dataSourceContainer, Map<String, Object> configuration) { this.dataSourceContainer = dataSourceContainer; this.configuration = new HashMap<>(configuration); } @Override public DBPDataSourceContainer getDataSourceContainer() { return dataSourceContainer; } @Override public Map<String, Object> getDebugConfiguration() { return new HashMap<>(configuration); } @Override public DBGSession openSession(DBRProgressMonitor monitor) throws DBGException { if 
(!dataSourceContainer.isConnected()) { try { dataSourceContainer.connect(monitor, true, true); } catch (DBException e) { throw new DBGException(e, dataSourceContainer.getDataSource()); } } if (!dataSourceContainer.isConnected()) { throw new DBGException(ModelMessages.error_not_connected_to_database); } return createSession(monitor, configuration); } @Override public void dispose() { Object[] listeners = eventHandlers.getListeners(); for (Object listener : listeners) { unregisterEventHandler((DBGEventHandler) listener); } } public abstract DBGJDBCSession createSession(DBRProgressMonitor monitor, Map<String, Object> configuration) throws DBGException; @Override public void registerEventHandler(DBGEventHandler eventHandler) { eventHandlers.add(eventHandler); } @Override public void unregisterEventHandler(DBGEventHandler eventHandler) { eventHandlers.remove(eventHandler); } public void fireEvent(DBGEvent event) { for (DBGEventHandler eventHandler : eventHandlers) { eventHandler.handleDebugEvent(event); } } }
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.debug.core/src/org/jkiss/dbeaver/debug/DBGBaseController.java
214,457
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2010-2024 DBeaver Corp and others
 * 2017 Andrew Khitrin ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ext.postgresql.model;

import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.model.DBPScriptObject;
import org.jkiss.dbeaver.model.impl.jdbc.JDBCUtils;
import org.jkiss.dbeaver.model.meta.Property;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBStructUtils;
import org.jkiss.dbeaver.model.struct.rdb.DBSTable;
import org.jkiss.dbeaver.model.struct.rdb.DBSTablePartition;

import java.sql.ResultSet;
import java.util.Map;

/**
 * A PostgreSQL table partition: a PostgreTable flagged as a partition, with
 * a partition expression (e.g. "FOR VALUES ...") and a reference to the
 * table it partitions.
 */
public class PostgreTablePartition extends PostgreTable implements DBSTablePartition {

    public static final String CAT_PARTITIONING = "Partitioning";

    // Partition bound clause, e.g. "FOR VALUES FROM ... TO ...".
    private String partitionExpression;
    // The parent table this partition belongs to.
    private PostgreTable partitionOf;

    // Constructor for a new, not-yet-persisted partition of `container`.
    public PostgreTablePartition(PostgreTable container) {
        super(container);
        this.partitionExpression = "FOR VALUES ";
        this.setPartition(true);
        this.setName("newpartition");
        this.partitionOf = container;
    }

    // Constructor for a partition loaded from database metadata.
    // NOTE(review): this path never assigns partitionOf, so getParentTable()
    // (annotated @NotNull) can return null for loaded partitions unless it is
    // set elsewhere — confirm against callers.
    public PostgreTablePartition(PostgreTableContainer container, ResultSet dbResult) {
        super(container, dbResult);
        this.setPartition(true);
        this.partitionExpression = JDBCUtils.safeGetString(dbResult, "partition_expr");
    }

    @NotNull
    @Override
    public DBSTable getParentTable() {
        return partitionOf;
    }

    @Override
    public boolean needFullPath() {
        // Postgres tables can be queried directly without a parent table.
        return false;
    }

    @Property(viewable = true, editable = true, updatable = true, order = 60)
    @Nullable
    public String getPartitionExpression() {
        return partitionExpression;
    }

    public void setPartitionExpression(String expr) {
        this.partitionExpression = expr;
    }

    /**
     * Generates the DDL for this partition, suppressing foreign keys, nested
     * objects and permissions in the produced script.
     */
    @Override
    public String getObjectDefinitionText(DBRProgressMonitor monitor, Map<String, Object> options) throws DBException {
        options.put(DBPScriptObject.OPTION_DDL_SKIP_FOREIGN_KEYS, true);
        options.put(OPTION_DDL_SEPARATE_FOREIGN_KEYS_STATEMENTS, false);
        options.put(OPTION_INCLUDE_NESTED_OBJECTS, false);
        options.put(OPTION_INCLUDE_PERMISSIONS, false);
        return DBStructUtils.generateTableDDL(monitor, this, options, false);
    }

    public PostgreTable getPartitionOf() {
        return partitionOf;
    }

    @Override
    public boolean isSubPartition() {
        // A sub-partition is a partition whose parent is itself a partition.
        return partitionOf instanceof PostgreTablePartition;
    }

    @Nullable
    @Override
    public DBSTablePartition getPartitionParent() {
        if (partitionOf instanceof PostgreTablePartition) {
            return (PostgreTablePartition) partitionOf;
        }
        return null;
    }
}
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.postgresql/src/org/jkiss/dbeaver/ext/postgresql/model/PostgreTablePartition.java
214,458
/*
 * MCMC.java
 *
 * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA  02110-1301  USA
 */

package dr.inference.smc;

import dr.inference.loggers.LogColumn;
import dr.inference.loggers.Loggable;
import dr.inference.loggers.Logger;
import dr.inference.markovchain.MarkovChain;
import dr.inference.markovchain.MarkovChainListener;
import dr.inference.mcmc.MCMCCriterion;
import dr.inference.mcmc.MCMCOptions;
import dr.inference.model.Likelihood;
import dr.inference.model.Model;
import dr.inference.operators.*;
import dr.inference.state.Factory;
import dr.inference.state.StateLoader;
import dr.inference.state.StateLoaderSaver;
import dr.inference.state.StateSaver;
import dr.util.Identifiable;
import dr.util.NumberFormatter;
import dr.xml.Spawnable;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;

/**
 * A class that runs short MCMC chains for each of a set of particles as
 * part of a sequential Monte Carlo (SMC) sampler.
 *
 * <p>One shared {@link MarkovChain} is reused for every particle: each
 * particle's state is loaded, a fixed-length chain is run, and the final
 * state is saved back to the particle.
 *
 * @author Andrew Rambaut
 * @version $Id:$
 */
public class SMC implements Identifiable, Spawnable, Loggable {

    /**
     * @param id identifier for this SMC run
     * @param particleStates one loader/saver per particle; copied into an
     *        internal list
     */
    public SMC(String id, List<StateLoaderSaver> particleStates) {
        this.id = id;
        this.particleStates.addAll(particleStates);
    }

    /**
     * Must be called before calling chain.
     *
     * @param options    the options for this SMC analysis
     * @param schedule   operator schedule to be used in chain.
     * @param likelihood the likelihood for this SMC
     * @param loggers    an array of loggers to record output of this SMC run
     */
    public void init(
            SMCOptions options,
            Likelihood likelihood,
            OperatorSchedule schedule,
            Logger[] loggers) {

        MCMCCriterion criterion = new MCMCCriterion();

        // full evaluation tests and operator adaptation are off as these are
        // multiple short runs. Operator tuning will have already been done.
        mc = new MarkovChain(likelihood, schedule, criterion, 0, 0, 0, false, false);

        this.options = options;
        this.loggers = loggers;
        this.schedule = schedule;

        //initialize transients
        currentState = 0;

        // States are saved at the end of each particle's run
//        if (Factory.INSTANCE != null) {
//            for (MarkovChainListener listener : Factory.INSTANCE.getStateSaverChainListeners()) {
//                mc.addMarkovChainListener(listener);
//            }
//        }
    }

    public MarkovChain getMarkovChain() {
        return mc;
    }

    public Logger[] getLoggers() {
        return loggers;
    }

    public OperatorSchedule getOperatorSchedule() {
        return schedule;
    }

    public void run() {
        chain();
    }

    /**
     * This method actually initiates the MCMC analysis.
     * For each particle in turn: load its state, run a chain of
     * options.getChainLength() steps, then save the state back.
     */
    public void chain() {

        currentState = 0;

        timer.start();

        if (loggers != null) {
            for (Logger logger : loggers) {
                logger.startLogging();
            }
        }

        mc.addMarkovChainListener(chainListener);

        for (StateLoaderSaver particleState : particleStates) {
            // Don't need the savedLnL - it won't be there
            particleState.loadState(mc, new double[1]);

            // reset the current chain length to 0
            mc.setCurrentLength(0);

            mc.runChain(options.getChainLength(), true);

            // Save state to file...
            particleState.saveState(mc, mc.getCurrentLength(), mc.getCurrentScore());
        }

        mc.terminateChain();

        mc.removeMarkovChainListener(chainListener);

        timer.stop();
    }

    // Single "time" column reporting elapsed wall-clock seconds.
    @Override
    public LogColumn[] getColumns() {
        return new LogColumn[] { new LogColumn() {
            @Override
            public void setLabel(String label) {
            }

            @Override
            public String getLabel() {
                return "time";
            }

            @Override
            public void setMinimumWidth(int minimumWidth) {
            }

            @Override
            public int getMinimumWidth() {
                return 0;
            }

            @Override
            public String getFormatted() {
                return Double.toString(getTimer().toSeconds());
            }
        } };
    }

    private final MarkovChainListener chainListener = new MarkovChainListener() {

        // MarkovChainListener interface *******************************************
        // for receiving messages from subordinate MarkovChain

        /**
         * Called to update the current model keepEvery states.
         */
        @Override
        public void currentState(long state, MarkovChain markovChain, Model currentModel) {
            currentState = state;

            if (loggers != null) {
                for (Logger logger : loggers) {
                    logger.log(state);
                }
            }
        }

        /**
         * Called when a new new best posterior state is found.
         */
        @Override
        public void bestState(long state, MarkovChain markovChain, Model bestModel) { }

        /**
         * cleans up when the chain finishes (possibly early).
         */
        @Override
        public void finished(long chainLength, MarkovChain markovChain) {
            currentState = chainLength;

            if (loggers != null) {
                for (Logger logger : loggers) {
                    logger.log(currentState);
                    logger.stopLogging();
                }
            }
        }
    };

    /**
     * @return the likelihood function.
     */
    public Likelihood getLikelihood() {
        return mc.getLikelihood();
    }

    /**
     * @return the timer.
     */
    public dr.util.Timer getTimer() {
        return timer;
    }

    // TRANSIENT PUBLIC METHODS *****************************************

    /**
     * @return the current state of the MCMC analysis.
     */
    public final long getCurrentState() {
        return currentState;
    }

    /**
     * @return the progress (0 to 1) of the MCMC analysis.
     * Note: measures one particle's chain, not the whole particle loop.
     */
    public final double getProgress() {
        return (double) currentState / (double) options.getChainLength();
    }

    // Always reports spawnable; the `spawnable` field below is unused.
    public boolean getSpawnable() {
        return true;
    }

    // NOTE(review): never read — getSpawnable() returns a constant.
    private boolean spawnable = true;

    //PRIVATE METHODS *****************************************

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    // PRIVATE TRANSIENTS

    private final dr.util.Timer timer = new dr.util.Timer();

    private long currentState = 0;

    // NOTE(review): unused in this class.
    private final NumberFormatter formatter = new NumberFormatter(8);

    /**
     * this markov chain does most of the work.
     */
    private MarkovChain mc;

    /**
     * the options of this MCMC analysis
     */
    private SMCOptions options;

    private final List<StateLoaderSaver> particleStates = new ArrayList<StateLoaderSaver>();

    private Logger[] loggers;

    private OperatorSchedule schedule;

    private String id = null;
}
gkarthik/beast-mcmc
src/dr/inference/smc/SMC.java
214,459
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2017 Andrew Khitrin ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ext.ui.locks.graph;

import org.eclipse.gef.EditPart;
import org.eclipse.gef.commands.Command;
import org.eclipse.gef.editpolicies.XYLayoutEditPolicy;
import org.eclipse.gef.requests.CreateRequest;

/**
 * XY-layout policy for the lock graph. Both overrides return null, making
 * the diagram effectively read-only: children cannot be moved/resized and
 * nothing can be created on the canvas.
 */
public class LockGraphXYLayoutEditPolicy extends XYLayoutEditPolicy {

    @Override
    protected Command createChangeConstraintCommand(EditPart child, Object constraint) {
        // No command: constraint (position/size) changes are not allowed.
        return null;
    }

    @Override
    protected Command getCreateCommand(CreateRequest request) {
        // No command: creating new elements on the graph is not allowed.
        return null;
    }

}
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.ui.locks/src/org/jkiss/dbeaver/ext/ui/locks/graph/LockGraphXYLayoutEditPolicy.java
214,460
/****************************************************************************** * Top contributors (to current version): * Mudathir Mohamed, Andrew Reynolds, Andres Noetzli * * This file is part of the cvc5 project. * * Copyright (c) 2009-2022 by the authors listed in the file AUTHORS * in the top-level source directory and their institutional affiliations. * All rights reserved. See the file COPYING in the top-level source * directory for licensing information. * **************************************************************************** * * A simple demonstration of the Sygus API. * * A simple demonstration of how to use the Sygus API to synthesize a simple * invariant. This is a direct translation of sygus-inv.cpp. */ import static io.github.cvc5.Kind.*; import io.github.cvc5.*; public class SygusInv { public static void main(String args[]) throws CVC5ApiException { try (Solver slv = new Solver()) { // required options slv.setOption("sygus", "true"); slv.setOption("incremental", "false"); // set the logic slv.setLogic("LIA"); Sort integer = slv.getIntegerSort(); Sort bool = slv.getBooleanSort(); Term zero = slv.mkInteger(0); Term one = slv.mkInteger(1); Term ten = slv.mkInteger(10); // declare input variables for functions Term x = slv.mkVar(integer, "x"); Term xp = slv.mkVar(integer, "xp"); // (ite (< x 10) (= xp (+ x 1)) (= xp x)) Term ite = slv.mkTerm(ITE, slv.mkTerm(LT, x, ten), slv.mkTerm(EQUAL, xp, slv.mkTerm(ADD, x, one)), slv.mkTerm(EQUAL, xp, x)); // define the pre-conditions, transition relations, and post-conditions Term pre_f = slv.defineFun("pre-f", new Term[] {x}, bool, slv.mkTerm(EQUAL, x, zero)); Term trans_f = slv.defineFun("trans-f", new Term[] {x, xp}, bool, ite); Term post_f = slv.defineFun("post-f", new Term[] {x}, bool, slv.mkTerm(LEQ, x, ten)); // declare the invariant-to-synthesize Term inv_f = slv.synthInv("inv-f", new Term[] {x}); slv.addSygusInvConstraint(inv_f, pre_f, trans_f, post_f); // print solutions if available if 
(slv.checkSynth().hasSolution()) { // Output should be equivalent to: // ( // (define-fun inv-f ((x Int)) Bool (not (>= x 11))) // ) Term[] terms = new Term[] {inv_f}; Utils.printSynthSolutions(terms, slv.getSynthSolutions(terms)); } } } }
HanielB/cvc5
examples/api/java/SygusInv.java
214,461
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2017 Andrew Khitrin ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ext.ui.locks.graph;

import org.eclipse.gef.commands.Command;
import org.eclipse.gef.editpolicies.ConnectionEditPolicy;
import org.eclipse.gef.requests.GroupRequest;

/**
 * Connection policy for lock-graph edges. Returning null from the delete
 * command makes the connections non-deletable in the diagram.
 */
public class LockGraphConnectionEditPolicy extends ConnectionEditPolicy {

    @Override
    protected Command getDeleteCommand(GroupRequest request) {
        // No command: lock-graph connections cannot be deleted by the user.
        return null;
    }

}
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.ui.locks/src/org/jkiss/dbeaver/ext/ui/locks/graph/LockGraphConnectionEditPolicy.java
214,462
/** * "[Andrew, 1979] discovered an alternative to the Graham scan that uses a linear lexicographic * sort of the point set by the x and y-coordinates. This is an advantage if this ordering is * already known for a set, which is sometimes the case. But even if sorting is required, this is a * faster sort than the angular Graham-scan sort with its more complicated comparison function. The * "Monotone Chain" algorithm computes the upper and lower hulls of a monotone chain of points, * which is why we refer to it as the "Monotone Chain" algorithm. Like the Graham scan, it runs in * O(nlog-n) time due to the sort time. After that, it only takes O(n) time to compute the hull." - * Dan Sunday * * <p>This code is a modification of the monotone chains algorithm found on wikibooks. * https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain * * <p>Time Complexity: O(nlogn) * * @author A. M. Andrew, Dan Sunday, William Fiset */ package com.williamfiset.algorithms.geometry; import static java.lang.Math.abs; import java.awt.geom.*; import java.util.*; public class ConvexHullMonotoneChainsAlgorithm { // Small epsilon used for double value comparison. private static final double EPS = 1e-5; // Sorts points by first x coordinate and then y coordinate. private static class PointComparator implements Comparator<Point2D> { public int compare(Point2D p1, Point2D p2) { if (abs(p1.getX() - p2.getX()) < EPS) { if (abs(p1.getY() - p2.getY()) < EPS) return 0; else if (p1.getY() > p2.getY()) return 1; } else if (p1.getX() > p2.getX()) return 1; return -1; } } // Use the monotone chains algorithm to find the // convex hull of a set of points in O(nlogn) time. public static Point2D[] convexHull(Point2D[] pts) { int n = pts.length, k = 0; if (n <= 1) return pts; Point2D[] hull = new Point2D[2 * n]; Arrays.sort(pts, new PointComparator()); // Build upper chain. 
for (int i = 0; i < n; i++) { while (k >= 2 && orientation(hull[k - 2], hull[k - 1], pts[i]) <= 0) k--; hull[k++] = pts[i]; } int lastUpperChainIndex = k; // Build lower chain. for (int i = n - 2; i >= 0; i--) { while (k > lastUpperChainIndex && orientation(hull[k - 2], hull[k - 1], pts[i]) <= 0) k--; hull[k++] = pts[i]; } // Conserve only unique points. int index = 1; Point2D lastPt = hull[0]; for (int i = 1; i < k - 1; i++) { if (!hull[i].equals(lastPt)) { hull[index++] = lastPt = hull[i]; } } return Arrays.copyOfRange(hull, 0, index); } // To find orientation of point 'c' relative to the line segment (a, b). // Imagine yourself standing at point 'a' looking out towards point 'b'. // Returns 0 if all three points are collinear. // Returns -1 if 'c' is clockwise to segment (a, b), i.e right of line formed by the segment. // Returns +1 if 'c' is counter clockwise to segment (a, b), i.e left of line // formed by the segment. private static int orientation(Point2D a, Point2D b, Point2D c) { double value = (b.getY() - a.getY()) * (c.getX() - b.getX()) - (b.getX() - a.getX()) * (c.getY() - b.getY()); if (abs(value) < EPS) return 0; return (value > 0) ? -1 : +1; } }
williamfiset/Algorithms
src/main/java/com/williamfiset/algorithms/geometry/ConvexHullMonotoneChainsAlgorithm.java
214,463
/*
 * KMLRenderer.java
 *
 * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA  02110-1301  USA
 */

package dr.geo;

import java.awt.*;
import java.awt.geom.AffineTransform;
import java.awt.geom.GeneralPath;
import java.awt.geom.Rectangle2D;
import java.awt.image.BufferedImage;
import java.awt.image.Raster;
import java.util.ArrayList;

/**
 * Renders polygons read from a KML file into a raster image and derives a
 * binary occupancy lattice from that image: cells whose rendered color is
 * closer to {@code shapeColor} than to {@code background} are marked 1,
 * all others 0.
 *
 * @author Alexei Drummond
 */
public class KMLRenderer implements Lattice {

    // Last image produced by render(); also the source of the lattice.
    BufferedImage image;
    // lattice[i][j] == 1 iff pixel (i, j) was classified as inside a shape.
    int[][] lattice;
    // Bounding box of all polygons, in KML (world) coordinates.
    Rectangle2D bounds;

    java.util.List<AbstractPolygon2D> polygons;
    java.util.List<Shape> shapes;

    ViewTransform viewTransform;

    Color shapeColor;
    Color background;

    /**
     * Reads all polygons from the given KML file and precomputes their AWT
     * shapes plus the union bounding box. Progress is reported on stdout.
     */
    public KMLRenderer(String kmlFileName, Color shapeColor, Color background) {

        polygons = Polygon2D.readKMLFile(kmlFileName);

        this.shapeColor = shapeColor;
        this.background = background;

        System.out.println("Read " + polygons.size() + " polygons");

        System.out.println("Converting polygons to shapes");

        double minX = Double.MAX_VALUE, maxX = -Double.MAX_VALUE, minY = Double.MAX_VALUE, maxY = -Double.MAX_VALUE;

        shapes = new ArrayList<Shape>();
        for (AbstractPolygon2D p : polygons) {
            Shape shape = p.getShape();
            // Per-shape bounds, only used to grow the running min/max;
            // the field is overwritten with the union box below.
            bounds = shape.getBounds();

            if (bounds.getMinX() < minX) minX = bounds.getMinX();
            if (bounds.getMaxX() > maxX) maxX = bounds.getMaxX();
            if (bounds.getMinY() < minY) minY = bounds.getMinY();
            if (bounds.getMaxY() > maxY) maxY = bounds.getMaxY();

            shapes.add(shape);
            System.out.print(".");
            System.out.flush();
        }
        // Union bounding box over all polygons.
        bounds = new Rectangle2D.Double(minX, minY, maxX - minX, maxY - minY);
        System.out.println();
    }

    public Rectangle2D getBounds() {
        return bounds;
    }

    /**
     * Renders with the longer image side set to {@code size}; the shorter side
     * is scaled to preserve the aspect ratio of {@link #getBounds()}.
     */
    public BufferedImage render(int size) {
        int width;
        int height;

        if (bounds.getHeight() > bounds.getWidth()) {
            height = size;
            width = (int) (height * bounds.getWidth() / bounds.getHeight());
        } else {
            width = size;
            height = (int) (width * bounds.getHeight() / bounds.getWidth());
        }
        return render(width, height);
    }

    /**
     * Renders the shapes into a new ARGB image of the given dimensions and
     * rebuilds the lattice by classifying every pixel as shape vs background
     * (nearest color in squared-component distance).
     */
    public BufferedImage render(int width, int height) {
        image = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);

        render(image);

        Raster raster = image.getData();

        lattice = new int[width][height];
        int[] pixel = new int[4];
        for (int i = 0; i < width; i++) {
            for (int j = 0; j < height; j++) {
                raster.getPixel(i, j, pixel);
                if (colorDistanceSquared(pixel, shapeColor) < colorDistanceSquared(pixel, background)) {
                    lattice[i][j] = 1;
                } else {
                    lattice[i][j] = 0;
                }
            }
        }

        return image;
    }

    // Squared Euclidean distance between a raster pixel and a Color, over all
    // four components.
    // NOTE(review): pixel[0] is compared against alpha, i.e. the sample order
    // is assumed to be A,R,G,B — for TYPE_INT_ARGB rasters getPixel typically
    // yields R,G,B,A. Both reference colors are presumably opaque so the
    // classification may still work, but verify the band order.
    private double colorDistanceSquared(int[] pixel, Color color) {
        double[] argb = new double[4];
        argb[0] = Math.abs(pixel[0] - color.getAlpha());
        argb[1] = Math.abs(pixel[1] - color.getRed());
        argb[2] = Math.abs(pixel[2] - color.getGreen());
        argb[3] = Math.abs(pixel[3] - color.getBlue());

        double dist = 0;
        for (double a : argb) {
            dist += a * a;
        }
        return dist;
    }

    /**
     * Fills the image with the background color, then draws every shape in
     * {@code shapeColor}, antialiased, after mapping world coordinates to
     * image coordinates via a fresh ViewTransform.
     */
    public void render(BufferedImage image) {

        Graphics2D g2d = image.createGraphics();

        g2d.setColor(background);
        g2d.fillRect(0, 0, image.getWidth(), image.getHeight());

        viewTransform = new ViewTransform(bounds, image.getWidth(), image.getHeight());

        g2d.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

        g2d.setColor(shapeColor);
        AffineTransform transform = viewTransform.getTransform();
        for (Shape s : shapes) {
            GeneralPath path = new GeneralPath(s);
            path.transform(transform);
            g2d.fill(path);
        }
    }

    // Lattice interface: dimensions and per-cell state accessors.
    // All of these require render() to have been called first (lattice != null).

    public int latticeWidth() {
        return lattice.length;
    }

    public int latticeHeight() {
        return lattice[0].length;
    }

    public void setState(int i, int j, int state) {
        lattice[i][j] = state;
    }

    public int getState(int i, int j) {
        return lattice[i][j];
    }

    public void paintLattice(Graphics g) {
        g.drawImage(image, 0, 0, null);
    }

    public void setBounds(Rectangle2D bounds) {
        this.bounds = bounds;
    }
}
maxbiostat/beast-mcmc
src/dr/geo/KMLRenderer.java
214,464
/*
 * Copyright (c) 2012 jMonkeyEngine
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of 'jMonkeyEngine' nor the names of its contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package com.jme3.asset;

import com.jme3.asset.cache.AssetCache;
import com.jme3.post.FilterPostProcessor;

/**
 * Used to load FilterPostProcessors which are not cached.
 *
 * @author Andrew Wason
 */
public class FilterKey extends AssetKey<FilterPostProcessor> {

    /**
     * Creates a key for the filter post-processor asset at the given path.
     */
    public FilterKey(String name) {
        super(name);
    }

    /**
     * Serialization-only constructor; do not use directly.
     */
    public FilterKey() {
        super();
    }

    @Override
    public Class<? extends AssetCache> getCacheType() {
        // Do not cache filter processors
        return null;
    }
}
CyberFlameGO/jmonkeyengine
jme3-core/src/main/java/com/jme3/asset/FilterKey.java
214,465
/* * Ungranted.java * Missing License Header, Copyright 2016 (C) Andrew Maitland <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ package pcgen.cdom.base; /** * This interface is a tag used to identify items that are not "Granted", thus * should not be able to grant other items */ public interface Ungranted { }
irobin591/pcgen
code/src/java/pcgen/cdom/base/Ungranted.java
214,466
/**
 * Copyright © 2016-2024 The Thingsboard Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.thingsboard.server.service.ws.telemetry.cmd.v1;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import org.thingsboard.server.service.ws.WsCmdType;

/**
 * WebSocket subscription command for time-series telemetry data.
 * Getters/setters/equals/hashCode are generated by Lombok.
 *
 * @author Andrew Shvayka
 */
@NoArgsConstructor
@AllArgsConstructor
@Data
@EqualsAndHashCode(callSuper = true)
public class TimeseriesSubscriptionCmd extends SubscriptionCmd {

    // Window start timestamp — presumably epoch millis; confirm against caller.
    private long startTs;
    // Length of the requested time window, in the same unit as startTs.
    private long timeWindow;
    // Aggregation interval within the window.
    private long interval;
    // Maximum number of data points to return.
    private int limit;
    // Aggregation function name; semantics resolved by the server side.
    private String agg;

    @Override
    public WsCmdType getType() {
        return WsCmdType.TIMESERIES;
    }
}
thingsboard/thingsboard
application/src/main/java/org/thingsboard/server/service/ws/telemetry/cmd/v1/TimeseriesSubscriptionCmd.java
214,467
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** The number of instances in which each feature occurs. Note that we aren't attending to the feature's value, and MALLET doesn't currently have any support at all for categorical features. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.types; public class FeatureCounts extends RankedFeatureVector { // increment by 1 for each instance that has the feature, ignoring the feature's value static boolean countInstances = true; private static double[] calcFeatureCounts (InstanceList instances) { int numInstances = instances.size(); int numClasses = instances.getTargetAlphabet().size(); int numFeatures = instances.getDataAlphabet().size(); double[] counts = new double[numFeatures]; double count; for (int i = 0; i < instances.size(); i++) { Instance inst = instances.get(i); if (!(inst.getData() instanceof FeatureVector)) { throw new IllegalArgumentException ("Currently only handles FeatureVector data"); } FeatureVector fv = (FeatureVector) inst.getData (); if (instances.getInstanceWeight(i) == 0) { continue; } for (int j = 0; j < fv.numLocations(); j++) { if (countInstances) { counts[fv.indexAtLocation(j)] += 1; } else { counts[fv.indexAtLocation(j)] += fv.valueAtLocation(j); } } } return counts; } public FeatureCounts (InstanceList instances) { super (instances.getDataAlphabet(), calcFeatureCounts (instances)); } public FeatureCounts (Alphabet vocab, double[] counts) { super (vocab, counts); } public static class Factory implements RankedFeatureVector.Factory { public Factory () {} public RankedFeatureVector 
newRankedFeatureVector (InstanceList instances) { return new FeatureCounts (instances); } } }
MNCC/Mallet
src/cc/mallet/types/FeatureCounts.java
214,469
404: Not Found
HanielB/cvc5
examples/api/java/ParserSymbolManager.java
214,470
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2017 Andrew Khitrin ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ext.exasol.ui.editors;

import org.eclipse.jface.action.IContributionManager;
import org.eclipse.jface.action.Separator;
import org.eclipse.swt.widgets.Composite;
import org.jkiss.dbeaver.ext.exasol.model.ExasolDataSource;
import org.jkiss.dbeaver.ext.exasol.model.lock.ExasolLock;
import org.jkiss.dbeaver.ext.exasol.model.lock.ExasolLockManager;
import org.jkiss.dbeaver.ext.ui.locks.edit.AbstractLockEditor;
import org.jkiss.dbeaver.ext.ui.locks.manage.LockManagerViewer;
import org.jkiss.dbeaver.model.admin.locks.DBAServerLock;
import org.jkiss.dbeaver.model.admin.locks.DBAServerLockItem;
import org.jkiss.dbeaver.model.admin.locks.DBAServerLockManager;
import org.jkiss.dbeaver.model.exec.DBCExecutionContext;

import java.math.BigInteger;
import java.util.HashMap;

/**
 * Lock editor for Exasol: shows server locks via a LockManagerViewer backed
 * by an ExasolLockManager.
 */
public class ExasolLockEditor extends AbstractLockEditor {

    // Detail-query parameter keys for the holding and waiting session ids.
    public static final String sidHold = "hsid";
    public static final String sidWait = "wsid";

    @SuppressWarnings("unchecked")
    @Override
    protected LockManagerViewer createLockViewer(DBCExecutionContext executionContext, Composite parent) {

        @SuppressWarnings("rawtypes")
        DBAServerLockManager<DBAServerLock, DBAServerLockItem> lockManager =
            (DBAServerLockManager) new ExasolLockManager((ExasolDataSource) executionContext.getDataSource());

        return new LockManagerViewer(this, parent, lockManager) {

            @Override
            protected void contributeToToolbar(DBAServerLockManager<DBAServerLock, DBAServerLockItem> sessionManager,
                                               IContributionManager contributionManager) {
                // No Exasol-specific toolbar actions; just add a separator.
                contributionManager.add(new Separator());
            }

            @SuppressWarnings("serial")
            @Override
            protected void onLockSelect(final DBAServerLock lock) {
                super.onLockSelect(lock);
                if (lock != null) {
                    final ExasolLock pLock = (ExasolLock) lock;
                    // Refresh the detail panel with the holder/waiter session ids
                    // (double-brace initialized map, hence the @SuppressWarnings("serial")).
                    super.refreshDetail(new HashMap<String, Object>() {{
                        put(sidHold, BigInteger.valueOf(pLock.getHold_sid()));
                        put(sidWait, BigInteger.valueOf(pLock.getWait_sid().longValue()));
                    }});
                }
            }
        };
    }
}
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.exasol.ui/src/org/jkiss/dbeaver/ext/exasol/ui/editors/ExasolLockEditor.java
214,471
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase; import java.nio.ByteBuffer; import java.util.Collection; import java.util.List; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; @InterfaceAudience.Private public class KeyValueTestUtil { public static KeyValue create(String row, String family, String qualifier, long timestamp, String value) { return create(row, family, qualifier, timestamp, KeyValue.Type.Put, value); } public static KeyValue create(String row, String family, String qualifier, long timestamp, KeyValue.Type type, String value) { return new KeyValue(Bytes.toBytes(row), Bytes.toBytes(family), Bytes.toBytes(qualifier), timestamp, type, Bytes.toBytes(value)); } public static ByteBuffer toByteBufferAndRewind(final Iterable<? 
extends KeyValue> kvs, boolean includeMemstoreTS) { int totalBytes = KeyValueUtil.totalLengthWithMvccVersion(kvs, includeMemstoreTS); ByteBuffer bb = ByteBuffer.allocate(totalBytes); for (KeyValue kv : IterableUtils.emptyIfNull(kvs)) { KeyValueUtil.appendToByteBuffer(bb, kv, includeMemstoreTS); } bb.rewind(); return bb; } /** * Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is * made without distinguishing MVCC version of the KeyValues * @return true if KeyValues from kvCollection2 are contained in kvCollection1 */ public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1, Collection<? extends Cell> kvCollection2) { for (Cell kv1 : kvCollection1) { boolean found = false; for (Cell kv2 : kvCollection2) { if (PrivateCellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true; } if (!found) return false; } return true; } public static List<KeyValue> rewindThenToList(final ByteBuffer bb, final boolean includesMemstoreTS, final boolean useTags) { bb.rewind(); List<KeyValue> kvs = Lists.newArrayList(); KeyValue kv = null; while (true) { kv = KeyValueUtil.nextShallowCopy(bb, includesMemstoreTS, useTags); if (kv == null) { break; } kvs.add(kv); } return kvs; } /********************* toString ************************************/ public static String toStringWithPadding(final Collection<? 
extends KeyValue> kvs, final boolean includeMeta) { int maxRowStringLength = 0; int maxFamilyStringLength = 0; int maxQualifierStringLength = 0; int maxTimestampLength = 0; for (KeyValue kv : kvs) { maxRowStringLength = Math.max(maxRowStringLength, getRowString(kv).length()); maxFamilyStringLength = Math.max(maxFamilyStringLength, getFamilyString(kv).length()); maxQualifierStringLength = Math.max(maxQualifierStringLength, getQualifierString(kv).length()); maxTimestampLength = Math.max(maxTimestampLength, Long.valueOf(kv.getTimestamp()).toString().length()); } StringBuilder sb = new StringBuilder(); for (KeyValue kv : kvs) { if (sb.length() > 0) { sb.append("\n"); } String row = toStringWithPadding(kv, maxRowStringLength, maxFamilyStringLength, maxQualifierStringLength, maxTimestampLength, includeMeta); sb.append(row); } return sb.toString(); } protected static String toStringWithPadding(final KeyValue kv, final int maxRowLength, int maxFamilyLength, int maxQualifierLength, int maxTimestampLength, boolean includeMeta) { String leadingLengths = ""; String familyLength = kv.getFamilyLength() + " "; if (includeMeta) { leadingLengths += Strings.padFront(kv.getKeyLength() + "", '0', 4); leadingLengths += " "; leadingLengths += Strings.padFront(kv.getValueLength() + "", '0', 4); leadingLengths += " "; leadingLengths += Strings.padFront(kv.getRowLength() + "", '0', 2); leadingLengths += " "; } int spacesAfterRow = maxRowLength - getRowString(kv).length() + 2; int spacesAfterFamily = maxFamilyLength - getFamilyString(kv).length() + 2; int spacesAfterQualifier = maxQualifierLength - getQualifierString(kv).length() + 1; int spacesAfterTimestamp = maxTimestampLength - Long.valueOf(kv.getTimestamp()).toString().length() + 1; return leadingLengths + getRowString(kv) + StringUtils.repeat(' ', spacesAfterRow) + familyLength + getFamilyString(kv) + StringUtils.repeat(' ', spacesAfterFamily) + getQualifierString(kv) + StringUtils.repeat(' ', spacesAfterQualifier) + 
getTimestampString(kv) + StringUtils.repeat(' ', spacesAfterTimestamp) + getTypeString(kv) + " " + getValueString(kv); } protected static String getRowString(final KeyValue kv) { return Bytes.toStringBinary(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength()); } protected static String getFamilyString(final KeyValue kv) { return Bytes.toStringBinary(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength()); } protected static String getQualifierString(final KeyValue kv) { return Bytes.toStringBinary(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()); } protected static String getTimestampString(final KeyValue kv) { return kv.getTimestamp() + ""; } protected static String getTypeString(final KeyValue kv) { return KeyValue.Type.codeToType(kv.getTypeByte()).toString(); } protected static String getValueString(final KeyValue kv) { return Bytes.toStringBinary(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); } }
apache/hbase
hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
214,472
/******************************************************************************
 * Top contributors (to current version):
 *   Andrew Reynolds, Aina Niemetz, Andres Noetzli
 *
 * This file is part of the cvc5 project.
 *
 * Copyright (c) 2009-2022 by the authors listed in the file AUTHORS
 * in the top-level source directory and their institutional affiliations.
 * All rights reserved.  See the file COPYING in the top-level source
 * directory for licensing information.
 * ****************************************************************************
 *
 * The cvc5 java API.
 */

package io.github.cvc5;

/**
 * Encapsulation of a solver synth result.
 *
 * This is the return value of the API methods:
 *   - {@link Solver#checkSynth()}
 *   - {@link Solver#checkSynthNext()}
 *
 * which we call synthesis queries.  This class indicates whether the
 * synthesis query has a solution, has no solution, or is unknown.
 *
 * Implementation note: this is a thin JNI wrapper — every query delegates to
 * a native method taking the C++ object's pointer held by AbstractPointer.
 */
public class SynthResult extends AbstractPointer
{
  // region construction and destruction

  // Wraps a native SynthResult; called from native-backed Solver methods.
  SynthResult(Solver solver, long pointer)
  {
    super(solver, pointer);
  }

  protected native void deletePointer(long pointer);

  public long getPointer()
  {
    return pointer;
  }

  // endregion

  /**
   * @return True if SynthResult is empty, i.e., a nullary SynthResult, and not
   *         an actual result returned from a synthesis query.
   */
  public boolean isNull()
  {
    return isNull(pointer);
  }

  private native boolean isNull(long pointer);

  /**
   * @return True if the synthesis query has a solution.
   */
  public boolean hasSolution()
  {
    return hasSolution(pointer);
  }

  private native boolean hasSolution(long pointer);

  /**
   * @return True if the synthesis query has no solution. In this case, it was
   *         determined there was no solution.
   */
  public boolean hasNoSolution()
  {
    return hasNoSolution(pointer);
  }

  private native boolean hasNoSolution(long pointer);

  /**
   * @return True if the result of the synthesis query could not be determined.
   */
  public boolean isUnknown()
  {
    return isUnknown(pointer);
  }

  private native boolean isUnknown(long pointer);

  /**
   * @return A string representation of this result.
   */
  protected native String toString(long pointer);
}
HanielB/cvc5
src/api/java/io/github/cvc5/SynthResult.java
214,473
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Tests membership of the token text in the provided list of words. The lexicon words are provided in a file, one word per line. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; import com.carrotsearch.hppc.ObjectHashSet; public class LexiconMembership extends Pipe implements Serializable { String name; ObjectHashSet lexicon; boolean ignoreCase; public LexiconMembership (String name, Reader lexiconReader, boolean ignoreCase) { this.name = name; this.lexicon = new ObjectHashSet (); this.ignoreCase = ignoreCase; LineNumberReader reader = new LineNumberReader (lexiconReader); String line; while (true) { try { line = reader.readLine(); } catch (IOException e) { throw new IllegalStateException (); } if (line == null) { break; } else { // System.out.println(name + " : " + (ignoreCase ? line.toLowerCase().intern() : line.intern()) ); lexicon.add (ignoreCase ? 
line.toLowerCase() : line); } } if (lexicon.size() == 0) throw new IllegalArgumentException ("Empty lexicon"); } public LexiconMembership (String name, File lexiconFile, boolean ignoreCase) throws FileNotFoundException { this (name, new BufferedReader (new FileReader (lexiconFile)), ignoreCase); } public LexiconMembership (File lexiconFile, boolean ignoreCase) throws FileNotFoundException { this (lexiconFile.getName(), lexiconFile, ignoreCase); } public LexiconMembership (File lexiconFile) throws FileNotFoundException { this (lexiconFile.getName(), lexiconFile, true); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); for (int i = 0; i < ts.size(); i++) { Token t = ts.get(i); String s = t.getText(); String conS=s; //dealing with ([a-z]+), ([a-z]+, [a-z]+), [a-z]+. if(conS.startsWith("(")) conS = conS.substring(1); if(conS.endsWith(")") || conS.endsWith(".")) conS = conS.substring(0, conS.length()-1); if (lexicon.contains (ignoreCase ? s.toLowerCase() : s)) t.setFeatureValue (name, 1.0); if(conS.compareTo(s) != 0) { if (lexicon.contains (ignoreCase ? conS.toLowerCase() : conS)) t.setFeatureValue (name, 1.0); } } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (name); out.writeObject (lexicon); out.writeBoolean (ignoreCase); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); this.name = (String) in.readObject(); this.lexicon = (ObjectHashSet) in.readObject(); this.ignoreCase = in.readBoolean(); } }
techknowledgist/Mallet
src/cc/mallet/pipe/tsf/LexiconMembership.java
214,474
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import cc.mallet.pipe.*; import cc.mallet.types.*; /** AdaBoost Robert E. Schapire. "The boosting approach to machine learning: An overview." In MSRI Workshop on Nonlinear Estimation and Classification, 2002. http://www.research.att.com/~schapire/cgi-bin/uncompress-papers/msri.ps @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class AdaBoost extends Classifier { Classifier[] weakClassifiers; double[] alphas; public AdaBoost (Pipe instancePipe, Classifier[] weakClassifiers, double[] alphas) { super (instancePipe); this.weakClassifiers = weakClassifiers; this.alphas = alphas; } // added by Gary /** * Get the number of weak classifiers in this ensemble classifier */ public int getNumWeakClassifiers() { return alphas.length; } // added by Gary /** * Return an AdaBoost classifier that uses only the first * <tt>numWeakClassifiersToUse</tt> weak learners. * * <p>The returned classifier's Pipe and weak classifiers * are backed by the respective objects of this classifier, * so changes to the returned classifier's Pipe and weak * classifiers are reflected in this classifier, and vice versa. 
*/ public AdaBoost getTrimmedClassifier(int numWeakClassifiersToUse) { if (numWeakClassifiersToUse <= 0 || numWeakClassifiersToUse > weakClassifiers.length) throw new IllegalArgumentException("number of weak learners to use out of range:" + numWeakClassifiersToUse); Classifier[] newWeakClassifiers = new Classifier[numWeakClassifiersToUse]; System.arraycopy(weakClassifiers, 0, newWeakClassifiers, 0, numWeakClassifiersToUse); double[] newAlphas = new double[numWeakClassifiersToUse]; System.arraycopy(alphas, 0, newAlphas, 0, numWeakClassifiersToUse); return new AdaBoost(instancePipe, newWeakClassifiers, newAlphas); } @Override public Classification classify (Instance inst) { return classify(inst, weakClassifiers.length); } /** * Classify the given instance using only the first * <tt>numWeakClassifiersToUse</tt> classifiers * trained during boosting */ public Classification classify (Instance inst, int numWeakClassifiersToUse) { if (numWeakClassifiersToUse <= 0 || numWeakClassifiersToUse > weakClassifiers.length) throw new IllegalArgumentException("number of weak learners to use out of range:" + numWeakClassifiersToUse); FeatureVector fv = (FeatureVector) inst.getData(); assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; int bestIndex; double sum = 0; // Gather scores of all weakClassifiers for (int round = 0; round < numWeakClassifiersToUse; round++) { bestIndex = weakClassifiers[round].classify(inst).getLabeling().getBestIndex(); scores[bestIndex] += alphas[round]; sum += scores[bestIndex]; } // Normalize the scores for (int i = 0; i < scores.length; i++) scores[i] /= sum; return new Classification (inst, this, new LabelVector (getLabelAlphabet(), scores)); } }
lebiathan/Mallet
src/cc/mallet/classify/AdaBoost.java
214,475
// -*- mode: java; c-basic-offset: 2; -*-
/**
 * Visual Blocks Editor
 *
 * Copyright 2012 Google Inc.
 * Copyright © 2013-2016 Massachusetts Institute of Technology
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @license
 * @fileoverview Object representing a warning for MIT App Inventor.
 * @author [email protected] (Andrew F. McKinney)
 * @author [email protected] (Neil Fraser)
 * @author [email protected] (Evan W. Patton)
 */

'use strict';

goog.provide('AI.Blockly.Error');

goog.require('Blockly.Bubble');
goog.require('Blockly.Icon');

/**
 * Class for an error icon attached to a block.  Multiple error messages are
 * kept in this.text_, keyed by an id, and concatenated for display.
 * @param {!Blockly.Block} block The block associated with this error.
 * @constructor
 */
Blockly.Error = function(block) {
  Blockly.Error.superClass_.constructor.call(this, block);
  this.createIcon();
  // Map of error id -> error message text.
  this.text_ = {};
};
goog.inherits(Blockly.Error, Blockly.Icon);

/**
 * If set to true, the icon will be hidden when the block it is
 * attached to is collapsed. Otherwise, the icon will be visible even
 * if the block is collapsed.
 */
Blockly.Error.prototype.collapseHidden = false;

/**
 * Radius of the error icon.
 */
Blockly.Error.ICON_RADIUS = 8;

/**
 * Create the icon on the block.
 * Generates a circle outline with an 'X' path drawn inside it:
 *   <circle class="blocklyErrorIconOutline" r="8" cx="8" cy="8"/>
 *   <path class="blocklyErrorIconX" d="..."/>
 * @param {!Element} group The icon group to draw into.
 * @private
 */
Blockly.Error.prototype.drawIcon_ = function(group) {
  Blockly.utils.createSvgElement('circle',
      {'class': 'blocklyErrorIconOutline',
       'r': Blockly.Error.ICON_RADIUS,
       'cx': Blockly.Error.ICON_RADIUS,
       'cy': Blockly.Error.ICON_RADIUS},
      group);
  Blockly.utils.createSvgElement('path',
      {'class': 'blocklyErrorIconX',
       'd': 'M 4,4 12,12 8,8 4,12 12,4'},
      // X fills circle vvv
      //'d': 'M 3.1931458,3.1931458 12.756854,12.756854 8,8 3.0931458,12.756854 12.756854,3.0931458'},
      group);
};

/**
 * Create the text for the error's bubble.  Each '\n'-separated line of the
 * input becomes its own tspan element.
 * @param {string} text The text to display.
 * @return {!Element} The top-level node of the text.
 * @private
 */
Blockly.Error.textToDom_ = function(text) {
  var paragraph = Blockly.utils.createSvgElement('text',
      {'class': 'blocklyText blocklyBubbleText',
       'y': Blockly.Bubble.BORDER_WIDTH},
      null);
  var lines = text.split('\n');
  for (var i = 0; i < lines.length; i++) {
    var tspanElement = Blockly.utils.createSvgElement('tspan',
        {'dy': '1em', 'x': Blockly.Bubble.BORDER_WIDTH}, paragraph);
    var textNode = document.createTextNode(lines[i]);
    tspanElement.appendChild(textNode);
  }
  return paragraph;
};

/**
 * Show or hide the error bubble.  Showing builds a fresh Blockly.Bubble from
 * the current text; hiding disposes of it.
 * @param {boolean} visible True if the bubble should be visible.
 */
Blockly.Error.prototype.setVisible = function(visible) {
  if (visible == this.isVisible()) {
    // No change.
    return;
  }
  if (visible) {
    // Create the bubble.
    var paragraph = Blockly.Error.textToDom_(this.getText());
    this.bubble_ = new Blockly.Bubble(
        /** @type {!Blockly.Workspace} */ (this.block_.workspace),
        paragraph, this.block_.svgPath_, this.iconXY_, null, null);
    if (this.block_.RTL) {
      // Right-align the paragraph.
      // This cannot be done until the bubble is rendered on screen.
      var maxWidth = paragraph.getBBox().width;
      for (var x = 0, textElement; textElement = paragraph.childNodes[x]; x++) {
        textElement.setAttribute('text-anchor', 'end');
        textElement.setAttribute('x', maxWidth + Blockly.Bubble.BORDER_WIDTH);
      }
    }
    this.updateColour();
    // Bump the warning into the right location.
    var size = this.bubble_.getBubbleSize();
    this.bubble_.setBubbleSize(size.width, size.height);
  } else {
    // Dispose of the bubble.
    this.bubble_.dispose();
    this.bubble_ = null;
    this.body_ = null;
  }
};

/**
 * Bring the target to the top of the stack when clicked on.
 * @param {!Event} e Mouse up event (unused).
 * @private
 */
Blockly.Error.prototype.bodyFocus_ = function(e) {
  this.bubble_.promote_();
};

/**
 * Set (or, when text is falsy, remove) the error message for the given id.
 * If the bubble is currently visible it is rebuilt to reflect the change.
 * @param {string} text Error text; falsy clears the entry for id.
 * @param {string} id Key identifying this particular error message.
 */
Blockly.Error.prototype.setText = function(text, id) {
  if (this.text_[id] == text) {
    return;
  }
  if (text) {
    this.text_[id] = text;
  } else {
    delete this.text_[id];
  }
  if (this.isVisible()) {
    // Toggle visibility to force the bubble to re-render with the new text.
    this.setVisible(false);
    this.setVisible(true);
  }
};

/**
 * Get this error's texts.
 * @return {string} All texts concatenated into one string.
 */
Blockly.Error.prototype.getText = function() {
  var allErrors = [];
  for (var id in this.text_) {
    allErrors.push(this.text_[id]);
  }
  return allErrors.join('\n');
};

/**
 * Dispose of this error: detach from the block and clean up the icon.
 */
Blockly.Error.prototype.dispose = function() {
  this.block_.error = null;
  Blockly.Icon.prototype.dispose.call(this);
};
kkashi01/AppyBuilder-Source
appinventor/blocklyeditor/src/errorIcon.js
214,478
/*
 * Copyright 2014-2021 Andrew Gaul <[email protected]>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import java.io.Console;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.PathMatcher;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;

import com.google.common.base.Strings;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Module;

import org.jclouds.Constants;
import org.jclouds.ContextBuilder;
import org.jclouds.JcloudsVersion;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.concurrent.DynamicExecutors;
import org.jclouds.concurrent.config.ExecutorServiceModule;
import org.jclouds.location.reference.LocationConstants;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.args4j.CmdLineParser;
import org.kohsuke.args4j.Option;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Command-line entry point for S3Proxy: parses one or more properties files,
 * builds the backing jclouds BlobStore (plus any middleware wrappers), and
 * starts the proxy server.  Fatal configuration problems are reported to
 * stderr and terminate the JVM via System.exit(1).
 */
public final class Main {
    private static final Logger logger = LoggerFactory.getLogger(Main.class);

    /** Utility entry-point class; never instantiated. */
    private Main() {
        throw new AssertionError("intentionally not implemented");
    }

    /** args4j-bound command-line options. */
    private static final class Options {
        @Option(name = "--properties",
                usage = "S3Proxy configuration (required, multiple allowed)")
        private List<File> propertiesFiles = new ArrayList<>();

        @Option(name = "--version", usage = "display version")
        private boolean version;
    }

    /**
     * Parse options, assemble blob stores and identity/bucket locators from
     * every supplied properties file, then build and start the proxy.
     */
    public static void main(String[] args) throws Exception {
        // When there is no attached console (e.g. running as a service),
        // route stderr through the SLF4J logger instead.
        Console console = System.console();
        if (console == null) {
            System.setErr(createLoggerErrorPrintStream());
        }

        Options options = new Options();
        CmdLineParser parser = new CmdLineParser(options);
        try {
            parser.parseArgument(args);
        } catch (CmdLineException cle) {
            usage(parser);  // prints usage and exits
        }

        if (options.version) {
            System.err.println(
                    Main.class.getPackage().getImplementationVersion());
            System.exit(0);
        } else if (options.propertiesFiles.isEmpty()) {
            usage(parser);
        }

        S3Proxy.Builder s3ProxyBuilder = null;
        ThreadFactory factory = new ThreadFactoryBuilder()
                .setNameFormat("user thread %d")
                .setThreadFactory(Executors.defaultThreadFactory())
                .build();
        // Scaling pool shared by all backends: 1-20 threads, 60s keep-alive.
        ExecutorService executorService = DynamicExecutors.newScalingThreadPool(
                1, 20, 60 * 1000, factory);

        // identity -> (credential, blobStore) for request authentication.
        ImmutableMap.Builder<String, Map.Entry<String, BlobStore>> locators =
                ImmutableMap.builder();
        // glob matcher -> (identity, blobStore) for bucket routing.
        ImmutableMap.Builder<PathMatcher, Map.Entry<String, BlobStore>>
                globLocators = ImmutableMap.builder();
        Set<String> locatorGlobs = new HashSet<>();
        Set<String> parsedIdentities = new HashSet<>();

        for (File propertiesFile : options.propertiesFiles) {
            Properties properties = new Properties();
            try (InputStream is = new FileInputStream(propertiesFile)) {
                properties.load(is);
            }
            // System properties take precedence over file-supplied values.
            properties.putAll(System.getProperties());

            BlobStore blobStore = createBlobStore(properties, executorService);
            blobStore = parseMiddlewareProperties(blobStore, executorService,
                    properties);

            String s3ProxyAuthorizationString = properties.getProperty(
                    S3ProxyConstants.PROPERTY_AUTHORIZATION);
            String localIdentity = null;
            if (AuthenticationType.fromString(s3ProxyAuthorizationString) !=
                    AuthenticationType.NONE) {
                localIdentity = properties.getProperty(
                        S3ProxyConstants.PROPERTY_IDENTITY);
                String localCredential = properties.getProperty(
                        S3ProxyConstants.PROPERTY_CREDENTIAL);
                // First definition of an identity wins; duplicates ignored.
                if (parsedIdentities.add(localIdentity)) {
                    locators.put(localIdentity, Maps.immutableEntry(
                            localCredential, blobStore));
                }
            }
            for (String key : properties.stringPropertyNames()) {
                if (key.startsWith(S3ProxyConstants.PROPERTY_BUCKET_LOCATOR)) {
                    String bucketLocator = properties.getProperty(key);
                    if (locatorGlobs.add(bucketLocator)) {
                        globLocators.put(
                                FileSystems.getDefault().getPathMatcher(
                                        "glob:" + bucketLocator),
                                Maps.immutableEntry(localIdentity, blobStore));
                    } else {
                        // Duplicate bucket locators are a fatal mis-config.
                        System.err.println("Multiple definitions of the " +
                                "bucket locator: " + bucketLocator);
                        System.exit(1);
                    }
                }
            }

            S3Proxy.Builder s3ProxyBuilder2 = S3Proxy.Builder
                    .fromProperties(properties)
                    .blobStore(blobStore);

            // All properties files must agree on the proxy-level settings.
            if (s3ProxyBuilder != null &&
                    !s3ProxyBuilder.equals(s3ProxyBuilder2)) {
                System.err.println("Multiple configurations require" +
                        " identical s3proxy properties");
                System.exit(1);
            }
            s3ProxyBuilder = s3ProxyBuilder2;
        }

        S3Proxy s3Proxy;
        try {
            s3Proxy = s3ProxyBuilder.build();
        } catch (IllegalArgumentException | IllegalStateException e) {
            System.err.println(e.getMessage());
            System.exit(1);
            throw e;  // unreachable; satisfies definite-assignment of s3Proxy
        }

        final Map<String, Map.Entry<String, BlobStore>> locator =
                locators.build();
        final Map<PathMatcher, Map.Entry<String, BlobStore>> globLocator =
                globLocators.build();
        if (!locator.isEmpty() || !globLocator.isEmpty()) {
            s3Proxy.setBlobStoreLocator(
                    new GlobBlobStoreLocator(locator, globLocator));
        }

        try {
            s3Proxy.start();
        } catch (Exception e) {
            System.err.println(e.getMessage());
            System.exit(1);
        }
    }

    /**
     * Wrap the base blob store in each middleware enabled in the properties:
     * eventual consistency emulation, null storage, read-only, aliases,
     * regex rewriting, sharded buckets, and encryption, in that order.
     *
     * @return the (possibly multiply-wrapped) blob store
     */
    private static BlobStore parseMiddlewareProperties(BlobStore blobStore,
            ExecutorService executorService, Properties properties)
            throws IOException {
        // Properties prefixed with the alt-jclouds prefix configure the
        // secondary backend used by the eventual-consistency emulator.
        Properties altProperties = new Properties();
        for (Map.Entry<Object, Object> entry : properties.entrySet()) {
            String key = (String) entry.getKey();
            if (key.startsWith(S3ProxyConstants.PROPERTY_ALT_JCLOUDS_PREFIX)) {
                key = key.substring(
                        S3ProxyConstants.PROPERTY_ALT_JCLOUDS_PREFIX.length());
                altProperties.put(key, (String) entry.getValue());
            }
        }

        String eventualConsistency = properties.getProperty(
                S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY);
        if ("true".equalsIgnoreCase(eventualConsistency)) {
            BlobStore altBlobStore = createBlobStore(altProperties,
                    executorService);
            int delay = Integer.parseInt(properties.getProperty(
                    S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY_DELAY,
                    "5"));
            double probability = Double.parseDouble(properties.getProperty(
                    S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY_PROBABILITY,
                    "1.0"));
            System.err.println("Emulating eventual consistency with delay " +
                    delay + " seconds and probability " + (probability * 100) +
                    "%");
            blobStore = EventualBlobStore.newEventualBlobStore(
                    blobStore, altBlobStore,
                    Executors.newScheduledThreadPool(1),
                    delay, TimeUnit.SECONDS, probability);
        }

        String nullBlobStore = properties.getProperty(
                S3ProxyConstants.PROPERTY_NULL_BLOBSTORE);
        if ("true".equalsIgnoreCase(nullBlobStore)) {
            System.err.println("Using null storage backend");
            blobStore = NullBlobStore.newNullBlobStore(blobStore);
        }

        String readOnlyBlobStore = properties.getProperty(
                S3ProxyConstants.PROPERTY_READ_ONLY_BLOBSTORE);
        if ("true".equalsIgnoreCase(readOnlyBlobStore)) {
            System.err.println("Using read-only storage backend");
            blobStore = ReadOnlyBlobStore.newReadOnlyBlobStore(blobStore);
        }

        ImmutableBiMap<String, String> aliases = AliasBlobStore.parseAliases(
                properties);
        if (!aliases.isEmpty()) {
            System.err.println("Using alias backend");
            blobStore = AliasBlobStore.newAliasBlobStore(blobStore, aliases);
        }

        ImmutableList<Map.Entry<Pattern, String>> regexs =
                RegexBlobStore.parseRegexs(properties);
        if (!regexs.isEmpty()) {
            System.err.println("Using regex backend");
            blobStore = RegexBlobStore.newRegexBlobStore(blobStore, regexs);
        }

        ImmutableMap<String, Integer> shards =
                ShardedBlobStore.parseBucketShards(properties);
        ImmutableMap<String, String> prefixes =
                ShardedBlobStore.parsePrefixes(properties);
        if (!shards.isEmpty()) {
            System.err.println("Using sharded buckets backend");
            blobStore = ShardedBlobStore.newShardedBlobStore(blobStore,
                    shards, prefixes);
        }

        String encryptedBlobStore = properties.getProperty(
                S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE);
        if ("true".equalsIgnoreCase(encryptedBlobStore)) {
            System.err.println("Using encrypted storage backend");
            blobStore = EncryptedBlobStore.newEncryptedBlobStore(blobStore,
                    properties);
        }

        return blobStore;
    }

    /**
     * A PrintStream that forwards complete lines to the SLF4J logger at
     * error level; partial writes are buffered until a newline arrives.
     */
    private static PrintStream createLoggerErrorPrintStream() {
        return new PrintStream(System.err) {
            private final StringBuilder builder = new StringBuilder();

            @Override
            @edu.umd.cs.findbugs.annotations.SuppressFBWarnings(
                    "SLF4J_SIGN_ONLY_FORMAT")
            public void print(final String string) {
                logger.error("{}", string);
            }

            @Override
            public void write(byte[] buf, int off, int len) {
                for (int i = off; i < len; ++i) {
                    char ch = (char) buf[i];
                    if (ch == '\n') {
                        if (builder.length() != 0) {
                            print(builder.toString());
                            builder.setLength(0);
                        }
                    } else {
                        builder.append(ch);
                    }
                }
            }
        };
    }

    /**
     * Build the jclouds BlobStore described by the given properties.
     * Exits the JVM when provider or credentials are missing.
     */
    private static BlobStore createBlobStore(Properties properties,
            ExecutorService executorService) throws IOException {
        String provider = properties.getProperty(Constants.PROPERTY_PROVIDER);
        String identity = properties.getProperty(Constants.PROPERTY_IDENTITY);
        String credential = properties.getProperty(
                Constants.PROPERTY_CREDENTIAL);
        String endpoint = properties.getProperty(Constants.PROPERTY_ENDPOINT);
        // Endpoint is passed via the ContextBuilder, not as an override.
        properties.remove(Constants.PROPERTY_ENDPOINT);
        String region = properties.getProperty(
                LocationConstants.PROPERTY_REGION);

        if (provider == null) {
            System.err.println(
                    "Properties file must contain: " +
                    Constants.PROPERTY_PROVIDER);
            System.exit(1);
        }

        if (provider.equals("filesystem") || provider.equals("transient")) {
            // local blobstores do not require credentials
            identity = Strings.nullToEmpty(identity);
            credential = Strings.nullToEmpty(credential);
        } else if (provider.equals("google-cloud-storage")) {
            // The credential may be a path to a key file; inline its content.
            File credentialFile = new File(credential);
            if (credentialFile.exists()) {
                credential = Files.asCharSource(credentialFile,
                        StandardCharsets.UTF_8).read();
            }
            properties.remove(Constants.PROPERTY_CREDENTIAL);
            // We also need to clear the system property, otherwise the
            // credential will be overridden by the system property.
            System.clearProperty(Constants.PROPERTY_CREDENTIAL);
        }

        if (identity == null || credential == null) {
            System.err.println(
                    "Properties file must contain: " +
                    Constants.PROPERTY_IDENTITY + " and " +
                    Constants.PROPERTY_CREDENTIAL);
            System.exit(1);
        }

        properties.setProperty(Constants.PROPERTY_USER_AGENT,
                String.format("s3proxy/%s jclouds/%s java/%s",
                        Main.class.getPackage().getImplementationVersion(),
                        JcloudsVersion.get(),
                        System.getProperty("java.version")));

        ContextBuilder builder = ContextBuilder
                .newBuilder(provider)
                .credentials(identity, credential)
                .modules(ImmutableList.<Module>of(
                        new SLF4JLoggingModule(),
                        new ExecutorServiceModule(executorService)))
                .overrides(properties);
        if (!Strings.isNullOrEmpty(endpoint)) {
            builder = builder.endpoint(endpoint);
        }

        BlobStoreContext context = builder.build(BlobStoreContext.class);
        BlobStore blobStore;
        // Swift-style providers scope blob stores per region.
        if (context instanceof RegionScopedBlobStoreContext &&
                region != null) {
            blobStore = ((RegionScopedBlobStoreContext) context)
                    .getBlobStore(region);
        } else {
            blobStore = context.getBlobStore();
        }
        return blobStore;
    }

    /** Print command-line usage to stderr and exit with status 1. */
    private static void usage(CmdLineParser parser) {
        System.err.println("Usage: s3proxy [options...]");
        parser.printUsage(System.err);
        System.exit(1);
    }
}
gaul/s3proxy
src/main/java/org/gaul/s3proxy/Main.java
214,479
/* * Utils.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.app.util; import javax.swing.*; import javax.swing.filechooser.FileNameExtensionFilter; import java.awt.*; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.lang.reflect.Constructor; import java.util.*; import java.util.List; /** * @author adru001 */ public class Utils { public static final String TRAITS = "traits"; public static String absName(File file) { return (file != null) ? file.getAbsolutePath() : null; } public static String getLoadFileName(String message) { return absName(getLoadFile(message)); } public static String getSaveFileName(String message) { return absName(getSaveFile(message)); } public static File getLoadFile(String message) { // No file name in the arguments so throw up a dialog box... 
java.awt.Frame frame = new java.awt.Frame(); java.awt.FileDialog chooser = new java.awt.FileDialog(frame, message, java.awt.FileDialog.LOAD); // chooser.show(); chooser.setVisible(true); if (chooser.getFile() == null) return null; java.io.File file = new java.io.File(chooser.getDirectory(), chooser.getFile()); chooser.dispose(); frame.dispose(); return file; } public static File[] getLoadFiles(String message, File openDefaultDirectory, String description, String... extensions) { // No file name in the arguments so throw up a dialog box... java.awt.Frame frame = new java.awt.Frame(); frame.setTitle(message); final JFileChooser chooser = new JFileChooser(openDefaultDirectory); chooser.setMultiSelectionEnabled(true); chooser.setFileSelectionMode(JFileChooser.FILES_AND_DIRECTORIES); FileNameExtensionFilter filter = new FileNameExtensionFilter(description, extensions); chooser.setFileFilter(filter); final int returnVal = chooser.showOpenDialog(frame); File[] files = null; if (returnVal == JFileChooser.APPROVE_OPTION) { files = chooser.getSelectedFiles(); } frame.dispose(); return files; } public static File getSaveFile(String message) { // No file name in the arguments so throw up a dialog box... java.awt.Frame frame = new java.awt.Frame(); java.awt.FileDialog chooser = new java.awt.FileDialog(frame, message, java.awt.FileDialog.SAVE); // chooser.show(); chooser.setVisible(true); java.io.File file = new java.io.File(chooser.getDirectory(), chooser.getFile()); chooser.dispose(); frame.dispose(); return file; } // detect type of text value - return class of type public static Class detectType(final String valueString) { if (valueString.equalsIgnoreCase("TRUE") || valueString.equalsIgnoreCase("FALSE")) { return Boolean.class; } try { final double number = Double.parseDouble(valueString); if (Math.round(number) == number) { return Integer.class; } return Double.class; } catch (NumberFormatException pe) { return String.class; } } // New object of type cl from text. 
// return null if can't be done of value can't be converted. public static Object constructFromString(Class cl, String value) { for (Constructor c : cl.getConstructors()) { final Class[] classes = c.getParameterTypes(); if (classes.length == 1 && classes[0].equals(String.class)) { try { return c.newInstance(value); } catch (Exception e) { return null; } } } return null; } // skip over comment and empty lines public static String nextNonCommentLine(BufferedReader reader) throws IOException { String line; do { line = reader.readLine(); // ignore empty or comment lines } while (line != null && (line.trim().length() == 0 || line.trim().charAt(0) == '#')); return line; } /** * Load traits from file. * * @param file File * @param delimiter String * @return A map whose key is the trait. The value is a list of <taxa, value> as a string array of size 2. * @throws java.io.IOException IOException * @throws dr.app.util.Arguments.ArgumentException * ArgumentException */ public static Map<String, List<String[]>> importTraitsFromFile(File file, final String delimiter) throws IOException, Arguments.ArgumentException { final BufferedReader reader = new BufferedReader(new FileReader(file)); String line = nextNonCommentLine(reader); // define where is the trait keyword in the 1st row of file final int startAt = 1; final String[] traitNames = line.split(delimiter); for (int k = 0; k < traitNames.length; ++k) { traitNames[k] = traitNames[k].trim(); } if (!(traitNames[0].equalsIgnoreCase(TRAITS) || traitNames[0].length() < 1)) throw new Arguments.ArgumentException("Wrong file format:\ntrait key word should be declared in the 1st row"); Map<String, List<String[]>> traits = new HashMap<String, List<String[]>>(); for (int i = startAt; i < traitNames.length; i++) { traits.put(traitNames[i], new ArrayList<String[]>()); } line = nextNonCommentLine(reader); while (line != null) { String[] values = line.split(delimiter); assert (values.length > 0); if (values.length != traitNames.length) throw 
new Arguments.ArgumentException("Wrong file format:\neach trait should have its corresponding value"); try { if (traitNames[0].equalsIgnoreCase(TRAITS)) { importStatesMoreThanTaxon(traits, values, traitNames, startAt); } else { importSpecies(traits, values, traitNames, startAt); } } catch (Arguments.ArgumentException e) { e.printStackTrace(); } line = nextNonCommentLine(reader); } return traits; } private static void importSpecies(Map<String, List<String[]>> traits, String[] values, String[] traitNames, int startAt) throws Arguments.ArgumentException { // first column is label for the redundant "taxa" name final String first = values[0].trim(); int k = Arrays.asList(traitNames).indexOf(first); if (k >= 0) { List<String[]> trait = traits.get(first); if (trait == null) { throw new Arguments.ArgumentException("undefined trait " + first); } final String traitVal = values[1].trim(); for (int i = 2; i < values.length; i++) { trait.add(new String[]{values[i], traitVal}); // {taxon_name, trait} } } else { for (int i = startAt; i < values.length; i++) { if (i < traitNames.length) { List<String[]> column = traits.get(traitNames[i]); column.add(new String[]{first, values[i].trim()}); } } } } private static void importStatesMoreThanTaxon(Map<String, List<String[]>> traits, String[] values, String[] traitNames, int startAt) throws Arguments.ArgumentException { // first column is label taxon name if (traitNames.length < 2) { throw new Arguments.ArgumentException("Wrong file format:\ntrait key words in the 1st row are loaded improperly"); } else if (traitNames.length - startAt < 1) { throw new Arguments.ArgumentException("startAt set improperly"); } for (int i = 0; i < (traitNames.length - startAt); i++) { List<String[]> trait = traits.get(traitNames[i + startAt]); if (trait == null) throw new Arguments.ArgumentException("undefined trait " + traitNames[i + startAt]); trait.add(new String[]{values[0].trim(), values[i + startAt].trim()}); // {taxon_name, trait} } } /** * This 
function takes a file name and an array of extensions (specified * without the leading '.'). If the file name ends with one of the extensions * then it is returned with this trimmed off. Otherwise the file name is * return as it is. * * @param fileName String * @param extensions String[] * @return the trimmed filename */ public static String trimExtensions(String fileName, String[] extensions) { String newName = null; for (String extension : extensions) { final String ext = "." + extension; if (fileName.toUpperCase().endsWith(ext.toUpperCase())) { newName = fileName.substring(0, fileName.length() - ext.length()); } } return (newName != null) ? newName : fileName; } /** * @param caller Object * @param name String * @return a named image from file or resource bundle. */ public static Image getImage(Object caller, String name) { java.net.URL url = caller.getClass().getResource(name); if (url != null) { return Toolkit.getDefaultToolkit().createImage(url); } else { if (caller instanceof Component) { Component c = (Component) caller; Image i = c.createImage(100, 20); Graphics g = c.getGraphics(); g.drawString("Not found!", 1, 15); return i; } else return null; } } public static File getCWD() { final String f = System.getProperty("user.dir"); return new File(f); } // enum Platform { // WINDOWS, // MACOSX, // LINUX; // // Platform detect() { // // final String os = System.getProperty("os.name"); // // if( os.equals("Linux") ) { // return LINUX; // } // // todo probably wrong, please check on windows // if( os.equals("Windows") ) { // return WINDOWS; // } // // if( System.getProperty("os.name").toLowerCase().startsWith("mac os x") ) { // return MACOSX; // } // // return null; // } // } }
maxbiostat/beast-mcmc
src/dr/app/util/Utils.java
214,480
/* * Copyright 2002-2004 The Apache Software Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.collections; import org.apache.commons.collections.bag.HashBag; import org.apache.commons.collections.bag.PredicatedBag; import org.apache.commons.collections.bag.PredicatedSortedBag; import org.apache.commons.collections.bag.SynchronizedBag; import org.apache.commons.collections.bag.SynchronizedSortedBag; import org.apache.commons.collections.bag.TransformedBag; import org.apache.commons.collections.bag.TransformedSortedBag; import org.apache.commons.collections.bag.TreeBag; import org.apache.commons.collections.bag.TypedBag; import org.apache.commons.collections.bag.TypedSortedBag; import org.apache.commons.collections.bag.UnmodifiableBag; import org.apache.commons.collections.bag.UnmodifiableSortedBag; /** * Provides utility methods and decorators for * {@link Bag} and {@link SortedBag} instances. * * @since Commons Collections 2.1 * @version $Revision: 1.20 $ $Date: 2004/04/01 20:12:00 $ * * @author Paul Jack * @author Stephen Colebourne * @author Andrew Freeman * @author Matthew Hawthorne */ public class BagUtils { /** * An empty unmodifiable bag. */ public static final Bag EMPTY_BAG = UnmodifiableBag.decorate(new HashBag()); /** * An empty unmodifiable sorted bag. */ public static final Bag EMPTY_SORTED_BAG = UnmodifiableSortedBag.decorate(new TreeBag()); /** * Instantiation of BagUtils is not intended or required. 
* However, some tools require an instance to operate.
*/
public BagUtils() {
}

//-----------------------------------------------------------------------
/**
 * Returns a synchronized (thread-safe) bag backed by the given bag.
 * In order to guarantee serial access, it is critical that all
 * access to the backing bag is accomplished through the returned bag.
 * <p>
 * It is imperative that the user manually synchronize on the returned
 * bag when iterating over it:
 *
 * <pre>
 * Bag bag = BagUtils.synchronizedBag(new HashBag());
 * ...
 * synchronized(bag) {
 *     Iterator i = bag.iterator(); // Must be in synchronized block
 *     while (i.hasNext())
 *         foo(i.next());
 * }
 * </pre>
 *
 * Failure to follow this advice may result in non-deterministic
 * behavior.
 *
 * @param bag  the bag to synchronize, must not be null
 * @return a synchronized bag backed by that bag
 * @throws IllegalArgumentException  if the Bag is null
 */
public static Bag synchronizedBag(Bag bag) {
    return SynchronizedBag.decorate(bag);
}

/**
 * Returns an unmodifiable view of the given bag. Any modification
 * attempts to the returned bag will raise an
 * {@link UnsupportedOperationException}.
 *
 * @param bag  the bag whose unmodifiable view is to be returned, must not be null
 * @return an unmodifiable view of that bag
 * @throws IllegalArgumentException  if the Bag is null
 */
public static Bag unmodifiableBag(Bag bag) {
    return UnmodifiableBag.decorate(bag);
}

/**
 * Returns a predicated (validating) bag backed by the given bag.
 * <p>
 * Only objects that pass the test in the given predicate can be added to the bag.
 * Trying to add an invalid object results in an IllegalArgumentException.
 * It is important not to use the original bag after invoking this method,
 * as it is a backdoor for adding invalid objects.
 *
 * @param bag  the bag to predicate, must not be null
 * @param predicate  the predicate for the bag, must not be null
 * @return a predicated bag backed by the given bag
 * @throws IllegalArgumentException  if the Bag or Predicate is null
 */
public static Bag predicatedBag(Bag bag, Predicate predicate) {
    return PredicatedBag.decorate(bag, predicate);
}

/**
 * Returns a typed bag backed by the given bag.
 * <p>
 * Only objects of the specified type can be added to the bag.
 *
 * @param bag  the bag to limit to a specific type, must not be null
 * @param type  the type of objects which may be added to the bag
 * @return a typed bag backed by the specified bag
 */
public static Bag typedBag(Bag bag, Class type) {
    return TypedBag.decorate(bag, type);
}

/**
 * Returns a transformed bag backed by the given bag.
 * <p>
 * Each object is passed through the transformer as it is added to the
 * Bag. It is important not to use the original bag after invoking this
 * method, as it is a backdoor for adding untransformed objects.
 *
 * @param bag  the bag to decorate, must not be null
 * @param transformer  the transformer for the bag, must not be null
 * @return a transformed bag backed by the given bag
 * @throws IllegalArgumentException  if the Bag or Transformer is null
 */
public static Bag transformedBag(Bag bag, Transformer transformer) {
    return TransformedBag.decorate(bag, transformer);
}

//-----------------------------------------------------------------------
/**
 * Returns a synchronized (thread-safe) sorted bag backed by the given
 * sorted bag.
 * In order to guarantee serial access, it is critical that all
 * access to the backing bag is accomplished through the returned bag.
 * <p>
 * It is imperative that the user manually synchronize on the returned
 * bag when iterating over it:
 *
 * <pre>
 * SortedBag bag = BagUtils.synchronizedSortedBag(new TreeBag());
 * ...
 * synchronized(bag) {
 *     Iterator i = bag.iterator(); // Must be in synchronized block
 *     while (i.hasNext())
 *         foo(i.next());
 * }
 * </pre>
 *
 * Failure to follow this advice may result in non-deterministic
 * behavior.
 *
 * @param bag  the bag to synchronize, must not be null
 * @return a synchronized bag backed by that bag
 * @throws IllegalArgumentException  if the SortedBag is null
 */
public static SortedBag synchronizedSortedBag(SortedBag bag) {
    return SynchronizedSortedBag.decorate(bag);
}

/**
 * Returns an unmodifiable view of the given sorted bag. Any modification
 * attempts to the returned bag will raise an
 * {@link UnsupportedOperationException}.
 *
 * @param bag  the bag whose unmodifiable view is to be returned, must not be null
 * @return an unmodifiable view of that bag
 * @throws IllegalArgumentException  if the SortedBag is null
 */
public static SortedBag unmodifiableSortedBag(SortedBag bag) {
    return UnmodifiableSortedBag.decorate(bag);
}

/**
 * Returns a predicated (validating) sorted bag backed by the given sorted bag.
 * <p>
 * Only objects that pass the test in the given predicate can be added to the bag.
 * Trying to add an invalid object results in an IllegalArgumentException.
 * It is important not to use the original bag after invoking this method,
 * as it is a backdoor for adding invalid objects.
 *
 * @param bag  the sorted bag to predicate, must not be null
 * @param predicate  the predicate for the bag, must not be null
 * @return a predicated bag backed by the given bag
 * @throws IllegalArgumentException  if the SortedBag or Predicate is null
 */
public static SortedBag predicatedSortedBag(SortedBag bag, Predicate predicate) {
    return PredicatedSortedBag.decorate(bag, predicate);
}

/**
 * Returns a typed sorted bag backed by the given bag.
 * <p>
 * Only objects of the specified type can be added to the bag.
 *
 * @param bag  the bag to limit to a specific type, must not be null
 * @param type  the type of objects which may be added to the bag
 * @return a typed bag backed by the specified bag
 */
public static SortedBag typedSortedBag(SortedBag bag, Class type) {
    return TypedSortedBag.decorate(bag, type);
}

/**
 * Returns a transformed sorted bag backed by the given bag.
 * <p>
 * Each object is passed through the transformer as it is added to the
 * Bag. It is important not to use the original bag after invoking this
 * method, as it is a backdoor for adding untransformed objects.
 *
 * @param bag  the bag to decorate, must not be null
 * @param transformer  the transformer for the bag, must not be null
 * @return a transformed bag backed by the given bag
 * @throws IllegalArgumentException  if the Bag or Transformer is null
 */
public static SortedBag transformedSortedBag(SortedBag bag, Transformer transformer) {
    return TransformedSortedBag.decorate(bag, transformer);
}

}
Mr-xn/Penetration_Testing_POC
jboss_CVE-2017-12149/src/org/apache/commons/collections/BagUtils.java
214,481
/*
 * TIFFWriter.java
 *
 * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA  02110-1301  USA
 */

package dr.util;

import dr.geo.color.ChannelColorScheme;
import dr.geo.color.ColorScheme;

import java.awt.image.BufferedImage;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Writes 2D arrays of doubles out as images: either via {@code javax.imageio}
 * (mapping values to colors through a {@link ColorScheme}) or as a raw
 * little-endian gray-scale TIFF stream.
 *
 * @author Marc A. Suchard
 * @author Liya Thomas -- most code taken from Liya's November 2001 free source code
 */
public class TIFFWriter {

    public static final short MAXROWS = 6000;    // maximum # of rows
    public static final short MAXCOLUMNS = 3000; // maximum # of columns

    /**
     * Writes {@code inputImageInt} to {@code fileName} as a PNG using the
     * default HEATMAP color scheme.
     *
     * @param fileName      destination file path
     * @param inputImageInt rectangular matrix of values to render
     */
    public static void writeDoubleArray(String fileName, double[][] inputImageInt) {
        writeDoubleArray(fileName, inputImageInt, "png", ColorScheme.HEATMAP);
    }

    /**
     * Renders several matrices ("channels") into a single image; each pixel's
     * color is produced by the {@link ChannelColorScheme} from the per-channel
     * values, normalized by each channel's own min/max.
     *
     * @param fileName          destination file path
     * @param inputImageIntList one matrix per channel; all assumed the same size
     * @param format            an {@code ImageIO} format name, e.g. "png"
     * @param scheme            multi-channel color mapping
     */
    public static void writeDoubleArrayMultiChannel(String fileName,
                                                    List<double[][]> inputImageIntList, String format,
                                                    ChannelColorScheme scheme) {
        // Get size, assumes the same for all matrices in list
        int dim1 = inputImageIntList.get(0).length;
        int dim2 = (inputImageIntList.get(0))[0].length;

        BufferedImage image = new BufferedImage(dim1, dim2, BufferedImage.TYPE_INT_ARGB);

        // Per-channel extrema used for color normalization.
        List<Double> max = new ArrayList<Double>();
        List<Double> min = new ArrayList<Double>();

        final int channels = inputImageIntList.size();
        for (int c = 0; c < channels; ++c) {
            double[][] inputImageInt = inputImageIntList.get(c);
            double tmax = Double.NEGATIVE_INFINITY;
            double tmin = Double.POSITIVE_INFINITY;
            for (int i = 0; i < dim1; ++i) {
                for (int j = 0; j < dim2; ++j) {
                    double value = inputImageInt[i][j];
                    // BUG FIX: these two checks must be independent.  The old
                    // "else if" skipped the minimum test whenever a value raised
                    // the maximum, so e.g. monotonically increasing data left
                    // tmin at +Infinity.
                    if (value > tmax) {
                        tmax = value;
                    }
                    if (value < tmin) {
                        tmin = value;
                    }
                }
            }
            max.add(tmax);
            min.add(tmin);
        }

        for (int i = 0; i < dim1; ++i) {
            for (int j = 0; j < dim2; ++j) {
                List<Double> input = new ArrayList<Double>();
                for (int c = 0; c < channels; ++c) {
                    double value = (inputImageIntList.get(c))[i][j];
                    input.add(value);
                }
                image.setRGB(i, j, scheme.getColor(input, min, max).getRGB());
            }
        }

        try {
            javax.imageio.ImageIO.write(image, format, new File(fileName));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Renders a single matrix to an image file, mapping each value to a color
     * via {@code scheme} after normalizing by the matrix's min/max.
     *
     * @param fileName      destination file path
     * @param inputImageInt rectangular matrix of values to render
     * @param format        an {@code ImageIO} format name, e.g. "png"
     * @param scheme        value-to-color mapping
     */
    public static void writeDoubleArray(String fileName, double[][] inputImageInt, String format, ColorScheme scheme) {

        BufferedImage image = new BufferedImage(inputImageInt.length, inputImageInt[0].length,
                BufferedImage.TYPE_INT_ARGB);

        double max = Double.NEGATIVE_INFINITY;
        double min = Double.POSITIVE_INFINITY;
        for (int i = 0; i < inputImageInt.length; ++i) {
            for (int j = 0; j < inputImageInt[i].length; ++j) {
                double value = inputImageInt[i][j];
                // BUG FIX: independent comparisons (see writeDoubleArrayMultiChannel);
                // the previous "else if" never let the first-scanned value (or any
                // new maximum) update the minimum.
                if (value > max) {
                    max = value;
                }
                if (value < min) {
                    min = value;
                }
            }
        }

        for (int i = 0; i < inputImageInt.length; ++i) {
            for (int j = 0; j < inputImageInt[i].length; ++j) {
                double value = inputImageInt[i][j];
                image.setRGB(i, j, scheme.getColor(value, min, max).getRGB());
            }
        }

        try {
            javax.imageio.ImageIO.write(image, format, new File(fileName));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

//    public static double getRampValue(double input, double min, double max) {
//        double end = 1.0 / 6.0;
//        double start = 0.0;
//        return (input - min) / (max - min) * (end - start);
//    }

//    public static Color getColor(double input, double min, double max) {
//        float hue = (float) getRampValue(input, min, max);
//        float saturation = 0.85f;
//        float alpha = 1.0f;
//        return Color.getHSBColor(hue, saturation, alpha);
//    }

    /**
     * Writes a raw little-endian ("II") gray-scale TIFF of the matrix to the
     * stream: header, one byte per pixel (values truncated to their low 8 bits),
     * then an 8-entry IFD.
     *
     * @param dataOut       destination stream
     * @param inputImageInt matrix of pixel values; at most MAXROWS x MAXCOLUMNS
     * @throws RuntimeException if the matrix dimensions are out of range
     */
    public static void writeDoubleArray(
            DataOutputStream dataOut,
            double[][] inputImageInt) {

        final int rows = inputImageInt.length;
        final int columns = inputImageInt[0].length;

        if (rows < 0 || rows > MAXROWS || columns < 0 || columns > MAXCOLUMNS)
            throw new RuntimeException("Invalid # rows and # columns");

        // offset to the end of data (gray values) in file
        int pos = 8 + rows * columns;

        try {
            /*
             * Write the header: "II" (little-endian marker), magic number 42,
             * then the offset of the IFD (which follows the pixel data).
             */
            short i, j;
            i = (short) 'I';
            j = (short) (i * 256 + i);
            fputword(dataOut, j);
            fputword(dataOut, (short) 42);
            fputlong(dataOut, pos);

            /*
             * Write the bitmap: one byte per pixel, row-major.
             */
            for (i = 0; i < rows; i++)
                for (j = 0; j < columns; j++) {
                    int datum = (int) inputImageInt[i][j];
                    dataOut.writeByte((byte) datum);
                }

            /*
             * Write the tags (the IFD).
             */
            fputword(dataOut, (short) 8); // # of tags
            writetiftag(dataOut, SubFileType, TIFFshort, 1, 1);
            writetiftag(dataOut, ImageWidth, TIFFshort, 1, columns);
            writetiftag(dataOut, ImageLength, TIFFshort, 1, rows);
            writetiftag(dataOut, BitsPerSample, TIFFshort, 1, 8);
            writetiftag(dataOut, Compression, TIFFshort, 1, 1);
            writetiftag(dataOut, PhotoMetricInterp, TIFFshort, 1, 1); // for gray values only
            writetiftag(dataOut, StripOffsets, TIFFlong, 1, 8); // beginning of image data
            writetiftag(dataOut, PlanarConfiguration, TIFFshort, 1, 1);

            fputlong(dataOut, 0); // offset of next IFD: none
        } catch (java.io.IOException e) {
            System.out.println("Error occurred while writing output file.");
        }
    }

    /*
     * write one TIFF tag to the IFD
     */
    static void writetiftag(DataOutputStream dataOut, short tag, short type, int length, int offset) {
        fputword(dataOut, tag);
        fputword(dataOut, type);
        fputlong(dataOut, length);
        fputlong(dataOut, offset);
    } /* writetiftag */

    /*
     * function: fputword -- write a 16-bit value little-endian
     */
    static void fputword(DataOutputStream dataOut, short n) {
        try {
            dataOut.writeByte((byte) n);
            dataOut.writeByte((byte) (n >> 8));
        } catch (java.io.IOException e) {
            System.out.println("Error occurred while writing output file.");
        }
    } /* fputword */

    /*
     * function: fputlong -- write a 32-bit value little-endian
     */
    static void fputlong(DataOutputStream dataOut, int n) {
        try {
            dataOut.writeByte((byte) n);
            dataOut.writeByte((byte) (n >> 8));
            dataOut.writeByte((byte) (n >> 16));
            dataOut.writeByte((byte) (n >> 24));
        } catch (java.io.IOException e) {
            System.out.println("Error occurred while writing output file.");
        }
    } /* fputlong */

    public static final short GOOD_WRITE = 0;
    public static final short BAD_WRITE = 1;
    public static final short BAD_READ = 2;
    public static final short MEMORY_ERROR = 3;
    public static final short WRONG_BITS = 4;

    public static final short RGB_RED = 0;
    public static final short RGB_GREEN = 1;
    public static final short RGB_BLUE = 2;
    public static final short RGB_SIZE = 3;

    /*
     * TIFF object sizes
     */
    public static final short TIFFbyte = 1;
    public static final short TIFFascii = 2;
    public static final short TIFFshort = 3;
    public static final short TIFFlong = 4;
    public static final short TIFFrational = 5;

    /*
     * TIFF tag names
     */
    public static final short NewSubFile = 254;
    public static final short SubFileType = 255;
    public static final short ImageWidth = 256;
    public static final short ImageLength = 257;
    public static final short RowsPerStrip = 278;
    public static final short StripOffsets = 273;
    public static final short StripByteCounts = 279;
    public static final short SamplesPerPixel = 277;
    public static final short BitsPerSample = 258;
    public static final short Compression = 259;
    public static final short PlanarConfiguration = 284;
    public static final short Group3Options = 292;
    public static final short Group4Options = 293;
    public static final short FillOrder = 266;
    public static final short Threshholding = 263;
    public static final short CellWidth = 264;
    public static final short CellLength = 265;
    public static final short MinSampleValue = 280;
    public static final short MaxSampleValue = 281;
    public static final short PhotoMetricInterp = 262;
    public static final short GrayResponseUnit = 290;
    public static final short GrayResponseCurve = 291;
    public static final short ColorResponseUnit = 300;
    public static final short ColorResponseCurves = 301;
    public static final short XResolution = 282;
    public static final short YResolution = 283;
    public static final short ResolutionUnit = 296;
    public static final short Orientation = 274;
    public static final short DocumentName = 269;
    public static final short PageName = 285;
    public static final short XPosition = 286;
    public static final short YPosition = 287;
    public static final short PageNumber = 297;
    public static final short ImageDescription = 270;
    public static final short Make = 271;
    public static final short Model = 272;
    public static final short FreeOffsets = 288;
    public static final short FreeByteCounts = 289;
    public static final short ColorMap = 320;
    public static final short Artist = 315;
    public static final short DateTime = 306;
    public static final short HostComputer = 316;
    public static final short Software = 305;
}
maxbiostat/beast-mcmc
src/dr/util/TIFFWriter.java
214,482
package edu.stanford.nlp.trees; /** * A class storing information about a constituent in a character-based tree. * This is used for evaluation of character-based Chinese parsing. * The constituent can be of type "word" (for words), "cat" (for phrases) or "tag" (for POS). * @author Galen Andrew */ public class WordCatConstituent extends LabeledConstituent { public String type; public static final String wordType = "word"; public static final String tagType = "tag"; public static final String catType = "cat"; // this one is for POS tag of correctly segmented words only public static final String goodWordTagType = "goodWordTag"; public WordCatConstituent(Tree subTree, Tree root, String type) { setStart(Trees.leftEdge(subTree, root)); setEnd(Trees.rightEdge(subTree, root)); setFromString(subTree.value()); this.type = type; } }
stanfordnlp/CoreNLP
src/edu/stanford/nlp/trees/WordCatConstituent.java
214,483
/*
 * Transform.java
 *
 * Copyright (c) 2002-2016 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA  02110-1301  USA
 */

package dr.util;

import dr.inference.model.Parameter;
import dr.math.MathUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * interface for the one-to-one transform of a continuous variable.
 * A static member Transform.LOG provides an instance of LogTransform
 *
 * @author Andrew Rambaut
 * @author Guy Baele
 * @author Marc Suchard
 * @version $Id: Transform.java,v 1.5 2005/05/24 20:26:01 rambaut Exp $
 */
public interface Transform {

    /**
     * @param value evaluation point
     * @return the transformed value
     */
    double transform(double value);

    /**
     * overloaded transformation that takes and returns an array of doubles
     * @param values evaluation points
     * @param from start transformation at this index
     * @param to end transformation at this index
     * @return the transformed values
     */
    double[] transform(double[] values, int from, int to);

    /**
     * @param value evaluation point
     * @return the inverse transformed value
     */
    double inverse(double value);

    /**
     * overloaded transformation that takes and returns an array of doubles
     * @param values evaluation points
     * @param from start transformation at this index
     * @param to end transformation at this index
     * @return the transformed values
     */
    double[] inverse(double[] values, int from, int to);

    /**
     * overloaded transformation that takes and returns an array of doubles
     * @param values evaluation points
     * @param from start transformation at this index
     * @param to end transformation at this index
     * @param sum fixed sum of values that needs to be enforced
     * @return the transformed values
     */
    double[] inverse(double[] values, int from, int to, double sum);

    double updateGradientLogDensity(double gradient, double value);

    double[] updateGradientLogDensity(double[] gradient, double[] value, int from, int to);

    double gradientInverse(double value);

    double[] gradientInverse(double[] values, int from, int to);

    /**
     * @return the transform's name
     */
    String getTransformName();

    /**
     * @param value evaluation point
     * @return the log of the transform's jacobian
     */
    double getLogJacobian(double value);

    /**
     * @param values evaluation points
     * @param from start calculation at this index
     * @param to end calculation at this index
     * @return the log of the transform's jacobian
     */
    double getLogJacobian(double[] values, int from, int to);

    /**
     * Base class for element-wise transforms: implements the array overloads
     * by applying the scalar methods to each index in [from, to) -- note the
     * exclusive upper bound, unlike LogConstrainedSumTransform below.
     */
    abstract class UnivariableTransform implements Transform {

        public abstract double transform(double value);

        public double[] transform(double[] values, int from, int to) {
            double[] result = values.clone();
            for (int i = from; i < to; ++i) {
                result[i] = transform(values[i]);
            }
            return result;
        }

        public abstract double inverse(double value);

        public double[] inverse(double[] values, int from, int to) {
            double[] result = values.clone();
            for (int i = from; i < to; ++i) {
                result[i] = inverse(values[i]);
            }
            return result;
        }

        public double[] inverse(double[] values, int from, int to, double sum) {
            throw new RuntimeException("Fixed sum cannot be enforced for a univariate transformation.");
        }

        public abstract double gradientInverse(double value);

        public double[] gradientInverse(double[] values, int from, int to) {
            double[] result = values.clone();
            for (int i = from; i < to; ++i) {
                result[i] = gradientInverse(values[i]);
            }
            return result;
        }

        public abstract double updateGradientLogDensity(double gradient, double value);

        public double[] updateGradientLogDensity(double[] gradient, double[] value , int from, int to) {
            double[] result = value.clone();
            for (int i = from; i < to; ++i) {
                result[i] = updateGradientLogDensity(gradient[i], value[i]);
            }
            return result;
        }

        public abstract double getLogJacobian(double value);

        public double getLogJacobian(double[] values, int from, int to) {
            double sum = 0.0;
            for (int i = from; i < to; ++i) {
                sum += getLogJacobian(values[i]);
            }
            return sum;
        }
    }

    /**
     * Base class for transforms that only make sense on whole vectors: all
     * scalar methods throw.
     */
    abstract class MultivariableTransform implements Transform {

        public double transform(double value) {
            throw new RuntimeException("Transformation not permitted for this type of parameter, exiting ...");
        }

        public double inverse(double value) {
            throw new RuntimeException("Transformation not permitted for this type of parameter, exiting ...");
        }

        public double updateGradientLogDensity(double gradient, double value) {
            throw new RuntimeException("Transformation not permitted for this type of parameter, exiting ...");
        }

        public double gradientInverse(double value) {
            throw new RuntimeException("Transformation not permitted for this type of parameter, exiting ...");
        }

        public double getLogJacobian(double value) {
            throw new RuntimeException("Transformation not permitted for this type of parameter, exiting ...");
        }
    }

    /** A multivariable transform that is tied to a particular Parameter. */
    abstract class MultivariableTransformWithParameter extends MultivariableTransform {
        abstract public Parameter getParameter();
    }

    /** y = log(x); inverse y = exp(x). */
    class LogTransform extends UnivariableTransform {

        public double transform(double value) {
            return Math.log(value);
        }

        public double inverse(double value) {
            return Math.exp(value);
        }

        public double gradientInverse(double value) { return Math.exp(value); }

        public double updateGradientLogDensity(double gradient, double value) {
            // value == gradient of inverse()
            // 1.0 == gradient of log Jacobian of inverse()
            return gradient * value + 1.0;
        }

        public String getTransformName() {
            return "log";
        }

        public double getLogJacobian(double value) {
            return -Math.log(value);
        }
    }

    /**
     * Element-wise log transform whose inverse renormalizes the vector to a
     * fixed sum.  NOTE: unlike UnivariableTransform, the array methods here
     * treat [from, to] as an INCLUSIVE range.
     */
    class LogConstrainedSumTransform extends MultivariableTransform {

        //private double fixedSum;

        public LogConstrainedSumTransform() {
        }

        /*public LogConstrainedSumTransform(double fixedSum) {
            this.fixedSum = fixedSum;
        }

        public double getConstrainedSum() {
            return this.fixedSum;
        }*/

        public double[] transform(double[] values, int from, int to) {
            double[] transformedValues = new double[to - from + 1];
            int counter = 0;
            for (int i = from; i <= to; i++) {
                transformedValues[counter] = Math.log(values[i]);
                counter++;
            }
            return transformedValues;
        }

        //inverse transformation assumes a sum of elements equal to the number of elements
        public double[] inverse(double[] values, int from, int to) {
            double sum = (double)(to - from + 1);
            double[] transformedValues = new double[to - from + 1];
            int counter = 0;
            double newSum = 0.0;
            for (int i = from; i <= to; i++) {
                transformedValues[counter] = Math.exp(values[i]);
                newSum += transformedValues[counter];
                counter++;
            }
            /*for (int i = 0; i < sum; i++) {
                transformedValues[i] = (transformedValues[i] / newSum) * sum;
            }*/
            for (int i = 0; i < transformedValues.length; i++) {
                transformedValues[i] = (transformedValues[i] / newSum) * sum;
            }
            return transformedValues;
        }

        //inverse transformation assumes a given sum provided as an argument
        public double[] inverse(double[] values, int from, int to, double sum) {
            //double sum = (double)(to - from + 1);
            double[] transformedValues = new double[to - from + 1];
            int counter = 0;
            double newSum = 0.0;
            for (int i = from; i <= to; i++) {
                transformedValues[counter] = Math.exp(values[i]);
                newSum += transformedValues[counter];
                counter++;
            }
            /*for (int i = 0; i < sum; i++) {
                transformedValues[i] = (transformedValues[i] / newSum) * sum;
            }*/
            for (int i = 0; i < transformedValues.length; i++) {
                transformedValues[i] = (transformedValues[i] / newSum) * sum;
            }
            return transformedValues;
        }

        public String getTransformName() {
            return "logConstrainedSum";
        }

        public double[] updateGradientLogDensity(double[] gradient, double[] value, int from, int to) {
            throw new RuntimeException("Not yet implemented");
        }

        public double[] gradientInverse(double[] values, int from, int to) {
            throw new RuntimeException("Not yet implemented");
        }

        public double getLogJacobian(double[] values, int from, int to) {
            double sum = 0.0;
            for (int i = from; i <= to; i++) {
                sum -= Math.log(values[i]);
            }
            return sum;
        }

        // Smoke test: round-trips a vector through transform/inverse with noise
        // added in log space; the renormalizing inverse should restore the sum.
        public static void main(String[] args) {

            //specify starting values
            double[] startValues = {1.5, 0.6, 0.9};
            System.err.print("Starting values: ");
            double startSum = 0.0;
            for (int i = 0; i < startValues.length; i++) {
                System.err.print(startValues[i] + " ");
                startSum += startValues[i];
            }
            System.err.println("\nSum = " + startSum);

            //perform transformation
            double[] transformedValues = LOG_CONSTRAINED_SUM.transform(startValues, 0, startValues.length-1);
            System.err.print("Transformed values: ");
            for (int i = 0; i < transformedValues.length; i++) {
                System.err.print(transformedValues[i] + " ");
            }
            System.err.println();

            //add draw for normal distribution to transformed elements
            for (int i = 0; i < transformedValues.length; i++) {
                transformedValues[i] += 0.20 * MathUtils.nextDouble();
            }

            //perform inverse transformation
            transformedValues = LOG_CONSTRAINED_SUM.inverse(transformedValues, 0, transformedValues.length-1);
            System.err.print("New values: ");
            double endSum = 0.0;
            for (int i = 0; i < transformedValues.length; i++) {
                System.err.print(transformedValues[i] + " ");
                endSum += transformedValues[i];
            }
            System.err.println("\nSum = " + endSum);

            if (startSum != endSum) {
                System.err.println("Starting and ending constraints differ!");
            }
        }
    }

    /** y = logit(x) = log(x / (1 - x)); inverse is the logistic function. */
    class LogitTransform extends UnivariableTransform {

        public LogitTransform() {
            range = 1.0;
            lower = 0.0;
        }

        public double transform(double value) {
            return Math.log(value / (1.0 - value));
        }

        public double inverse(double value) {
            return 1.0 / (1.0 + Math.exp(-value));
        }

        public double gradientInverse(double value) {
            throw new RuntimeException("Not yet implemented");
        }

        public double updateGradientLogDensity(double gradient, double value) {
            throw new RuntimeException("Not yet implemented");
        }

        public String getTransformName() {
            return "logit";
        }

        public double getLogJacobian(double value) {
            return -Math.log(1.0 - value) - Math.log(value);
        }

        // NOTE(review): these fields are assigned in the constructor but never
        // read; transform/inverse hard-code the (0, 1) interval.
        private final double range;
        private final double lower;
    }

    /** Fisher z-transformation: y = atanh(x); inverse y = tanh(x). */
    class FisherZTransform extends UnivariableTransform {

        public double transform(double value) {
            return 0.5 * (Math.log(1.0 + value) - Math.log(1.0 - value));
        }

        public double inverse(double value) {
            return (Math.exp(2 * value) - 1) / (Math.exp(2 * value) + 1);
        }

        public double gradientInverse(double value) {
            throw new RuntimeException("Not yet implemented");
        }

        public double updateGradientLogDensity(double gradient, double value) {
            throw new RuntimeException("Not yet implemented");
        }

        public String getTransformName() {
            return "fisherz";
        }

        public double getLogJacobian(double value) {
            return -Math.log(1 - value) - Math.log(1 + value);
        }
    }

    /** y = -x; self-inverse, unit Jacobian. */
    class NegateTransform extends UnivariableTransform {

        public double transform(double value) {
            return -value;
        }

        public double inverse(double value) {
            return -value;
        }

        public double updateGradientLogDensity(double gradient, double value) {
            // -1 == gradient of inverse()
            // 0.0 == gradient of log Jacobian of inverse()
            return -gradient;
        }

        public double gradientInverse(double value) { return -1.0; }

        public String getTransformName() {
            return "negate";
        }

        public double getLogJacobian(double value) {
            return 0.0;
        }
    }

    /** y = x^power (power defaults to 2); inverse y = x^(1/power). */
    class PowerTransform extends UnivariableTransform{
        private double power;

        PowerTransform(){
            this.power = 2;
        }

        PowerTransform(double power){
            this.power = power;
        }

        @Override
        public String getTransformName() {
            return "Power Transform";
        }

        @Override
        public double transform(double value) {
            return Math.pow(value, power);
        }

        @Override
        public double inverse(double value) {
            return Math.pow(value, 1 / power);
        }

        @Override
        public double gradientInverse(double value) {
            throw new RuntimeException("not implemented yet");
//            return 0;
        }

        @Override
        public double updateGradientLogDensity(double gradient, double value) {
            throw new RuntimeException("not implemented yet");
        }

        @Override
        public double getLogJacobian(double value) {
            throw new RuntimeException("not implemented yet");
        }
    }

    /** Identity transform. */
    class NoTransform extends UnivariableTransform {

        public double transform(double value) {
            return value;
        }

        public double inverse(double value) {
            return value;
        }

        public double updateGradientLogDensity(double gradient, double value) {
            return gradient;
        }

        public double gradientInverse(double value) { return 1.0; }

        public String getTransformName() {
            return "none";
        }

        public double getLogJacobian(double value) {
            return 0.0;
        }
    }

    /** Function composition: transform(x) = outer(inner(x)). */
    class Compose extends UnivariableTransform {

        public Compose(UnivariableTransform outer, UnivariableTransform inner) {
            this.outer = outer;
            this.inner = inner;
        }

        @Override
        public String getTransformName() {
            return "compose." + outer.getTransformName() + "." + inner.getTransformName();
        }

        @Override
        public double transform(double value) {
            final double outerValue = inner.transform(value);
            final double outerTransform = outer.transform(outerValue);

//            System.err.println(value + " " + outerValue + " " + outerTransform);
//            System.exit(-1);

            return outerTransform;
//            return outer.transform(inner.transform(value));
        }

        @Override
        public double inverse(double value) {
            return inner.inverse(outer.inverse(value));
        }

        @Override
        public double gradientInverse(double value) {
            // chain rule applied to the composed inverse
            return inner.gradientInverse(value) * outer.gradientInverse(inner.transform(value));
        }

        @Override
        public double updateGradientLogDensity(double gradient, double value) {
//            final double innerGradient = inner.updateGradientLogDensity(gradient, value);
//            final double outerValue = inner.transform(value);
//            final double outerGradient = outer.updateGradientLogDensity(innerGradient, outerValue);
//            return outerGradient;

            return outer.updateGradientLogDensity(inner.updateGradientLogDensity(gradient, value), inner.transform(value));
        }

        @Override
        public double getLogJacobian(double value) {
            // log-Jacobians of a composition add
            return inner.getLogJacobian(value) + outer.getLogJacobian(inner.transform(value));
        }

        private final UnivariableTransform outer;
        private final UnivariableTransform inner;
    }

    /** Swaps transform and inverse of the wrapped transform. */
    class Inverse extends UnivariableTransform {

        public Inverse(UnivariableTransform inner) {
            this.inner = inner;
        }

        @Override
        public String getTransformName() {
            return "inverse." + inner.getTransformName();
        }

        @Override
        public double transform(double value) {
            return inner.inverse(value);  // Purposefully switched

        }

        @Override
        public double updateGradientLogDensity(double gradient, double value) {
            throw new RuntimeException("Not yet implemented");
        }

        @Override
        public double inverse(double value) {
            return inner.transform(value); // Purposefully switched
        }

        @Override
        public double gradientInverse(double value) {
            throw new RuntimeException("Not yet implemented");
        }

        @Override
        public double getLogJacobian(double value) {
            return -inner.getLogJacobian(value);
        }

        private final UnivariableTransform inner;
    }

    /** Applies a (possibly different) transform to each vector element. */
    class Array extends MultivariableTransformWithParameter {

          private final List<Transform> array;
          private final Parameter parameter;

          public Array(List<Transform> array, Parameter parameter) {
              this.parameter = parameter;
              this.array = array;

//              if (parameter.getDimension() != array.size()) {
//                  throw new IllegalArgumentException("Dimension mismatch");
//              }
          }

          public Parameter getParameter() { return parameter; }

          @Override
          public double[] transform(double[] values, int from, int to) {

              final double[] result = values.clone();

              for (int i = from; i < to; ++i) {
                  result[i] = array.get(i).transform(values[i]);
              }
              return result;
          }

          @Override
          public double[] inverse(double[] values, int from, int to) {

              final double[] result = values.clone();

              for (int i = from; i < to; ++i) {
                  result[i] = array.get(i).inverse(values[i]);
              }
              return result;
          }

          @Override
          public double[] inverse(double[] values, int from, int to, double sum) {
              throw new RuntimeException("Not yet implemented.");
          }

          @Override
          public double[] gradientInverse(double[] values, int from, int to) {

              final double[] result = values.clone();

              for (int i = from; i < to; ++i) {
                  result[i] = array.get(i).gradientInverse(values[i]);
              }
              return result;
          }

          @Override
          public double[] updateGradientLogDensity(double[] gradient, double[] values, int from, int to) {

              final double[] result = values.clone();

              for (int i = from; i < to; ++i) {
                  result[i] = array.get(i).updateGradientLogDensity(gradient[i], values[i]);
              }
              return result;
          }

          @Override
          public String getTransformName() {
              return "array";
          }

          @Override
          public double getLogJacobian(double[] values, int from, int to) {

              double sum = 0.0;

              for (int i = from; i < to; ++i) {
                  sum += array.get(i).getLogJacobian(values[i]);
              }
              return sum;
          }
    }

    /**
     * Applies transforms to index segments of a vector; gaps between the
     * supplied segments are filled with the identity transform.
     */
    class Collection extends MultivariableTransformWithParameter {

        private final List<ParsedTransform> segments;
        private final Parameter parameter;

        public Collection(List<ParsedTransform> segments, Parameter parameter) {
            this.parameter = parameter;
            this.segments = ensureContiguous(segments);
        }

        public Parameter getParameter() { return parameter; }

        // Fills any uncovered index ranges with NONE so that the segments
        // cover [0, parameter.getDimension()) contiguously.
        private List<ParsedTransform> ensureContiguous(List<ParsedTransform> segments) {

            final List<ParsedTransform> contiguous = new ArrayList<ParsedTransform>();

            int current = 0;
            for (ParsedTransform segment : segments) {
                if (current < segment.start) {
                    contiguous.add(new ParsedTransform(NONE, current, segment.start));
                }
                contiguous.add(segment);
                current = segment.end;
            }
            if (current < parameter.getDimension()) {
                contiguous.add(new ParsedTransform(NONE, current, parameter.getDimension()));
            }

//            System.err.println("Segments:");
//            for (ParsedTransform transform : contiguous) {
//                System.err.println(transform.transform.getTransformName() + " " + transform.start + " " + transform.end);
//            }
//            System.exit(-1);

            return contiguous;
        }

        @Override
        public double[] transform(double[] values, int from, int to) {

            final double[] result = values.clone();

            for (ParsedTransform segment : segments) {
                if (from < segment.end && to >= segment.start) {
                    final int begin = Math.max(segment.start, from);
                    final int end = Math.min(segment.end, to);
                    for (int i = begin; i < end; ++i) {
                        result[i] = segment.transform.transform(values[i]);
                    }
                }
            }
            return result;
        }

        @Override
        public double[] inverse(double[] values, int from, int to) {

            final double[] result = values.clone();

            for (ParsedTransform segment : segments) {
                if (from < segment.end && to >= segment.start) {
                    final int begin = Math.max(segment.start, from);
                    final int end = Math.min(segment.end, to);
                    for (int i = begin; i < end; ++i) {
                        result[i] = segment.transform.inverse(values[i]);
                    }
                }
            }
            return result;
        }

        @Override
        public double[] inverse(double[] values, int from, int to, double sum) {
            throw new RuntimeException("Not yet implemented.");
        }

        @Override
        public double[] gradientInverse(double[] values, int from, int to) {

            final double[] result = values.clone();

            for (ParsedTransform segment : segments) {
                if (from < segment.end && to >= segment.start) {
                    final int begin = Math.max(segment.start, from);
                    final int end = Math.min(segment.end, to);
                    for (int i = begin; i < end; ++i) {
                        result[i] = segment.transform.gradientInverse(values[i]);
                    }
                }
            }
            return result;
        }

        @Override
        public double[] updateGradientLogDensity(double[] gradient, double[] values, int from, int to) {

            final double[] result = values.clone();

            for (ParsedTransform segment : segments) {
                if (from < segment.end && to >= segment.start) {
                    final int begin = Math.max(segment.start, from);
                    final int end = Math.min(segment.end, to);
                    for (int i = begin; i < end; ++i) {
                        result[i] = segment.transform.updateGradientLogDensity(gradient[i], values[i]);
                    }
                }
            }
            return result;
        }

        @Override
        public String getTransformName() {
            return "collection";
        }

        @Override
        public double getLogJacobian(double[] values, int from, int to) {

            double sum = 0.0;

            for (ParsedTransform segment : segments) {
                if (from < segment.end && to >= segment.start) {
                    final int begin = Math.max(segment.start, from);
                    final int end = Math.min(segment.end, to);
                    for (int i = begin; i < end; ++i) {
                        sum += segment.transform.getLogJacobian(values[i]);
                    }
                }
            }
//            System.err.println("Log: " + sum + " " + segments.size());
            return sum;
        }

//        class Segment {
//
//            public Segment(Transform transform, int start, int end) {
//                this.transform = transform;
//                this.start = start;
//                this.end = end;
//            }
//            public Transform transform;
//            public int start;
//            public int end;
//        }
    }

    /** A transform together with the index range (and options) it applies to. */
    class ParsedTransform {
        public Transform transform;
        public int start; // zero-indexed
        public int end; // zero-indexed, i.e, i = start; i < end; ++i
        public int every = 1;
        public double fixedSum = 0.0;
        public List<Parameter> parameters = null;

        public ParsedTransform() {

        }

        public ParsedTransform(Transform transform, int start, int end) {
            this.transform = transform;
            this.start = start;
            this.end = end;
        }

        public ParsedTransform clone() {
            ParsedTransform clone = new ParsedTransform();
            clone.transform = transform;
            clone.start = start;
            clone.end = end;
            clone.every = every;
            clone.fixedSum = fixedSum;
            clone.parameters = parameters;
            return clone;
        }

        public boolean equivalent(ParsedTransform other) {
            // NOTE(review): compares ranges/options and parameter-list identity,
            // but deliberately ignores which transform is applied.
            if (start == other.start && end == other.end && every == other.every && parameters == other.parameters) {
                return true;
            } else {
                return false;
            }
        }
    }

    class Util {
        /** @return an array of {@code size} identity (NONE) transforms. */
        public static Transform[] getListOfNoTransforms(int size) {
            Transform[] transforms = new Transform[size];
            for (int i = 0; i < size; ++i) {
                transforms[i] = NONE;
            }
            return transforms;
        }
    }

    // Shared, stateless instances (implicitly public static final on an interface).
    NoTransform NONE = new NoTransform();
    LogTransform LOG = new LogTransform();
    NegateTransform NEGATE = new NegateTransform();
    Compose LOG_NEGATE = new Compose(new LogTransform(), new NegateTransform());
    LogConstrainedSumTransform LOG_CONSTRAINED_SUM = new LogConstrainedSumTransform();
    LogitTransform LOGIT = new LogitTransform();
    FisherZTransform FISHER_Z = new FisherZTransform();

    /** Named transform types, each carrying a prototype instance. */
    enum Type {
        NONE("none", new NoTransform()),
        LOG("log", new LogTransform()),
        NEGATE("negate", new NegateTransform()),
        LOG_NEGATE("log-negate", new Compose(new LogTransform(), new NegateTransform())),
        LOG_CONSTRAINED_SUM("logConstrainedSum", new LogConstrainedSumTransform()),
        LOGIT("logit", new LogitTransform()),
        FISHER_Z("fisherZ",new FisherZTransform()),
        POWER("power", new PowerTransform());

        Type(String name, Transform transform) {
            this.name = name;
            this.transform = transform;
        }

        public Transform getTransform() {
            return transform;
        }

        public String getName() {
            return name;
        }

        private Transform transform;
        private String name;
    }
//    String TRANSFORM = "transform";
//    String TYPE = "type";
//    String START = "start";
//    String END = "end";
//    String EVERY = "every";
//    String INVERSE = "inverse";
}
maxbiostat/beast-mcmc
src/dr/util/Transform.java
214,484
/*
 * Copyright (c) 2010-2011, The MiCode Open Source Community (www.micode.net)
 *
 * This file is part of FileExplorer.
 *
 * FileExplorer is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * FileExplorer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FileExplorer.  If not, see <http://www.gnu.org/licenses/>.
 */

package net.micode.fileexplorer;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Utilities for dealing with MIME types.
 * Used to implement java.net.URLConnection and android.webkit.MimeTypeMap.
 */
public final class MimeUtils {

    /** Maps a MIME type to its canonical (most popular) file extension. */
    private static final Map<String, String> mimeTypeToExtensionMap = new HashMap<String, String>();

    /** Maps a file extension (without the dot) to its MIME type. */
    private static final Map<String, String> extensionToMimeTypeMap = new HashMap<String, String>();

    static {
        // The following table is based on /etc/mime.types data minus
        // chemical/* MIME types and MIME types that don't map to any
        // file extensions. We also exclude top-level domain names to
        // deal with cases like:
        //
        // mail.google.com/a/google.com
        //
        // and "active" MIME types (due to potential security issues).
        add("application/andrew-inset", "ez");
        add("application/dsptype", "tsp");
        add("application/futuresplash", "spl");
        add("application/hta", "hta");
        add("application/mac-binhex40", "hqx");
        add("application/mac-compactpro", "cpt");
        add("application/mathematica", "nb");
        add("application/msaccess", "mdb");
        add("application/oda", "oda");
        add("application/ogg", "ogg");
        add("application/pdf", "pdf");
        add("application/pgp-keys", "key");
        add("application/pgp-signature", "pgp");
        add("application/pics-rules", "prf");
        add("application/rar", "rar");
        add("application/rdf+xml", "rdf");
        add("application/rss+xml", "rss");
        add("application/zip", "zip");
        add("application/vnd.android.package-archive", "apk");
        add("application/vnd.cinderella", "cdy");
        add("application/vnd.ms-pki.stl", "stl");
        add("application/vnd.oasis.opendocument.database", "odb");
        add("application/vnd.oasis.opendocument.formula", "odf");
        add("application/vnd.oasis.opendocument.graphics", "odg");
        add("application/vnd.oasis.opendocument.graphics-template", "otg");
        add("application/vnd.oasis.opendocument.image", "odi");
        add("application/vnd.oasis.opendocument.spreadsheet", "ods");
        add("application/vnd.oasis.opendocument.spreadsheet-template", "ots");
        add("application/vnd.oasis.opendocument.text", "odt");
        add("application/vnd.oasis.opendocument.text-master", "odm");
        add("application/vnd.oasis.opendocument.text-template", "ott");
        add("application/vnd.oasis.opendocument.text-web", "oth");
        add("application/vnd.google-earth.kml+xml", "kml");
        add("application/vnd.google-earth.kmz", "kmz");
        add("application/msword", "doc");
        add("application/msword", "dot");
        add("application/vnd.openxmlformats-officedocument.wordprocessingml.document", "docx");
        add("application/vnd.openxmlformats-officedocument.wordprocessingml.template", "dotx");
        add("application/vnd.ms-excel", "xls");
        add("application/vnd.ms-excel", "xlt");
        add("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "xlsx");
        add("application/vnd.openxmlformats-officedocument.spreadsheetml.template", "xltx");
        add("application/vnd.ms-powerpoint", "ppt");
        add("application/vnd.ms-powerpoint", "pot");
        add("application/vnd.ms-powerpoint", "pps");
        add("application/vnd.openxmlformats-officedocument.presentationml.presentation", "pptx");
        add("application/vnd.openxmlformats-officedocument.presentationml.template", "potx");
        add("application/vnd.openxmlformats-officedocument.presentationml.slideshow", "ppsx");
        add("application/vnd.rim.cod", "cod");
        add("application/vnd.smaf", "mmf");
        add("application/vnd.stardivision.calc", "sdc");
        add("application/vnd.stardivision.draw", "sda");
        add("application/vnd.stardivision.impress", "sdd");
        add("application/vnd.stardivision.impress", "sdp");
        add("application/vnd.stardivision.math", "smf");
        add("application/vnd.stardivision.writer", "sdw");
        add("application/vnd.stardivision.writer", "vor");
        add("application/vnd.stardivision.writer-global", "sgl");
        add("application/vnd.sun.xml.calc", "sxc");
        add("application/vnd.sun.xml.calc.template", "stc");
        add("application/vnd.sun.xml.draw", "sxd");
        add("application/vnd.sun.xml.draw.template", "std");
        add("application/vnd.sun.xml.impress", "sxi");
        add("application/vnd.sun.xml.impress.template", "sti");
        add("application/vnd.sun.xml.math", "sxm");
        add("application/vnd.sun.xml.writer", "sxw");
        add("application/vnd.sun.xml.writer.global", "sxg");
        add("application/vnd.sun.xml.writer.template", "stw");
        add("application/vnd.visio", "vsd");
        add("application/x-abiword", "abw");
        add("application/x-apple-diskimage", "dmg");
        add("application/x-bcpio", "bcpio");
        add("application/x-bittorrent", "torrent");
        add("application/x-cdf", "cdf");
        add("application/x-cdlink", "vcd");
        add("application/x-chess-pgn", "pgn");
        add("application/x-cpio", "cpio");
        add("application/x-debian-package", "deb");
        add("application/x-debian-package", "udeb");
        add("application/x-director", "dcr");
        add("application/x-director", "dir");
        add("application/x-director", "dxr");
        add("application/x-dms", "dms");
        add("application/x-doom", "wad");
        add("application/x-dvi", "dvi");
        add("application/x-flac", "flac");
        add("application/x-font", "pfa");
        add("application/x-font", "pfb");
        add("application/x-font", "gsf");
        add("application/x-font", "pcf");
        add("application/x-font", "pcf.Z");
        add("application/x-freemind", "mm");
        add("application/x-futuresplash", "spl");
        add("application/x-gnumeric", "gnumeric");
        add("application/x-go-sgf", "sgf");
        add("application/x-graphing-calculator", "gcf");
        add("application/x-gtar", "gtar");
        add("application/x-gtar", "tgz");
        add("application/x-gtar", "taz");
        add("application/x-hdf", "hdf");
        add("application/x-ica", "ica");
        add("application/x-internet-signup", "ins");
        add("application/x-internet-signup", "isp");
        add("application/x-iphone", "iii");
        add("application/x-iso9660-image", "iso");
        add("application/x-jmol", "jmz");
        add("application/x-kchart", "chrt");
        add("application/x-killustrator", "kil");
        add("application/x-koan", "skp");
        add("application/x-koan", "skd");
        add("application/x-koan", "skt");
        add("application/x-koan", "skm");
        add("application/x-kpresenter", "kpr");
        add("application/x-kpresenter", "kpt");
        add("application/x-kspread", "ksp");
        add("application/x-kword", "kwd");
        add("application/x-kword", "kwt");
        add("application/x-latex", "latex");
        add("application/x-lha", "lha");
        add("application/x-lzh", "lzh");
        add("application/x-lzx", "lzx");
        add("application/x-maker", "frm");
        add("application/x-maker", "maker");
        add("application/x-maker", "frame");
        add("application/x-maker", "fb");
        add("application/x-maker", "book");
        add("application/x-maker", "fbdoc");
        add("application/x-mif", "mif");
        add("application/x-ms-wmd", "wmd");
        add("application/x-ms-wmz", "wmz");
        add("application/x-msi", "msi");
        add("application/x-ns-proxy-autoconfig", "pac");
        add("application/x-nwc", "nwc");
        add("application/x-object", "o");
        add("application/x-oz-application", "oza");
        add("application/x-pkcs12", "p12");
        add("application/x-pkcs7-certreqresp", "p7r");
        add("application/x-pkcs7-crl", "crl");
        add("application/x-quicktimeplayer", "qtl");
        add("application/x-shar", "shar");
        add("application/x-shockwave-flash", "swf");
        add("application/x-stuffit", "sit");
        add("application/x-sv4cpio", "sv4cpio");
        add("application/x-sv4crc", "sv4crc");
        add("application/x-tar", "tar");
        add("application/x-texinfo", "texinfo");
        add("application/x-texinfo", "texi");
        add("application/x-troff", "t");
        add("application/x-troff", "roff");
        add("application/x-troff-man", "man");
        add("application/x-ustar", "ustar");
        add("application/x-wais-source", "src");
        add("application/x-wingz", "wz");
        add("application/x-webarchive", "webarchive");
        add("application/x-webarchive-xml", "webarchivexml");
        add("application/x-x509-ca-cert", "crt");
        add("application/x-x509-user-cert", "crt");
        add("application/x-xcf", "xcf");
        add("application/x-xfig", "fig");
        add("application/xhtml+xml", "xhtml");
        add("audio/3gpp", "3gpp");
        add("audio/amr", "amr");
        add("audio/basic", "snd");
        add("audio/midi", "mid");
        add("audio/midi", "midi");
        add("audio/midi", "kar");
        add("audio/midi", "xmf");
        add("audio/mobile-xmf", "mxmf");
        add("audio/mpeg", "mpga");
        add("audio/mpeg", "mpega");
        add("audio/mpeg", "mp2");
        add("audio/mpeg", "mp3");
        add("audio/mpeg", "m4a");
        add("audio/mpegurl", "m3u");
        add("audio/prs.sid", "sid");
        add("audio/x-aiff", "aif");
        add("audio/x-aiff", "aiff");
        add("audio/x-aiff", "aifc");
        add("audio/x-gsm", "gsm");
        add("audio/x-mpegurl", "m3u");
        add("audio/x-ms-wma", "wma");
        add("audio/x-ms-wax", "wax");
        add("audio/x-pn-realaudio", "ra");
        add("audio/x-pn-realaudio", "rm");
        add("audio/x-pn-realaudio", "ram");
        add("audio/x-realaudio", "ra");
        add("audio/x-scpls", "pls");
        add("audio/x-sd2", "sd2");
        add("audio/x-wav", "wav");
        add("image/bmp", "bmp");
        add("audio/x-qcp", "qcp");
        add("image/gif", "gif");
        add("image/ico", "cur");
        add("image/ico", "ico");
        add("image/ief", "ief");
        add("image/jpeg", "jpeg");
        add("image/jpeg", "jpg");
        add("image/jpeg", "jpe");
        add("image/pcx", "pcx");
        add("image/png", "png");
        add("image/svg+xml", "svg");
        add("image/svg+xml", "svgz");
        add("image/tiff", "tiff");
        add("image/tiff", "tif");
        add("image/vnd.djvu", "djvu");
        add("image/vnd.djvu", "djv");
        add("image/vnd.wap.wbmp", "wbmp");
        add("image/x-cmu-raster", "ras");
        add("image/x-coreldraw", "cdr");
        add("image/x-coreldrawpattern", "pat");
        add("image/x-coreldrawtemplate", "cdt");
        add("image/x-corelphotopaint", "cpt");
        add("image/x-icon", "ico");
        add("image/x-jg", "art");
        add("image/x-jng", "jng");
        add("image/x-ms-bmp", "bmp");
        add("image/x-photoshop", "psd");
        add("image/x-portable-anymap", "pnm");
        add("image/x-portable-bitmap", "pbm");
        add("image/x-portable-graymap", "pgm");
        add("image/x-portable-pixmap", "ppm");
        add("image/x-rgb", "rgb");
        add("image/x-xbitmap", "xbm");
        add("image/x-xpixmap", "xpm");
        add("image/x-xwindowdump", "xwd");
        add("model/iges", "igs");
        add("model/iges", "iges");
        add("model/mesh", "msh");
        add("model/mesh", "mesh");
        add("model/mesh", "silo");
        add("text/calendar", "ics");
        add("text/calendar", "icz");
        add("text/comma-separated-values", "csv");
        add("text/css", "css");
        add("text/html", "htm");
        add("text/html", "html");
        add("text/h323", "323");
        add("text/iuls", "uls");
        add("text/mathml", "mml");
        // add ".txt" first so it will be the default for ExtensionFromMimeType
        add("text/plain", "txt");
        add("text/plain", "asc");
        add("text/plain", "text");
        add("text/plain", "diff");
        add("text/plain", "po"); // reserve "pot" for vnd.ms-powerpoint
        add("text/richtext", "rtx");
        add("text/rtf", "rtf");
        add("text/texmacs", "ts");
        add("text/text", "phps");
        add("text/tab-separated-values", "tsv");
        add("text/xml", "xml");
        add("text/x-bibtex", "bib");
        add("text/x-boo", "boo");
        add("text/x-c++hdr", "h++");
        add("text/x-c++hdr", "hpp");
        add("text/x-c++hdr", "hxx");
        add("text/x-c++hdr", "hh");
        add("text/x-c++src", "c++");
        add("text/x-c++src", "cpp");
        add("text/x-c++src", "cxx");
        add("text/x-chdr", "h");
        add("text/x-component", "htc");
        add("text/x-csh", "csh");
        add("text/x-csrc", "c");
        add("text/x-dsrc", "d");
        add("text/x-haskell", "hs");
        add("text/x-java", "java");
        add("text/x-literate-haskell", "lhs");
        add("text/x-moc", "moc");
        add("text/x-pascal", "p");
        add("text/x-pascal", "pas");
        add("text/x-pcs-gcd", "gcd");
        add("text/x-setext", "etx");
        add("text/x-tcl", "tcl");
        add("text/x-tex", "tex");
        add("text/x-tex", "ltx");
        add("text/x-tex", "sty");
        add("text/x-tex", "cls");
        add("text/x-vcalendar", "vcs");
        add("text/x-vcard", "vcf");
        add("video/3gpp", "3gpp");
        add("video/3gpp", "3gp");
        add("video/3gpp", "3g2");
        add("video/dl", "dl");
        add("video/dv", "dif");
        add("video/dv", "dv");
        add("video/fli", "fli");
        add("video/m4v", "m4v");
        add("video/mpeg", "mpeg");
        add("video/mpeg", "mpg");
        add("video/mpeg", "mpe");
        add("video/mp4", "mp4");
        add("video/mpeg", "VOB");
        add("video/quicktime", "qt");
        add("video/quicktime", "mov");
        add("video/vnd.mpegurl", "mxu");
        add("video/webm", "webm");
        add("video/x-la-asf", "lsf");
        add("video/x-la-asf", "lsx");
        add("video/x-mng", "mng");
        add("video/x-ms-asf", "asf");
        add("video/x-ms-asf", "asx");
        add("video/x-ms-wm", "wm");
        add("video/x-ms-wmv", "wmv");
        add("video/x-ms-wmx", "wmx");
        add("video/x-ms-wvx", "wvx");
        add("video/x-msvideo", "avi");
        add("video/x-sgi-movie", "movie");
        add("x-conference/x-cooltalk", "ice");
        add("x-epoc/x-sisx-app", "sisx");
        applyOverrides();
    }

    /**
     * Registers a bidirectional mapping between a MIME type and an extension.
     * The extension -> MIME mapping always takes the latest registration;
     * the MIME -> extension mapping keeps only the FIRST extension registered
     * for a given type, because the table above lists the most popular
     * extension first and we do not want to overwrite it later.
     */
    private static void add(String mimeType, String extension) {
        //
        // if we have an existing x --> y mapping, we do not want to
        // override it with another mapping x --> ?
        // this is mostly because of the way the mime-type map below
        // is constructed (if a mime type maps to several extensions
        // the first extension is considered the most popular and is
        // added first; we do not want to overwrite it later).
        //
        if (!mimeTypeToExtensionMap.containsKey(mimeType)) {
            mimeTypeToExtensionMap.put(mimeType, extension);
        }
        extensionToMimeTypeMap.put(extension, mimeType);
    }

    /**
     * Locates a content-types.properties override file, checking the
     * "content.types.user.table" system property first and then the
     * standard $JAVA_HOME/lib location. Returns null when neither exists
     * or cannot be opened.
     */
    private static InputStream getContentTypesPropertiesStream() {
        // User override?
        String userTable = System.getProperty("content.types.user.table");
        if (userTable != null) {
            File f = new File(userTable);
            if (f.exists()) {
                try {
                    return new FileInputStream(f);
                } catch (IOException ignored) {
                    // best-effort: fall through to the standard location
                }
            }
        }
        // Standard location?
        File f = new File(System.getProperty("java.home"), "lib" + File.separator + "content-types.properties");
        if (f.exists()) {
            try {
                return new FileInputStream(f);
            } catch (IOException ignored) {
                // best-effort: no overrides will be applied
            }
        }
        return null;
    }

    /**
     * This isn't what the RI does. The RI doesn't have hard-coded defaults, so supplying your
     * own "content.types.user.table" means you don't get any of the built-ins, and the built-ins
     * come from "$JAVA_HOME/lib/content-types.properties".
     */
    private static void applyOverrides() {
        // Get the appropriate InputStream to read overrides from, if any.
        InputStream stream = getContentTypesPropertiesStream();
        if (stream == null) {
            return;
        }
        try {
            try {
                // Read the properties file...
                Properties overrides = new Properties();
                overrides.load(stream);
                // And translate its mapping to ours...
                for (Map.Entry<Object, Object> entry : overrides.entrySet()) {
                    String extension = (String) entry.getKey();
                    String mimeType = (String) entry.getValue();
                    add(mimeType, extension);
                }
            } finally {
                stream.close();
            }
        } catch (IOException ignored) {
            // best-effort: overrides are optional
        }
    }

    private MimeUtils() {
        // static utility class; not instantiable
    }

    /**
     * Returns true if the given MIME type has an entry in the map.
     * @param mimeType A MIME type (i.e. text/plain)
     * @return True iff there is a mimeType entry in the map.
     */
    public static boolean hasMimeType(String mimeType) {
        if (mimeType == null || mimeType.isEmpty()) {
            return false;
        }
        return mimeTypeToExtensionMap.containsKey(mimeType);
    }

    /**
     * Returns the MIME type for the given extension.
     * @param extension A file extension without the leading '.'
     * @return The MIME type for the given extension or null iff there is none.
     */
    public static String guessMimeTypeFromExtension(String extension) {
        if (extension == null || extension.isEmpty()) {
            return null;
        }
        return extensionToMimeTypeMap.get(extension);
    }

    /**
     * Returns true if the given extension has a registered MIME type.
     * @param extension A file extension without the leading '.'
     * @return True iff there is an extension entry in the map.
     */
    public static boolean hasExtension(String extension) {
        if (extension == null || extension.isEmpty()) {
            return false;
        }
        return extensionToMimeTypeMap.containsKey(extension);
    }

    /**
     * Returns the registered extension for the given MIME type. Note that some
     * MIME types map to multiple extensions. This call will return the most
     * common extension for the given MIME type.
     * @param mimeType A MIME type (i.e. text/plain)
     * @return The extension for the given MIME type or null iff there is none.
     */
    public static String guessExtensionFromMimeType(String mimeType) {
        if (mimeType == null || mimeType.isEmpty()) {
            return null;
        }
        return mimeTypeToExtensionMap.get(mimeType);
    }
}
mitwo-dev/FileExplorer
src/net/micode/fileexplorer/MimeUtils.java
214,485
/*
 * Missing License Header, Copyright 2016 (C) Andrew Maitland <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
package pcgen.core.kit;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import pcgen.base.formula.Formula;
import pcgen.core.Kit;
import pcgen.core.PlayerCharacter;

/**
 * A lookup table used by Kits: each row maps a numeric range (low/high
 * formulas resolved against the character) to a piece of kit gear. The
 * table itself is never applied directly; callers resolve a value and
 * fetch the matching gear via {@link #getEntry(PlayerCharacter, int)}.
 */
public class KitTable extends BaseKit
{
	/** Identifier under which this table is referenced. */
	private String tableName;

	/** Table rows, kept in insertion order. */
	private final List<TableEntry> list = new ArrayList<>();

	/** @return the identifier of this table */
	public String getTableName()
	{
		return tableName;
	}

	/** @param tableName the identifier of this table */
	public void setTableName(String tableName)
	{
		this.tableName = tableName;
	}

	/**
	 * Appends a row mapping the range [min, max] to the given gear.
	 */
	public void addGear(KitGear optionInfo, Formula min, Formula max)
	{
		list.add(new TableEntry(optionInfo, min, max));
	}

	/**
	 * One row of the table: a gear choice plus the inclusive range of
	 * values that selects it.
	 */
	public static class TableEntry
	{
		public final KitGear gear;
		public final Formula lowRange;
		public final Formula highRange;

		public TableEntry(KitGear optionInfo, Formula min, Formula max)
		{
			gear = optionInfo;
			lowRange = min;
			highRange = max;
		}

		/**
		 * True if value falls within a range
		 * @param pc the PC this Kit is being applied to
		 * @param inValue the value to test.
		 * @return True if value falls within a range
		 */
		public boolean isIn(PlayerCharacter pc, int inValue)
		{
			// Both bounds are formulas, resolved per-character; the range
			// is inclusive on both ends.
			return lowRange.resolve(pc, "").intValue() <= inValue
					&& inValue <= highRange.resolve(pc, "").intValue();
		}
	}

	/** @return a read-only view of the table rows */
	public List<TableEntry> getList()
	{
		return Collections.unmodifiableList(list);
	}

	@Override
	public void apply(PlayerCharacter aPC)
	{
		// Tables are consulted, never applied as a kit task.
		throw new UnsupportedOperationException();
	}

	@Override
	public String getObjectName()
	{
		return "Table";
	}

	@Override
	public boolean testApply(Kit aKit, PlayerCharacter aPC, List<String> warnings)
	{
		// Tables are consulted, never test-applied as a kit task.
		throw new UnsupportedOperationException();
	}

	/**
	 * Returns the gear of the first row whose range contains the given
	 * value, or null when no row matches.
	 */
	public KitGear getEntry(PlayerCharacter pc, int value)
	{
		for (TableEntry row : list)
		{
			if (row.isIn(pc, value))
			{
				return row.gear;
			}
		}
		return null;
	}
}
irobin591/pcgen
code/src/java/pcgen/core/kit/KitTable.java
214,486
package edu.drexel.psal.anonymouth.engine;

import java.util.ArrayList;
import java.util.List;

import edu.drexel.psal.anonymouth.gooie.ThePresident;
import edu.drexel.psal.anonymouth.helpers.ErrorHandler;
import edu.drexel.psal.jstylo.generics.CumulativeFeatureDriver;
import edu.drexel.psal.jstylo.generics.Logger;
import edu.drexel.psal.jstylo.generics.WekaInstancesBuilder;

import com.jgaap.generics.*;

import weka.core.Instances;

/**
 * Constructs instances using Weka and JStylo. Features are extracted.
 * @author Andrew W.E. McDonald
 * @author Marc Barrowclift
 *
 */
public class InstanceConstructor {

	private final String NAME = "( "+this.getClass().getSimpleName()+" ) - ";

	/**
	 * private variable to hold the attributes of the training documents.
	 */
	private ArrayList<String> setAttributes;

	/**
	 * public method to retrieve the attributes of the training documents.
	 * @return
	 * 	String array containing attributes of training documents, each index of array holds one attribute, with indices corresponding to indices of String[] returned by @getTrainingInstances .
	 */
	public ArrayList<String> getAttributeSet(){
		return setAttributes;
	}

	/**
	 * private variable to hold the instances of the training documents.
	 */
	private Double[][] trainingInstances;

	/**
	 * public method to retrieve the instances of the training documents.
	 * @return
	 * 	double array containing instances of training documents.
	 */
	public Double[][] getTrainingInstances(){
		return trainingInstances;
	}

	/**
	 * private variable to hold the instances of the testing document(s).
	 */
	private Double[][] testingInstances;

	/**
	 * public method to retrieve the instances of the training documents.
	 * @return
	 * 	double array containing instances of training documents.
	 */
	public Double[][] getTestingInstances(){
		return testingInstances;
	}

	// Feature drivers describing which features to extract from the documents.
	private CumulativeFeatureDriver theseFeaturesCfd;

	// Weka-format training/testing data, populated by the builder calls below.
	Instances trainingDat,testingDat;

	// When true, dumps pre/post processed text and the test set to stdout.
	private boolean printStuff;

	WekaInstancesBuilder wid;

	/**
	 * Constructor for InstanceConstructor, accepts boolean variable that tells WekaInstancesBuilder whether to expect sparse data or not. (if unsure, set false)
	 * @param isSparse - boolean, true if expecting sparse data, false otherwise or if unsure.
	 * @param cfd - cumulative feature driver. Contains all features that will be extracted from the documents
	 * @param printStuff - true to dump debug output (and exit!) during instance building
	 */
	public InstanceConstructor(boolean isSparse, CumulativeFeatureDriver cfd, boolean printStuff){
		wid = new WekaInstancesBuilder(isSparse);
		wid.setNumCalcThreads(ThePresident.num_Tagging_Threads);
		theseFeaturesCfd = cfd;
		this.printStuff = printStuff;
		Logger.logln(NAME+"InstanceConstuctor constructed");
	}

	/**
	 * method runInstanceBuilder uses an instance of WekaInstancesBuilder to extract the features of both the input
	 * trainDocs and testDoc(s).
	 * @param trainDocs list of Document objects to train the Weka classifier on
	 * @param testDocs list (may be a single object list) of Document object(s) to classify.
	 * @return
	 * 	true if no errors
	 */
	public boolean runInstanceBuilder(List<Document> trainDocs,List<Document> testDocs){
		Logger.logln(NAME+"Running JStylo WekaInstancesBuilder from runInstanceBuilder in InstanceConstructor");
		if (printStuff) {
			dumpProcessedText("PRE-INSTANCE BUILDING:\n", testDocs);
		}
		try {
			wid.prepareTrainingSet(trainDocs, theseFeaturesCfd);
			wid.prepareTestSet(testDocs);
		} catch(Exception e) {
			// NOTE(review): the error is reported via the UI handler but the
			// method continues with whatever state the builder is in — this
			// mirrors the original behavior; callers always receive true.
			ErrorHandler.StanfordPOSError();
		}
		// Initialize two new instances to hold training and testing instances (attributes and data)
		trainingDat = wid.getTrainingSet();
		testingDat = wid.getTestSet();
		setAttributes = getAttributes(trainingDat);
		trainingInstances = getInstances(trainingDat);
		testingInstances = getInstances(testingDat);
		if (printStuff) {
			dumpProcessedText("POST-INSTANCE BUILDING:\n", testDocs);
			System.out.println(testingDat.toString());
			System.exit(7);
		}
		return true;
	}

	/** Debug helper: prints a header followed by the processed text of the first test document. */
	private void dumpProcessedText(String header, List<Document> testDocs) {
		char[] cRay = testDocs.get(0).getProcessedText();
		System.out.println(header);
		for (int eye = 0; eye < cRay.length; eye++)
			System.out.print(cRay[eye]);
		System.out.println();
	}

	/**
	 * Extracts features for the training documents only; the test set is left untouched.
	 * @param trainDocs documents to build the training set from
	 * @param withAuthor true when the author's own documents are included in the set
	 * @return true if no errors
	 */
	public boolean onlyBuildTrain(List<Document> trainDocs, boolean withAuthor) {
		// BUG FIX: the two log messages were swapped in the original
		// (withAuthor == true logged "Only building train set").
		if (withAuthor)
			Logger.logln(NAME+"Building train set with author");
		else
			Logger.logln(NAME+"Only building train set");
		try {
			wid.prepareTrainingSet(trainDocs, theseFeaturesCfd);
		} catch(Exception e) {
			ErrorHandler.StanfordPOSError();
		}
		trainingDat = wid.getTrainingSet();
		setAttributes = getAttributes(trainingDat);
		trainingInstances = getInstances(trainingDat);
		return true;
	}

	/**
	 * returns full set of training data in arff style formatting (contains list of attributes and data)
	 * @return
	 * 	Instances object containing training data
	 */
	public Instances getFullTrainData(){
		return trainingDat;
	}

	/**
	 * returns full set of test data in arff style formatting (contains list of attributes and data)
	 * @return
	 * 	Instances object containing testing data
	 */
	public Instances getFullTestData(){
		return testingDat;
	}

	/**
	 * Accepts Weka Instances object and returns the stripped attributes. Stripping performed by 'AttributeStripper'
	 * @param currentInstance - Weka Instances object (arff format)
	 * @return list of attribute strings; the "authorName" attribute is kept verbatim
	 */
	public ArrayList<String> getAttributes(Instances currentInstance){
		ArrayList<String> tempAttrib = new ArrayList<String>(currentInstance.numAttributes());
		for (int i = 0; i < currentInstance.numAttributes(); i++) {
			String tempString = currentInstance.attribute(i).toString();
			if (tempString.contains("authorName")) {
				tempAttrib.add(i, tempString);
				continue;
			}
			tempAttrib.add(i, AttributeStripper.strip(tempString));
		}
		return tempAttrib;
	}

	/**
	 * Accepts JSylo's Instances object and returns the instances (@data) portion of the ".arff file" (not really a file at this point though).
	 * The author name/ID column is set to null rather than simply omitted - this allows attribute list indices to stay synchronized.
	 * @param currentInstance - JStylo's Instances object (arff format)
	 * @return per-instance feature values, with the author column null
	 */
	public Double[][] getInstances(Instances currentInstance){
		int numAttribs = setAttributes.size();
		int numInstances = currentInstance.numInstances();
		Double[][] tempInstance = new Double[numInstances][numAttribs];

		// Locate the author column so it can be nulled out below.
		int skip = 0;
		for (int i = 0; i < numAttribs; i++) {
			if (currentInstance.attribute(i).toString().contains("authorName")) {
				skip = i;
				break;
			}
		}
		for (int i = 0; i < numInstances; i++) {
			// NOTE(review): parses Weka's comma-separated instance string; assumes
			// no feature value itself contains a comma — true for numeric data.
			String tempString = currentInstance.instance(i).toString() + ",";
			int j = 0;
			while (!tempString.equals("")) {
				int placeHolder = tempString.indexOf(",");
				String otherTempString = tempString.substring(0, placeHolder);
				if (j == skip) {
					tempInstance[i][j] = null; // set author name/ID to null rather than simply omit - allows attribute lists indices to stay synchronized
				} else {
					tempInstance[i][j] = Double.valueOf(otherTempString);
				}
				tempString = tempString.substring(placeHolder + 1);
				j++;
			}
		}
		return tempInstance;
	}
}
spencermwoo/anonymouth
src/edu/drexel/psal/anonymouth/engine/InstanceConstructor.java
214,487
/*
 * XMLObject.java
 *
 * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301 USA
 */

package dr.xml;

import dr.inference.model.AbstractModel;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;

import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.Vector;

/**
 * This class wraps a DOM Element for the purposes of parsing.
 *
 * @author Alexei Drummond
 * @version $Id: XMLObject.java,v 1.30 2005/07/11 14:06:25 rambaut Exp $
 */
public class XMLObject {

    /** Token used in whitespace-delimited double arrays to denote a missing value (parsed as NaN). */
    public static final String missingValue = "NA";

    /**
     * @param e      the element to construct this XML object from
     * @param parent the enclosing XMLObject, or null for the document root
     */
    public XMLObject(Element e, XMLObject parent) {
        this.element = e;
        this.parent = parent;
    }

    /**
     * Creates a view of one element of another XMLObject whose native object is a List.
     *
     * @param obj   the XMLObject whose native object is a List
     * @param index the index into that list to expose as this object's native object
     */
    public XMLObject(XMLObject obj, int index) {
        this(obj.element, null);
        nativeObject = ((List) obj.getNativeObject()).get(index);
    }

    /**
     * @return the number of children this XMLObject has.
     */
    public final int getChildCount() {
        return children.size();
    }

    /**
     * @param i the index of the child to return
     * @return the ith child in native format if available, otherwise as
     *         an XMLObject.
     */
    public Object getChild(int i) {
        Object obj = getRawChild(i);
        XMLObject xo = null;

        if (obj instanceof XMLObject) {
            xo = (XMLObject) obj;
        } else if (obj instanceof Reference) {
            // idref: resolve to the referenced XMLObject
            xo = ((Reference) obj).getReferenceObject();
        }

        if (xo != null && xo.hasNativeObject()) {
            return xo.getNativeObject();
        }
        return obj;
    }

    /**
     * @param c the class of the child to return
     * @return the first child with a native format of the given class, or null if no such child exists.
     */
    public Object getChild(Class c) {
        for (int i = 0; i < getChildCount(); i++) {
            Object child = getChild(i);
            if (c.isInstance(child)) {
                return child;
            }
        }
        return null;
    }

    /**
     * @return all children, or an empty list if there are no children.
     */
    public List<Object> getChildren() {
        List<Object> allChildren = new ArrayList<Object>();
        for (int i = 0; i < getChildCount(); i++) {
            allChildren.add(getChild(i));
        }
        return allChildren;
    }

    /**
     * @param c the class of the children to return
     * @return all children with a native format of the given class, or an empty list if no such child exists.
     */
    public <T> List<T> getAllChildren(Class<T> c) {
        List<T> allChildren = new ArrayList<T>();
        for (int i = 0; i < getChildCount(); i++) {
            Object child = getChild(i);
            if (c.isInstance(child)) {
                allChildren.add(c.cast(child));
            }
        }
        return allChildren;
    }

    /**
     * @param name the name of the child to return
     * @return the first child of type XMLObject with a given name, or null if no such child exists.
     */
    public XMLObject getChild(String name) {
        for (int i = 0; i < getChildCount(); i++) {
            Object child = getChild(i);
            if (child instanceof XMLObject) {
                if (((XMLObject) child).getName().equals(name)) {
                    return (XMLObject) child;
                }
            }
        }
        return null;
    }

    /**
     * @param name the name of the children
     * @return all children with a given name.
     */
    public List<XMLObject> getAllChildren(String name) {
        List<XMLObject> allChildren = new ArrayList<XMLObject>();
        for (int i = 0; i < getChildCount(); i++) {
            Object child = getChild(i);
            if (child instanceof XMLObject && ((XMLObject) child).getName().equals(name)) {
                allChildren.add((XMLObject) child);
            }
        }
        return allChildren;
    }

    /**
     * @param elementName the name of the XML wrapper element the child resides in
     * @return the first child element out of a named XMLObject element
     * @throws XMLParseException if no wrapper element exists, or if the child in the
     *                           wrapper element is not an XMLObject
     */
    public Object getElementFirstChild(String elementName) throws XMLParseException {
        Object child = getChild(elementName);

        if (child == null)
            throw new XMLParseException("Child element called " + elementName +
                    " does not exist inside element " + getName());
        if (!(child instanceof XMLObject))
            throw new XMLParseException("Child element called " + elementName +
                    " inside element " + getName() + " is not an XMLObject.");
        return ((XMLObject) child).getChild(0);
    }

    /**
     * @param i the index of the child
     * @return the element name of the ith child (resolving references), or "" if the
     *         child is a bare string.
     */
    public String getChildName(int i) {
        Object obj = getRawChild(i);
        XMLObject xo;

        if (obj instanceof XMLObject) {
            xo = (XMLObject) obj;
        } else if (obj instanceof Reference) {
            xo = ((Reference) obj).getReferenceObject();
        } else {
            return "";
        }
        return xo.getName();
    }

    /**
     * @param name the name of the child element being tested for.
     * @return true if a child element of the given name exists.
     */
    public boolean hasChildNamed(String name) {
        // getChild(String) only ever returns an XMLObject or null, so the
        // instanceof check subsumes the null check.
        return getChild(name) instanceof XMLObject;
    }

    /**
     * @return all attributes
     */
    public NamedNodeMap getAttributes() {
        return element.getAttributes();
    }

    /**
     * @param i the index of the child to return
     * @return the ith child as a boolean.
     * @throws XMLParseException if the child cannot be converted to a boolean
     */
    public boolean getBooleanChild(int i) throws XMLParseException {
        return getBoolean(getChild(i));
    }

    /**
     * @param i the index of the child to return
     * @return the ith child as a double.
     * @throws XMLParseException if the child cannot be converted to a double
     */
    public double getDoubleChild(int i) throws XMLParseException {
        return getDouble(getChild(i));
    }

    /**
     * @param i the index of the child to return
     * @return the ith child as a double[].
     * @throws XMLParseException if the child cannot be converted to a double[]
     */
    public double[] getDoubleArrayChild(int i) throws XMLParseException {
        return getDoubleArray(getChild(i));
    }

    /**
     * @param i the index of the child to return
     * @return the ith child as an integer.
     * @throws XMLParseException if the child cannot be converted to an integer
     */
    public int getIntegerChild(int i) throws XMLParseException {
        return getInteger(getChild(i));
    }

    /**
     * @param i the index of the child to return
     * @return the ith child as a string.
     * @throws XMLParseException if the child is not a string
     */
    public String getStringChild(int i) throws XMLParseException {
        return getString(getChild(i));
    }

    /**
     * @param i the index of the child to return
     * @return the ith child as a String[].
     * @throws XMLParseException if the child cannot be converted to a String[]
     */
    public String[] getStringArrayChild(int i) throws XMLParseException {
        return getStringArray(getChild(i));
    }

    /**
     * Attribute value, if present - default otherwise.
     *
     * @param name         attribute name
     * @param defaultValue the default value
     * @return the given attribute if it exists, otherwise the default value
     * @throws XMLParseException if attribute can't be converted to desired type
     */
    @SuppressWarnings("unchecked")
    public <T> T getAttribute(String name, T defaultValue) throws XMLParseException {
        if (element.hasAttribute(name)) {
            final String s = element.getAttribute(name);
            // Convert by invoking the String constructor of the default value's
            // class (e.g. Integer(String), Double(String), Boolean(String)).
            // The cast is safe because the constructor belongs to T's class.
            for (Constructor c : defaultValue.getClass().getConstructors()) {
                final Class[] classes = c.getParameterTypes();
                if (classes.length == 1 && classes[0].equals(String.class)) {
                    try {
                        return (T) c.newInstance(s);
                    } catch (Exception e) {
                        throw new XMLParseException(" conversion of '" + s + "' to " +
                                defaultValue.getClass().getName() + " failed");
                    }
                }
            }
        }
        return defaultValue;
    }

    /**
     * @return the named attribute
     */
    public Object getAttribute(String name) throws XMLParseException {
        return getAndTest(name);
    }

    /**
     * @return the named attribute as a boolean.
     */
    public boolean getBooleanAttribute(String name) throws XMLParseException {
        return getBoolean(getAndTest(name));
    }

    /**
     * @return the named attribute as a double.
     */
    public double getDoubleAttribute(String name) throws XMLParseException {
        return getDouble(getAndTest(name));
    }

    /**
     * @return the named attribute as a double[].
     */
    public double[] getDoubleArrayAttribute(String name) throws XMLParseException {
        return getDoubleArray(getAndTest(name));
    }

    /**
     * @return the named attribute as an int[].
     */
    public int[] getIntegerArrayAttribute(String name) throws XMLParseException {
        return getIntegerArray(getAndTest(name));
    }

    /**
     * @return the named attribute as an integer.
     */
    public int getIntegerAttribute(String name) throws XMLParseException {
        return getInteger(getAndTest(name));
    }

    /**
     * @return the named attribute as a long integer.
     */
    public long getLongIntegerAttribute(String name) throws XMLParseException {
        return getLongInteger(getAndTest(name));
    }

    /**
     * @return the named attribute as a string.
     */
    public String getStringAttribute(String name) throws XMLParseException {
        return getString(getAndTest(name));
    }

    /**
     * @return the named attribute as a String[].
     */
    public String[] getStringArrayAttribute(String name) throws XMLParseException {
        return getStringArray(getAndTest(name));
    }

    /**
     * @param valueList if this List is not null it is populated by the double array
     *                  that the given string represents. The token "NA" is parsed as NaN.
     * @return true if the given string represents a whitespace-delimited array of doubles.
     */
    public static boolean isDoubleArray(String s, List<Double> valueList) {
        try {
            StringTokenizer st = new StringTokenizer(s);
            while (st.hasMoreTokens()) {
                String token = st.nextToken();
                Double d;
                if (token.compareToIgnoreCase(missingValue) == 0)
                    d = Double.NaN;
                else
                    d = Double.valueOf(token); // valueOf: boxing constructors are deprecated
                if (valueList != null) valueList.add(d);
            }
            return true;
        } catch (NumberFormatException e) {
            return false;
        }
    }

    /**
     * @param valueList if this List is not null it is populated by the integer array
     *                  that the given string represents.
     * @return true if the given string represents a whitespace-delimited array of integers.
     */
    public static boolean isIntegerArray(String s, List<Integer> valueList) {
        try {
            StringTokenizer st = new StringTokenizer(s);
            while (st.hasMoreTokens()) {
                Integer d = Integer.valueOf(st.nextToken()); // valueOf: boxing constructors are deprecated
                if (valueList != null) valueList.add(d);
            }
            return true;
        } catch (NumberFormatException e) {
            return false;
        }
    }

    /** Name of the attribute carrying an element's unique identifier. */
    public final static String ID = "id";

    public boolean hasId() {
        return hasAttribute(ID);
    }

    public String getId() throws XMLParseException {
        return getStringAttribute(ID);
    }

    /**
     * @return true if an attribute of the given name exists.
     */
    public boolean hasAttribute(String name) {
        return (element.hasAttribute(name));
    }

    /** @return the tag name of the wrapped DOM element. */
    public String getName() {
        return element.getTagName();
    }

    /** @return the native (parsed) object this element represents, or null if not yet set. */
    public Object getNativeObject() {
        return nativeObject;
    }

    public boolean hasNativeObject() {
        return nativeObject != null;
    }

    public String toString() {
        String prefix = getName();
        if (hasId()) {
            try {
                prefix += ":" + getId();
            } catch (XMLParseException e) {
                // this shouldn't happen: hasId() guarantees the attribute exists
                assert false;
            }
        }
        return prefix;
    }

    /** @return an XHTML (or plain string) rendering of the native object, or "" if none. */
    public String content() {
        if (nativeObject != null) {
            if (nativeObject instanceof dr.util.XHTMLable) {
                return ((dr.util.XHTMLable) nativeObject).toXHTML();
            } else {
                return nativeObject.toString();
            }
        }
        return "";
    }

    //**********************************************************************
    // Package functions
    //**********************************************************************

    /**
     * Adds a child. Only XMLObject, Reference and String children are legal.
     */
    void addChild(Object child) {
        if (child instanceof XMLObject ||
                child instanceof Reference ||
                child instanceof String) {
            children.add(child);
        } else throw new IllegalArgumentException();
    }

    /**
     * @return the ith child of this XMLObject, without processing (references unresolved).
     */
    public Object getRawChild(int i) {
        return children.get(i);
    }

    /**
     * Sets the native object represented by this XMLObject.
     */
    public void setNativeObject(Object obj) {
        nativeObject = obj;
    }

    boolean isReference(int child) {
        return (getRawChild(child) instanceof Reference);
    }

    //**********************************************************************
    // Private methods
    //**********************************************************************

    /**
     * @return the object as a boolean if possible
     */
    private boolean getBoolean(Object obj) throws XMLParseException {
        if (obj instanceof Boolean) return (Boolean) obj;
        if (obj instanceof String) {
            if (obj.equals("true")) return true;
            if (obj.equals("false")) return false;
        }
        throw new XMLParseException("Expected a boolean (true|false), but got " + obj);
    }

    /**
     * @return the object as a double if possible
     */
    private double getDouble(Object obj) throws XMLParseException {
        if (obj instanceof Number) {
            return ((Number) obj).doubleValue();
        }
        if (obj instanceof String) {
            try {
                return Double.parseDouble((String) obj);
            } catch (NumberFormatException nfe) {
                // fall through to the common error below
            }
        }
        throw new XMLParseException("Expected double precision number, but got " + obj);
    }

    /**
     * @return the object as a double[] if possible
     */
    private double[] getDoubleArray(Object obj) throws XMLParseException {
        if (obj instanceof Number) return new double[]{((Number) obj).doubleValue()};
        if (obj instanceof double[]) return (double[]) obj;
        if (obj instanceof String) {
            List<Double> valueList = new ArrayList<Double>();
            if (isDoubleArray((String) obj, valueList)) {
                double[] values = new double[valueList.size()];
                for (int i = 0; i < values.length; i++) {
                    values[i] = valueList.get(i);
                }
                return values;
            } else {
                throw new XMLParseException("Expected array of double precision numbers, but got " + obj);
            }
        }
        throw new XMLParseException("Expected array of double precision numbers, but got " + obj);
    }

    /**
     * @return the object as an int[] if possible
     */
    private int[] getIntegerArray(Object obj) throws XMLParseException {
        if (obj instanceof Number) return new int[]{((Number) obj).intValue()};
        if (obj instanceof int[]) return (int[]) obj;
        if (obj instanceof String) {
            List<Integer> valueList = new ArrayList<Integer>();
            if (isIntegerArray((String) obj, valueList)) {
                int[] values = new int[valueList.size()];
                for (int i = 0; i < values.length; i++) {
                    values[i] = valueList.get(i);
                }
                return values;
            } else {
                throw new XMLParseException("Expected array of integers, but got " + obj);
            }
        }
        throw new XMLParseException("Expected array of integers, but got " + obj);
    }

    /**
     * @return the object as an integer if possible
     */
    private int getInteger(Object obj) throws XMLParseException {
        if (obj instanceof Number) return ((Number) obj).intValue();
        try {
            return Integer.parseInt((String) obj);
        } catch (NumberFormatException e) {
            throw new XMLParseException("Expected integer, got " + obj);
        }
    }

    /**
     * @return the object as a long integer if possible
     */
    private long getLongInteger(Object obj) throws XMLParseException {
        if (obj instanceof Number) return ((Number) obj).longValue();
        try {
            return Long.parseLong((String) obj);
        } catch (NumberFormatException e) {
            throw new XMLParseException("Expected long integer, got " + obj);
        }
    }

    /**
     * @return the object as a string if possible
     */
    private String getString(Object obj) throws XMLParseException {
        if (obj instanceof String) return (String) obj;
        throw new XMLParseException("Expected string, but got " + obj);
    }

    /**
     * @return the object as a String[] if possible
     */
    private String[] getStringArray(Object obj) throws XMLParseException {
        if (obj instanceof String[]) return (String[]) obj;
        if (obj instanceof String) {
            List<String> stringList = new ArrayList<String>();
            StringTokenizer st = new StringTokenizer((String) obj);
            while (st.hasMoreTokens()) {
                stringList.add(st.nextToken());
            }
            String[] strings = new String[stringList.size()];
            for (int i = 0; i < strings.length; i++) {
                strings[i] = stringList.get(i);
            }
            return strings;
        }
        throw new XMLParseException("Expected array of strings, but got " + obj);
    }

    /**
     * @return the named attribute if it exists, throws XMLParseException otherwise.
     */
    private Object getAndTest(String name) throws XMLParseException {
        if (element.hasAttribute(name)) {
            return element.getAttribute(name);
        }
        throw new XMLParseException("'" + name + "' attribute was not found in " + element.getTagName() + " element.");
    }

    public XMLObject getParent() {
        return parent;
    }

    //**********************************************************************
    // Private instance variables
    //**********************************************************************

    // NOTE(review): Vector retained for its synchronized access; no concurrent
    // use is visible from here, but changing to ArrayList would alter
    // thread-safety characteristics.
    private final Vector<Object> children = new Vector<Object>();
    private final Element element;
    private final XMLObject parent;
    private Object nativeObject;
}
maxbiostat/beast-mcmc
src/dr/xml/XMLObject.java
214,488
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.classify;

import java.util.ArrayList;
import java.util.List;
import java.util.logging.*;
import java.io.PrintWriter;
import java.io.Serializable;

import cc.mallet.pipe.Pipe;
import cc.mallet.types.Alphabet;
import cc.mallet.types.AlphabetCarrying;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Labeling;
import cc.mallet.types.FeatureSelection;

/**
 * Abstract parent of all Classifiers.
 * <p>
 * All classification techniques in MALLET are implemented as two classes:
 * a trainer and a classifier.  The trainer ingests the training data
 * and creates a classifier that holds the parameters set during training.
 * The classifier applies those parameters to an Instance to produce
 * a classification of the Instance.
 * <p>
 * A concrete classifier is required only to be able to classify an instance.
 * <p>
 * Methods for classifying an InstanceList are here.  There are
 * also methods for calculating precision, recall, and f1 from either
 * InstanceLists (which are classified first) or an ArrayList of
 * classifications. Similar functionality is also in
 * {@link cc.mallet.classify.Trial}
 *
 * <p> A classifier holds a reference to the pipe that was used to
 * create the Instances being classified.  Most classifiers use
 * this to make sure the Alphabets of the instances being classified
 * are the same Alphabet objects used during training.
 * <p>
 * Alphabets are not allowed to change between training and classification.
 * @see ClassifierTrainer
 * @see Instance
 * @see InstanceList
 * @see Classification
 * @see Trial
 */
public abstract class Classifier implements AlphabetCarrying, Serializable
{
	private static Logger logger = Logger.getLogger(Classifier.class.getName());

	/** The pipe used to create the Instances this classifier operates on. */
	protected Pipe instancePipe;

	/** For serialization only. */
	protected Classifier()
	{
	}

	public Classifier (Pipe instancePipe)
	{
		this.instancePipe = instancePipe;
		// All classifiers must have set of labels.
		assert (instancePipe.getTargetAlphabet() != null);
		// FIX(review): the original assertion was inverted —
		// getTargetAlphabet().getClass().isAssignableFrom(LabelAlphabet.class)
		// accepts any *superclass* of LabelAlphabet (e.g. a plain Alphabet),
		// which would later fail with a ClassCastException in getLabelAlphabet().
		// The intent is that the target alphabet actually IS a LabelAlphabet.
		assert (instancePipe.getTargetAlphabet() instanceof LabelAlphabet);
		// Not all classifiers require a feature dictionary, however.
	}

	// TODO Change this method name to getPipe();
	public Pipe getInstancePipe ()
	{
		return instancePipe;
	}

	@Override
	public Alphabet getAlphabet ()
	{
		return (Alphabet) instancePipe.getDataAlphabet();
	}

	public LabelAlphabet getLabelAlphabet ()
	{
		return (LabelAlphabet) instancePipe.getTargetAlphabet();
	}

	@Override
	public Alphabet[] getAlphabets()
	{
		return new Alphabet[] {getAlphabet(), getLabelAlphabet()};
	}

	/**
	 * @return true iff the given object carries exactly this classifier's data
	 *         and label alphabets (identity comparison, not equals).
	 */
	public boolean alphabetsMatch (AlphabetCarrying object)
	{
		Alphabet[] otherAlphabets = object.getAlphabets();
		return otherAlphabets.length == 2
				&& otherAlphabets[0] == getAlphabet()
				&& otherAlphabets[1] == getLabelAlphabet();
	}

	// TODO Make argument List<Instance>
	/** Classifies every instance in the list; order of results matches input order. */
	public ArrayList<Classification> classify (InstanceList instances)
	{
		ArrayList<Classification> ret = new ArrayList<Classification> (instances.size());
		for (Instance inst : instances)
			ret.add (classify (inst));
		return ret;
	}

	public Classification[] classify (Instance[] instances)
	{
		Classification[] ret = new Classification[instances.length];
		for (int i = 0; i < instances.length; i++)
			ret[i] = classify (instances[i]);
		return ret;
	}

	public abstract Classification classify (Instance instance);

	/** Pipe the object through this classifier's pipe, then classify the resulting instance. */
	public Classification classify (Object obj)
	{
		if (obj instanceof Instance)
			return classify ((Instance)obj);
		return classify (instancePipe.instanceFrom(new Instance (obj, null, null, null)));
	}

	public FeatureSelection getFeatureSelection () { return null; }
	public FeatureSelection[] getPerClassFeatureSelection () { return null; }

	// Various evaluation methods (each runs a fresh Trial over the list)

	public double getAccuracy (InstanceList ilist) { return new Trial(this, ilist).getAccuracy(); }
	public double getPrecision (InstanceList ilist, int index) { return new Trial(this, ilist).getPrecision(index); }
	public double getPrecision (InstanceList ilist, Labeling labeling) { return new Trial(this, ilist).getPrecision(labeling); }
	public double getPrecision (InstanceList ilist, Object labelEntry) { return new Trial(this, ilist).getPrecision(labelEntry); }
	public double getRecall (InstanceList ilist, int index) { return new Trial(this, ilist).getRecall(index); }
	public double getRecall (InstanceList ilist, Labeling labeling) { return new Trial(this, ilist).getRecall(labeling); }
	public double getRecall (InstanceList ilist, Object labelEntry) { return new Trial(this, ilist).getRecall(labelEntry); }
	public double getF1 (InstanceList ilist, int index) { return new Trial(this, ilist).getF1(index); }
	public double getF1 (InstanceList ilist, Labeling labeling) { return new Trial(this, ilist).getF1(labeling); }
	public double getF1 (InstanceList ilist, Object labelEntry) { return new Trial(this, ilist).getF1(labelEntry); }
	public double getAverageRank (InstanceList ilist) { return new Trial(this, ilist).getAverageRank(); }

	/**
	 * Outputs human-readable description of classifier (e.g., list of weights, decision tree)
	 * to System.out
	 */
	public void print () {
		System.out.println ("Classifier "+getClass().getName()+"\n  Detailed printout not yet implemented.");
	}

	public void print (PrintWriter out) {
		out.println ("Classifier "+getClass().getName()+"\n  Detailed printout not yet implemented.");
	}
}
techknowledgist/Mallet
src/cc/mallet/classify/Classifier.java
214,489
/**
 * Copyright © 2016-2024 The Thingsboard Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.thingsboard.server.common.msg.rule.engine;

import lombok.Data;
import org.thingsboard.server.common.data.id.DeviceId;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.security.DeviceCredentials;
import org.thingsboard.server.common.msg.MsgType;
import org.thingsboard.server.common.msg.ToDeviceActorNotificationMsg;

/**
 * Notification delivered to a device actor when the device's credentials
 * have been updated.
 *
 * @author Andrew Shvayka
 */
@Data
public class DeviceCredentialsUpdateNotificationMsg implements ToDeviceActorNotificationMsg {

    private static final long serialVersionUID = -3956907402411126990L;

    // Tenant that owns the device.
    private final TenantId tenantId;
    // Device whose credentials were updated.
    private final DeviceId deviceId;
    /**
     * The updated credentials carried along with the notification.
     * NOTE(review): the original comment here said only "LwM2M" — presumably
     * this field was added for the LwM2M transport; confirm against callers.
     */
    private final DeviceCredentials deviceCredentials;

    @Override
    public MsgType getMsgType() {
        return MsgType.DEVICE_CREDENTIALS_UPDATE_TO_DEVICE_ACTOR_MSG;
    }
}
thingsboard/thingsboard
common/message/src/main/java/org/thingsboard/server/common/msg/rule/engine/DeviceCredentialsUpdateNotificationMsg.java
214,490
// -*- mode: java; c-basic-offset: 2; -*-
// Copyright © 2012-2016 Massachusetts Institute of Technology. All rights reserved.

/**
 * @license
 * @fileoverview Methods for manipulating App Inventor components - adding, removing,
 * renaming, etc.
 *
 * @author [email protected] (Andrew F. McKinney)
 * @author [email protected] (Sharon Perl)
 * @author [email protected] (Evan W. Patton)
 */

'use strict';

goog.provide('AI.Blockly.Component');
goog.provide('AI.Blockly.ComponentTypes');
goog.provide('AI.Blockly.ComponentInstances');

goog.require('Blockly.TranslationProperties');
goog.require('Blockly.TranslationEvents');
goog.require('Blockly.TranslationMethods');
goog.require('Blockly.TranslationParams');

// App Inventor extensions to Blockly
goog.require('Blockly.TypeBlock');

// Initialize the three registries lazily so this file can be loaded in any order
// relative to other scripts that touch them.
if (Blockly.Component === undefined) Blockly.Component = {};
if (Blockly.ComponentTypes === undefined) Blockly.ComponentTypes = {};
if (Blockly.ComponentInstances === undefined) Blockly.ComponentInstances = {};

/**
 * Registers a component instance in Blockly.ComponentInstances (no-op if the
 * same name/uid pair is already registered) and flags the TypeBlock cache for
 * reload.
 * @param name the instance name, e.g., Button1
 * @param uid the component's unique id
 */
Blockly.Component.add = function(name, uid) {
  if (Blockly.ComponentInstances.haveInstance(name, uid)) {
    return;
  }
  Blockly.TypeBlock.needsReload.components = true;
  // get type name for instance
  var typeName = Blockly.Component.instanceNameToTypeName(name);
  Blockly.ComponentInstances.addInstance(name, uid, typeName);
};

/**
 * Rename component with given uid and instance name oldname to newname
 * @param oldname the Component's current name, e.g., Button1
 * @param newname the newname the component will be given, e.g., Button2
 * @param uid the component's unique id
 *
 * Here are the various places that a component's name must be changed, using Button1
 * as an example name.
 * Blockly.ComponentInstances -- an index containing an entry for each Component used by the app
 *   keyed on its oldname, needs to change to the new name
 *   e.g., ComponentInstances['Button1'] --> ComponentInstances['Button2']
 *
 * Call rename on all component blocks
 */
Blockly.Component.rename = function(oldname, newname, uid) {
  console.log("Got call to Blockly.Component.rename(" + oldname + ", " + newname + ", " + uid + ")");
  Blockly.TypeBlock.needsReload.components = true;

  if (!Blockly.ComponentInstances.haveInstance(oldname, uid)) {
    console.log("Renaming, No such Component instance " + oldname + " aborting");
    return;
  }

  // Create an entry in Blockly.ComponentInstances for the block's newname and delete oldname (below)
  Blockly.ComponentInstances[newname] = {}
  Blockly.ComponentInstances[newname].uid = uid;
  Blockly.ComponentInstances[newname].typeName = Blockly.ComponentInstances[oldname].typeName;

  // Delete the index entry for the oldname
  // (nulled first, then deleted — preserved as written; order matters to
  // anything iterating the registry concurrently)
  Blockly.ComponentInstances[oldname] = null;
  delete Blockly.ComponentInstances[oldname];

  console.log("Revised Blockly.ComponentInstances, Blockly.Language, Blockly.Yail for " + newname);

  // Revise names, types, and block titles for all blocks containing newname in Blockly.mainWorkspace
  var blocks = Blockly.mainWorkspace.getAllBlocks();
  for (var x = 0, block; block = blocks[x]; x++) {
    if (!block.category) {
      continue;
    } else if (block.category == 'Component') {
      block.rename(oldname, newname); // Changes block's instanceName, typeName, and current title
    }
  }

  console.log("Revised Blockly.mainWorkspace for " + newname);
};

/**
 * Remove component with given type and instance name and unique id uid
 * @param type, Component's type -- e.g., Button
 * @param name, Component's name -- e.g., Button1
 * @param uid, Component's unique id -- not currently used
 *
 * The component should be listed in the ComponentInstances list.
 * - For each instance of the component's block in the Blockly.mainWorkspace
 * -- Call its BlocklyBlock.destroy() method to remove the block
 *    from the workspace and adjust enclosed or enclosing blocks.
 * Remove the block's entry from ComponentInstances
 */
Blockly.Component.remove = function(type, name, uid) {
  console.log("Got call to Blockly.Component.remove(" + type + ", " + name + ", " + uid + ")");
  Blockly.TypeBlock.needsReload.components = true;

  // Delete instances of this type of block from the workspace
  var allblocks = Blockly.mainWorkspace.getAllBlocks();
  for (var x = 0, block; block = allblocks[x]; x++) {
    if (!block.category) {
      continue;
    } else if (block.category == 'Component' && block.instanceName == name) {
      block.dispose(true); // Destroy the block gently
    }
  }

  // Remove the component instance
  console.log("Deleting " + name + " from Blockly.ComponentInstances");
  delete Blockly.ComponentInstances[name];
};

/**
 * Builds a map of component name -> top level blocks for that component.
 * A special entry for "globals" maps to top-level global definitions.
 *
 * @param warnings a Map that will be filled with warnings for troublesome blocks
 * @param errors a list that will be filled with error messages
 * @param forRepl whether this is executed for REPL
 * @param compileUnattachedBlocks whether to compile unattached blocks
 * @returns object mapping component names to the top-level blocks for that component in the
 *          workspace. For each component C the object contains a field "component.C" whose
 *          value is an array of blocks. In addition, the object contains a field named "globals"
 *          whose value is an array of all valid top-level blocks not associated with a
 *          component (procedure and variable definitions)
 */
Blockly.Component.buildComponentMap = function(warnings, errors, forRepl, compileUnattachedBlocks) {
  var map = {};
  map.components = {};
  map.globals = [];

  // TODO: populate warnings, errors as we traverse the top-level blocks
  var blocks = Blockly.mainWorkspace.getTopBlocks(true);
  for (var x = 0, block; block = blocks[x]; x++) {
    // TODO: deal with unattached blocks that are not valid top-level definitions. Valid blocks
    // are events, variable definitions, or procedure definitions.
    if (!block.category) {
      continue;
    }
    if (block.type == 'procedures_defnoreturn' || block.type == 'procedures_defreturn' || block.type == 'global_declaration') {
      map.globals.push(block);
      // TODO: eventually deal with variable declarations, once we have them
    } else if (block.category == 'Component') {
      var instanceName = block.instanceName;
      // Only top-level event handler blocks are collected per-component;
      // everything else under 'Component' is skipped here.
      if(block.blockType != "event") {
        continue;
      }
      if (block.isGeneric) {
        // Generic (typed, not instance-bound) event handlers go to globals.
        map.globals.push(block);
        continue;
      }
      if (!map.components[instanceName]) {
        map.components[instanceName] = [];  // first block we've found for this component
      }

      // TODO: check for duplicate top-level blocks (e.g., two event handlers with same name) -
      // or better yet, prevent these from happening!
      map.components[instanceName].push(block);
    }
  }
  return map;
};

/**
 * Verify all blocks after a Component upgrade
 */
Blockly.Component.verifyAllBlocks = function () {
  // We can only verify blocks once the workspace has been injected...
  if (Blockly.mainWorkspace != null) {
    var allBlocks = Blockly.mainWorkspace.getAllBlocks();
    for (var x = 0, block; block = allBlocks[x]; ++x) {
      if (block.category != 'Component') {
        continue;
      }
      block.verify();
    }
  }
}

/**
 * Blockly.ComponentTypes
 *
 * Object whose fields are names of component types. For a given component type object,
 * the "componentInfo" field is the parsed JSON type object for the component type and
 * the "blocks" field is an array of block names for the generic blocks for that type.
 * For example:
 *    Blockly.ComponentTypes['Canvas'].componentInfo = the JSON object from parsing the typeJsonString
 *
 * eventDictionary, methodDictionary, and properties take in the name of the event/method/property
 * and give the relevant object in from the componentInfo object.
 *
 * The componentInfo has the following format (where upper-case strings are
 * non-terminals and lower-case strings are literals):
 * { "type": "COMPONENT-TYPE",
 *   "name": "COMPONENT-TYPE-NAME",
 *   "external": "true"|"false",
 *   "version": "VERSION",
 *   "categoryString": "PALETTE-CATEGORY",
 *   "helpString": "DESCRIPTION",
 *   "showOnPalette": "true"|"false",
 *   "nonVisible": "true"|"false",
 *   "iconName": "ICON-FILE-NAME",
 *   "properties": [
 *     { "name": "PROPERTY-NAME",
 *        "editorType": "EDITOR-TYPE",
 *        "defaultValue": "DEFAULT-VALUE"},*
 *    ],
 *   "blockProperties": [
 *     { "name": "PROPERTY-NAME",
 *        "description": "DESCRIPTION",
 *        "type": "YAIL-TYPE",
 *        "rw": "read-only"|"read-write"|"write-only"|"invisible"},*
 *   ],
 *   "events": [
 *     { "name": "EVENT-NAME",
 *       "description": "DESCRIPTION",
 *       "params": [
 *         { "name": "PARAM-NAME",
 *           "type": "YAIL-TYPE"},*
 *       ]},+
 *   ],
 *   "methods": [
 *     { "name": "METHOD-NAME",
 *       "description": "DESCRIPTION",
 *       "params": [
 *         { "name": "PARAM-NAME",
 *       "type": "YAIL-TYPE"},*
 *     ]},+
 *   ]
 * }
 */
Blockly.ComponentTypes.haveType = function(typeName) {
  return Blockly.ComponentTypes[typeName] != undefined;
};

/**
 * Populate Blockly.ComponentTypes object
 *
 * @param projectId the projectid whose types we are loading. Note: projectId is
 *        a string at this point. We will convert it to a long in Java code we call
 *        later.
 */
Blockly.ComponentTypes.populateTypes = function(projectId) {
  // Type descriptors come from the enclosing GWT panel as a JSON string.
  var componentInfoArray = JSON.parse(window.parent.BlocklyPanel_getComponentsJSONString(projectId));
  for(var i=0;i<componentInfoArray.length;i++) {
    var componentInfo = componentInfoArray[i];
    var typeName = componentInfo.name;
    Blockly.ComponentTypes[typeName] = {};
    Blockly.ComponentTypes[typeName].type = componentInfo.type;
    Blockly.ComponentTypes[typeName].external = componentInfo.external;
    Blockly.ComponentTypes[typeName].componentInfo = componentInfo;
    Blockly.ComponentTypes[typeName].eventDictionary = {};
    Blockly.ComponentTypes[typeName].methodDictionary = {};
    Blockly.ComponentTypes[typeName].setPropertyList = [];
    Blockly.ComponentTypes[typeName].getPropertyList = [];
    Blockly.ComponentTypes[typeName].properties = {};

    // parse type description and fill in all of the fields
    for(var k=0;k<componentInfo.events.length;k++) {
      Blockly.ComponentTypes[typeName].eventDictionary[componentInfo.events[k].name] = componentInfo.events[k];
    }
    for(var k=0;k<componentInfo.methods.length;k++) {
      Blockly.ComponentTypes[typeName].methodDictionary[componentInfo.methods[k].name] = componentInfo.methods[k];
    }
    for(var k=0;k<componentInfo.blockProperties.length;k++) {
      Blockly.ComponentTypes[typeName].properties[componentInfo.blockProperties[k].name] = componentInfo.blockProperties[k];
      // Deprecated properties are recorded in .properties but excluded from
      // the getter/setter dropdown lists below.
      if (componentInfo.blockProperties[k].deprecated == "true") continue;
      if(componentInfo.blockProperties[k].rw == "read-write" || componentInfo.blockProperties[k].rw == "read-only") {
        Blockly.ComponentTypes[typeName].getPropertyList.push(componentInfo.blockProperties[k].name);
      }
      if(componentInfo.blockProperties[k].rw == "read-write" || componentInfo.blockProperties[k].rw == "write-only") {
        Blockly.ComponentTypes[typeName].setPropertyList.push(componentInfo.blockProperties[k].name);
      }
    }
  }
};

/**
 * Blockly.ComponentInstances
 *
 * Object whose fields are names of component instances and whose field values
 * are objects with a blocks field containing an array of block names for the
 * instance.
 * For example:
 *    Blockly.ComponentInstances['Canvas1'].blocks = ['Canvas1_Touched',
 *        'Canvas1_DrawCircle', 'Canvas1_getproperty', 'Canvas1_setproperty', ...]
 * Blockly.ComponentInstances is populated by the Blockly.Component.add method.
 */
Blockly.ComponentInstances.addInstance = function(name, uid, typeName) {
  Blockly.ComponentInstances[name] = {};
  Blockly.ComponentInstances[name].uid = uid;
  Blockly.ComponentInstances[name].typeName = typeName;
};

Blockly.ComponentInstances.haveInstance = function(name, uid) {
  return Blockly.ComponentInstances[name] != undefined
  && Blockly.ComponentInstances[name].uid == uid;
};

Blockly.ComponentInstances.getInstanceNames = function() {
  var instanceNames = [];
  for(var instanceName in Blockly.ComponentInstances) {
    // Skip the function-valued fields of this registry (addInstance etc.);
    // real instances are objects carrying a uid.
    if(typeof Blockly.ComponentInstances[instanceName] == "object" && Blockly.ComponentInstances[instanceName].uid != null){
      instanceNames.push(instanceName);
    }
  }
  return instanceNames;
}

Blockly.Component.instanceNameToTypeName = function(instanceName) {
  // Delegates to the enclosing GWT panel, which knows the designer's model.
  return window.parent.BlocklyPanel_getComponentInstanceTypeName(Blockly.BlocklyEditor.formName,instanceName);
}

/**
 * @returns array of [name, name] pairs (dropdown-option format) for every
 *          registered instance of the given component type.
 */
Blockly.Component.getComponentNamesByType = function(componentType) {
  var componentNameArray = [];
  for(var componentName in Blockly.ComponentInstances) {
    if(Blockly.ComponentInstances[componentName].typeName == componentType) {
      componentNameArray.push([componentName,componentName]);
    }
  }
  return componentNameArray;
};
mit-cml/appinventor-sources
appinventor/blocklyeditor/src/component.js
214,491
/* * Utils.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.app.bss; import java.awt.Desktop; import java.awt.Frame; import java.io.BufferedInputStream; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.URL; import java.nio.charset.Charset; import java.text.NumberFormat; import java.util.ArrayList; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import javax.swing.ImageIcon; import javax.swing.JOptionPane; import javax.swing.JTabbedPane; import javax.swing.SwingUtilities; import dr.evolution.datatype.HiddenDataType; import dr.evolution.tree.TreeUtils; import org.apache.commons.math.random.MersenneTwister; import dr.app.bss.test.AncestralSequenceTrait; import dr.evolution.datatype.Codons; import dr.evolution.datatype.DataType; import 
dr.evolution.io.Importer.ImportException; import dr.evolution.io.NewickImporter; import dr.evolution.io.NexusImporter; import dr.evolution.sequence.Sequence; import dr.evolution.tree.NodeRef; import dr.evolution.tree.Tree; import dr.evolution.tree.TreeTraitProvider; import dr.evolution.util.MutableTaxonList; import dr.evolution.util.Taxa; import dr.evolution.util.Taxon; import dr.evomodel.tree.TreeModel; import dr.inference.model.Parameter; import dr.math.MathUtils; /** * @author Filip Bielejec * @version $Id$ */ public class Utils { // //////////////////// // ---THREAD UTILS---// // //////////////////// public static void sleep(int seconds) { try { Thread.sleep(seconds * 1000); } catch (InterruptedException e) { e.printStackTrace(); } }// END: sleep // //////////////////////////////// // ---RANDOM NUMB3R GENERATION---// // //////////////////////////////// private static MersenneTwister random = new MersenneTwister( MathUtils.nextLong()); public static double rLogNormal(double stdev, double mean) { double rNorm = random.nextGaussian() * stdev + mean; double rLognormal = Math.exp(rNorm); return rLognormal; }// END: drawRandom public static int rMultinom(double[] probabilities) { int range = probabilities.length + 1; double[] distribution = new double[range]; double sumProb = 0; for (double value : probabilities) { sumProb += value; }// END: probabilities loop distribution[0] = 0; for (int i = 1; i < range; ++i) { distribution[i] = distribution[i - 1] + (probabilities[i - 1] / sumProb); }// END: i loop distribution[range - 1] = 1.0; double key = random.nextDouble(); int mindex = 1; int maxdex = range - 1; int midpoint = mindex + (maxdex - mindex) / 2; while (mindex <= maxdex) { if (key < distribution[midpoint - 1]) { maxdex = midpoint - 1; } else if (key > distribution[midpoint]) { mindex = midpoint + 1; } else { return midpoint - 1; } midpoint = mindex + (int) Math.ceil((maxdex - mindex) / 2); }//END: mindex loop System.out.println("Error in rMultinom!"); return 
range - 1; }//END: rMultinom // //////////// // ---MATH---// // //////////// public static int sample(double[] probabilities) { int samplePos = -Integer.MAX_VALUE; double cumProb = 0.0; double u = random.nextDouble(); for (int i = 0; i < probabilities.length; i++) { cumProb += probabilities[i]; if (u <= cumProb) { samplePos = i; break; } } return samplePos; }// END: randomChoicePDF public static void rescale(double[] logX) { double max = max(logX); for (int i = 0; i < logX.length; i++) { logX[i] -= max; } }// END: rescale public double getParameterVariance(Parameter param) { int n = param.getSize(); double mean = getParameterMean(param); double var = 0; for (int i = 0; i < n; i++) { var+= Math.pow( (param.getValue(i) - mean), 2); } var/= (n-1); return var; }// END: getParameterVariance public double getParameterMean(Parameter param) { double mean = 0; int n = param.getSize(); for (int i = 0; i < n; i++) { mean += param.getValue(i); } mean /= n; return mean; }// END: getParameterMean public static double getNorm(double[] vector) { double norm = 0; for (int i = 0; i < vector.length; i++) { norm += Math.pow(vector[i], 2); } return Math.sqrt(norm); }// END: getNorm public static void normalize(double[] vector) { double norm = getNorm(vector); for (int i = 0; i < vector.length; i++) { vector[i] = vector[i] / norm; } }// END: normalize // //////////////////// // ---ARRAYS UTILS---// // //////////////////// public static void exponentiate(double[] array) { for (int i = 0; i < array.length; i++) { array[i] = Math.exp(array[i]); } }// END: exponentiate public static int max(int[] array) { int max = -Integer.MAX_VALUE; for (int i = 0; i < array.length; i++) { if (array[i] > max) { max = (int)array[i]; }// END: if check }// END: i loop return max; }// END: findMaximum public static int max(double[] array) { int max = -Integer.MAX_VALUE; for (int i=0; i< array.length;i++) { if (array[i] > max) { max = (int)array[i]; }// END: if check }// END: i loop return max; }// END: 
findMaximum public static int max(ArrayList<Integer> array) { int max = -Integer.MAX_VALUE; for (Integer element : array) { if (element > max) { max = element; }// END: if check }// END: i loop return max; }// END: findMaximum public static double sumArray(int[] array) { double sum = 0.0; for (int i = 0; i < array.length; i++) { sum += array[i]; } return sum; }// END: sumArray public static double sumArray(double[] array) { double sum = 0.0; for (int i = 0; i < array.length; i++) { sum += array[i]; } return sum; }// END: sumArray // ///////////////// // ---CONSTANTS---// // ///////////////// // public static final int TREE_MODEL_ELEMENT = 0; public static final int BRANCH_MODEL_ELEMENT = 1; public static final int SITE_RATE_MODEL_ELEMENT = 2; public static final int BRANCH_RATE_MODEL_ELEMENT = 3; public static final int FREQUENCY_MODEL_ELEMENT = 4; public static final int DEMOGRAPHIC_MODEL_ELEMENT = 5; public static final String TOPOLOGY = "topology"; public static final String ABSOLUTE_HEIGHT = "absoluteHeight"; public static final String TREE_FILENAME = "treeFilename"; public static final String SUBSTITUTION_MODEL = "substitutionModel"; public static final String DEMOGRAPHIC_MODEL = "demographicModel"; public static final String FREQUENCY_MODEL = "frequencyModel"; public static final String CODON_UNIVERSAL = "codon-universal"; public static final String CHOOSE_FILE = "Choose file..."; public static final String EDIT_TAXA_SET = "Edit taxa set..."; public static final String ANCESTRAL_SEQUENCE = "ancestralSequence"; public static final String BSS_ICON = "icons/bss.png"; public static final String CHECK_ICON = "icons/check.png"; public static final String ERROR_ICON = "icons/error.png"; public static final String HAMMER_ICON = "icons/hammer.png"; public static final String CLOSE_ICON = "icons/close.png"; public static final String BIOHAZARD_ICON = "icons/biohazard.png"; public static final String BUBBLE_BLUE_ICON = "icons/bubble-blue.png"; public static final String 
SAVE_ICON = "icons/save.png"; public static final String TEXT_FILE_ICON = "icons/file.png"; public static final double[] UNIFORM_CODON_FREQUENCIES = new double[] { 0.0163936, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0.01639344 }; public static final String STOP_CODONS[] = new String[] { "TAA", "TAG", "TGA" }; // /////////////////////////////// // ---GENERAL UTILITY METHODS---// // /////////////////////////////// public static double logfactor(int n) { double logfactor = 0.0; for (int i = 1; i <= n; i++) { logfactor += Math.log(i); } return logfactor; } public static double map(double value, double low1, double high1, double low2, double high2) { /** * maps a single value from its range into another interval * * @param low1 * , high1 - range of value; low2, high2 - interval * @return the mapped value */ return (value - low1) / (high1 - low1) * (high2 - low2) + low2; }// END: map public static String[] loadStrings(String filename) throws IOException { int linesCount = countLines(filename); String[] lines = new String[linesCount]; FileInputStream inputStream; BufferedReader reader; String line; inputStream = new FileInputStream(filename); reader = new BufferedReader(new InputStreamReader(inputStream, Charset.forName("UTF-8"))); int i = 0; while ((line = reader.readLine()) != null) { lines[i] = line; i++; } // 
Clean up reader.close(); reader = null; inputStream = null; return lines; }// END: loadStrings public static int countLines(String filename) throws IOException { InputStream is = new BufferedInputStream(new FileInputStream(filename)); byte[] c = new byte[1024]; int count = 0; int readChars = 0; boolean empty = true; while ((readChars = is.read(c)) != -1) { empty = false; for (int i = 0; i < readChars; ++i) { if (c[i] == '\n') { ++count; } } } is.close(); return (count == 0 && !empty) ? 1 : count; }// END: countLines public static Taxa importTaxaFromFile(File file) throws IOException { Taxa taxa = new Taxa(); Taxon taxon; String[] lines = Utils.loadStrings(file.getAbsolutePath()); for (int i = 0; i < lines.length; i++) { String[] line = lines[i].split("\\s+"); taxon = new Taxon(line[TaxaEditorTableModel.NAME_INDEX]); taxon.setAttribute(Utils.ABSOLUTE_HEIGHT, Double.valueOf(line[TaxaEditorTableModel.HEIGHT_INDEX])); taxa.addTaxon(taxon); }// END: i loop return taxa; }// END: importTaxaFromFile public static Tree importTreeFromFile(File file) throws IOException, ImportException { Tree tree = null; BufferedReader reader = new BufferedReader(new FileReader(file)); String line = reader.readLine(); if (line.toUpperCase().startsWith("#NEXUS")) { NexusImporter importer = new NexusImporter(reader); tree = importer.importTree(null); } else { NewickImporter importer = new NewickImporter(reader); tree = importer.importTree(null); } reader.close(); return tree; }// END: importTreeFromFile public static void removeTaxaWithAttributeValue(PartitionDataList dataList, String attribute, String value) { for (int i = 0; i < dataList.allTaxa.getTaxonCount(); i++) { Taxon taxon = dataList.allTaxa.getTaxon(i); if (taxon.getAttribute(attribute).toString() .equalsIgnoreCase(value)) { dataList.allTaxa.removeTaxon(taxon); i--; } } }// END: removeTaxaWithAttributeValue public static void centreLine(String line, int pageWidth) { int n = pageWidth - line.length(); int n1 = n / 2; for (int i = 0; 
i < n1; i++) { System.out.print(" "); } System.out.println(line); } public static int getSiteCount(PartitionDataList dataList) { int siteCount = 0; int to = 0; for (PartitionData data : dataList) { // siteCount += data.createPartitionSiteCount(); to = data.to; if (to > siteCount) { siteCount = to; } } return siteCount;// + 1; }// END: getSiteCount public static int arrayIndex(String[] array, String element) { List<String> vector = new ArrayList<String>(); for (int i = 0; i < array.length; i++) { vector.add(array[i]); } return vector.indexOf(element); }// END: arrayIndex public static ArrayList<TreeModel> treesToList(PartitionDataList dataList) { ArrayList<TreeModel> treeModelsList = new ArrayList<TreeModel>(); for (PartitionData data : dataList) { treeModelsList.add(data.createTreeModel()); } return treeModelsList; }// END: treesToList public static boolean taxonExists(Taxon taxon, MutableTaxonList taxonList) { boolean exists = false; for (Taxon taxon2 : taxonList) { if (taxon.equals(taxon2) // && // taxon.getAttribute(Utils.TREE_FILENAME).toString().equalsIgnoreCase(taxon2.getAttribute(Utils.TREE_FILENAME).toString()) ) { exists = true; break; } } return exists; }// END: taxonExists // private boolean isFileInList(File file) { // boolean exists = false; // // for (File file2 : dataList.treesList) { // // if (file.getName().equalsIgnoreCase(file2.getName())) { // exists = true; // break; // } // // } // // return exists; // }// END: isFileInList public static double getAbsoluteTaxonHeight(Taxon taxon, Tree tree) { double height = 0.0; for (int i = 0; i < tree.getExternalNodeCount(); i++) { NodeRef externalNode = tree.getExternalNode(i); Taxon externalNodeTaxon = tree.getNodeTaxon(externalNode); if (externalNodeTaxon.equals(taxon)) { height = tree.getNodeHeight(externalNode); } }// END: external node loop return height; }// END: getAbsoluteTaxonHeight public static boolean isRecordInList(TreesTableRecord record, ArrayList<TreesTableRecord> recordsList) { boolean 
exists = false; for (TreesTableRecord record2 : recordsList) { if (record.getName().equalsIgnoreCase(record2.getName())) { exists = true; break; } } return exists; }// END: isRecordInList public static boolean isTaxaInList(Taxa taxa, ArrayList<Taxa> taxaList) { boolean exists = false; for (Taxa taxa2 : taxaList) { if (taxaToString(taxa, true).equalsIgnoreCase( taxaToString(taxa2, true))) { exists = true; break; } } return exists; }// END: isTaxaInList public static int taxaIsIdenticalWith(Taxa taxa, ArrayList<Taxa> taxaList) { int index = -Integer.MAX_VALUE; for (Taxa taxa2 : taxaList) { if (taxaToString(taxa, true).equalsIgnoreCase( taxaToString(taxa2, true))) { index = taxaList.indexOf(taxa2); break; } } return index; }// END: treeModelIsIdenticalWith public static boolean isTreeModelInList(TreeModel treeModel, ArrayList<TreeModel> treeModelList) { boolean exists = false; for (TreeModel treeModel2 : treeModelList) { if (treeModel.getNewick().equalsIgnoreCase(treeModel2.getNewick())) { exists = true; break; } } return exists; }// END: isTreeModelInList public static int treeModelIsIdenticalWith(TreeModel treeModel, ArrayList<TreeModel> treeModelList) { int index = -Integer.MAX_VALUE; for (TreeModel treeModel2 : treeModelList) { if (treeModel.getNewick().equalsIgnoreCase(treeModel2.getNewick())) { index = treeModelList.indexOf(treeModel2); break; } } return index; }// END: treeModelIsIdenticalWith public static boolean isElementInList(PartitionData data, ArrayList<PartitionData> partitionList, int elementIndex) { boolean exists = false; switch (elementIndex) { case DEMOGRAPHIC_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (demographicModelToString(data).equalsIgnoreCase( demographicModelToString(data2))) { exists = true; break; } } break; case BRANCH_RATE_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (clockRateModelToString(data).equalsIgnoreCase( clockRateModelToString(data2))) { exists = true; break; } } break; case 
FREQUENCY_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (frequencyModelToString(data).equalsIgnoreCase( frequencyModelToString(data2))) { exists = true; break; } } break; case BRANCH_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (branchSubstitutionModelToString(data).equalsIgnoreCase( branchSubstitutionModelToString(data2))) { exists = true; break; } } break; case SITE_RATE_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (siteRateModelToString(data).equalsIgnoreCase( siteRateModelToString(data2))) { exists = true; break; } } break; default: throw new RuntimeException("Unknown element"); }// END: switch return exists; }// END: isModelInList public static int isIdenticalWith(PartitionData data, ArrayList<PartitionData> partitionList, int elementIndex) { int index = -Integer.MAX_VALUE; switch (elementIndex) { case DEMOGRAPHIC_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (demographicModelToString(data).equalsIgnoreCase( demographicModelToString(data2))) { index = partitionList.indexOf(data2); break; } } break; case BRANCH_RATE_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (clockRateModelToString(data).equalsIgnoreCase( clockRateModelToString(data2))) { index = partitionList.indexOf(data2); break; } } break; case FREQUENCY_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (frequencyModelToString(data).equalsIgnoreCase( frequencyModelToString(data2))) { index = partitionList.indexOf(data2); break; } } break; case BRANCH_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (branchSubstitutionModelToString(data).equalsIgnoreCase( branchSubstitutionModelToString(data2))) { index = partitionList.indexOf(data2); break; } } break; case SITE_RATE_MODEL_ELEMENT: for (PartitionData data2 : partitionList) { if (siteRateModelToString(data).equalsIgnoreCase( siteRateModelToString(data2))) { index = partitionList.indexOf(data2); break; } } break; default: throw new 
RuntimeException("Unknown element"); }// END: switch return index; }// END: isIdenticalWith // ///////////////// // ---GUI UTILS---// // ///////////////// public static ImageIcon createImageIcon(String path) { ImageIcon icon = null; URL imgURL = BeagleSequenceSimulatorApp.class.getResource(path); if (imgURL != null) { icon = new ImageIcon(imgURL); } else { System.err.println("Couldn't find file: " + path + "\n"); } return icon; }// END: CreateImageIcon public static boolean isBrowsingSupported() { if (!Desktop.isDesktopSupported()) { return false; } boolean result = false; Desktop desktop = java.awt.Desktop.getDesktop(); if (desktop.isSupported(Desktop.Action.BROWSE)) { result = true; } return result; }// END: isBrowsingSupported public static int getTabbedPaneComponentIndex(JTabbedPane tabbedPane, String title) { int index = -Integer.MAX_VALUE; int count = tabbedPane.getTabCount(); for (int i = 0; i < count; i++) { if (tabbedPane.getTitleAt(i).toString().equalsIgnoreCase(title)) { index = i; break; }// END: title check }// END: i loop return index; }// END: getComponentIndex public static Frame getActiveFrame() { Frame result = null; Frame[] frames = Frame.getFrames(); for (int i = 0; i < frames.length; i++) { Frame frame = frames[i]; if (frame.isVisible()) { result = frame; break; } } return result; } public static String getMultipleWritePath(File outFile, String defaultExtension, int i) { String path = outFile.getParent(); String[] nameArray = outFile.getName().split("\\.", 2); String name = ((i == 0) ? nameArray[0] : nameArray[0] + i); String extension = (nameArray.length == 1) ? (defaultExtension) : (nameArray[1]); String fullPath = path + System.getProperty("file.separator") + name + "." 
+ extension; return fullPath; }// END: getMultipleWritePath public static String getWritePath(File outFile, String defaultExtension) { String path = outFile.getParent(); String[] nameArray = outFile.getName().split("\\.", 2); String name = nameArray[0]; String extension = (nameArray.length == 1) ? (defaultExtension) : (nameArray[1]); String fullPath = path + System.getProperty("file.separator") + name + "." + extension; return fullPath; }// END: getWritePath public static void showDialog(final String message) { if (SwingUtilities.isEventDispatchThread()) { JOptionPane.showMessageDialog(getActiveFrame(), message, "Message", JOptionPane.ERROR_MESSAGE, Utils.createImageIcon(Utils.BUBBLE_BLUE_ICON)); } else { SwingUtilities.invokeLater(new Runnable() { public void run() { JOptionPane.showMessageDialog(getActiveFrame(), message, "Message", JOptionPane.ERROR_MESSAGE, Utils.createImageIcon(Utils.BUBBLE_BLUE_ICON)); } }); }// END: edt check }// END: showDialog // //////////////////////////////// // ---EXCEPTION HANDLING UTILS---// // //////////////////////////////// public static void handleException(final Throwable e, final String message) { final Thread t = Thread.currentThread(); if (SwingUtilities.isEventDispatchThread()) { showExceptionDialog(t, e, message); } else { SwingUtilities.invokeLater(new Runnable() { public void run() { showExceptionDialog(t, e, message); } }); }// END: edt check }// END: uncaughtException public static void handleException(final Throwable e) { final Thread t = Thread.currentThread(); if (SwingUtilities.isEventDispatchThread()) { showExceptionDialog(t, e); } else { SwingUtilities.invokeLater(new Runnable() { public void run() { showExceptionDialog(t, e); } }); }// END: edt check }// END: handleException private static void showExceptionDialog(Thread t, Throwable e) { String msg = String.format("Unexpected problem on thread %s: %s", t.getName(), e.getMessage()); logException(t, e); JOptionPane.showMessageDialog(Utils.getActiveFrame(), // msg, 
// "Error", // JOptionPane.ERROR_MESSAGE, // Utils.createImageIcon(Utils.ERROR_ICON)); }// END: showExceptionDialog private static void showExceptionDialog(Thread t, Throwable e, String message) { String msg = String.format("Unexpected problem on thread %s: %s" + "\n" + message, t.getName(), e.getMessage()); logException(t, e); JOptionPane.showMessageDialog(Utils.getActiveFrame(), // msg, // "Error", // JOptionPane.ERROR_MESSAGE, // Utils.createImageIcon(Utils.ERROR_ICON)); }// END: showExceptionDialog private static void logException(Thread t, Throwable e) { e.printStackTrace(); }// END: logException // /////////////////// // ---PRINT UTILS---// // /////////////////// public static void printMap(Map<?, ?> mp) { Iterator<?> it = mp.entrySet().iterator(); while (it.hasNext()) { Entry<?, ?> pairs = (Entry<?, ?>) it.next(); Object obj = pairs.getValue(); if (obj instanceof int[]) { int[] seq = (int[]) obj; System.out.print(pairs.getKey() + " ="); for (int i = 0; i < seq.length; ++i) { System.out.print(" " + seq[i]); } System.out.println(); } else if (obj instanceof double[]) { double[] seq = (double[]) obj; System.out.print(pairs.getKey() + " ="); for (int i = 0; i < seq.length; ++i) { System.out.print(" " + seq[i]); } System.out.println(); } else { System.out.println(pairs.getKey() + " = " + pairs.getValue()); }// END: obj class check } }// END: printMap public static void printHashMap(ConcurrentHashMap<?, ?> hashMap) { Iterator<?> iterator = hashMap.entrySet().iterator(); while (iterator.hasNext()) { Entry<?, ?> pairs = (Entry<?, ?>) iterator.next(); Taxon taxon = (Taxon) pairs.getKey(); int[] sequence = (int[]) pairs.getValue(); System.out.println(taxon.toString()); Utils.printArray(sequence); }// END: while has next }// END: printHashMap public static void printArray(Object[] x) { for (int i = 0; i < x.length; i++) { System.out.print(x[i] + " "); } System.out.println(); }// END: printArray public static void printArray(int[] x) { for (int i = 0; i < x.length; i++) 
{ System.out.print(x[i] + " "); } System.out.println(); }// END: printArray public static void printArray(double[] x) { for (int i = 0; i < x.length; i++) { System.out.print(x[i] + " "); } System.out.println(); }// END: printArray public static void printArray(boolean[] x) { for (int i = 0; i < x.length; i++) { System.out.print(x[i] + " "); } System.out.println(); } public static void printArray(String[] x) { for (int i = 0; i < x.length; i++) { System.out.println(x[i]); } }// END: printArray public static void print2DArray(double[][] array) { for (int row = 0; row < array.length; row++) { for (int col = 0; col < array[row].length; col++) { System.out.print(array[row][col] + " "); } System.out.print("\n"); } }// END: print2DArray public static void print2DArray(int[][] array) { for (int row = 0; row < array.length; row++) { for (int col = 0; col < array[row].length; col++) { System.out.print(array[row][col] + " "); } System.out.print("\n"); } }// END: print2DArray public static void print2Arrays(int[] array1, double[] array2, int nrow) { for (int row = 0; row < nrow; row++) { System.out.print(array1[row] + " " + array2[row] + " "); System.out.print("\n"); } }// END: print2DArray public static void print2DArray(double[][] array, int formatEvery) { int i = 0; for (int row = 0; row < array.length; row++) { for (int col = 0; col < array[row].length; col++) { if (i == formatEvery) { System.out.print("\n"); i = 0; } System.out.print(array[row][col] + " "); i++; } System.out.print("\n"); } }// END: print2DArray public static void printBranchSubstitutionModel(PartitionData data) { System.out.print("\tBranch Substitution model: "); System.out.print(branchSubstitutionModelToString(data)); System.out.print("\n"); }// END: printBranchSubstitutionModel public static void printClockRateModel(PartitionData data) { System.out.print("\tClock rate model: "); System.out.print(clockRateModelToString(data)); System.out.print("\n"); }// END: printClockRateModel public static void 
printFrequencyModel(PartitionData data) { System.out.print("\tFrequency model: "); System.out.print(frequencyModelToString(data)); System.out.print("\n"); }// END: printFrequencyModel public static void printSiteRateModel(PartitionData data) { System.out.print("\tSite rate model: "); System.out.print(siteRateModelToString(data)); System.out.print("\n"); }// END: printFrequencyModel public static void printDemographicModel(PartitionData data) { System.out.print("\tDemographic model: "); System.out.print(demographicModelToString(data)); System.out.print("\n"); }// END: printFrequencyModel private static void printDataType(PartitionData data) { System.out.print("\tData type: "); System.out.print(dataTypeToString(data)); System.out.print("\n"); }// END: printDataType public static void printTaxaSet(Taxa taxa) { for (int i = 0; i < taxa.getTaxonCount(); i++) { Taxon taxon = taxa.getTaxon(i); System.out.print("\t\t " + taxonToString(taxon, false) + ("\n")); } }// END: printTaxaSet public static void printTree(TreesTableRecord record) { System.out.print(record.getTree().toString()); System.out.print("\n"); }// END: printTree public static void printRecord(TreesTableRecord record) { if (record == null) { System.out.println("\tRecord: NOT SET"); } else if (record.isTreeSet()) { System.out.print("\t" + record.getName() + ": "); printTree(record); } else if (record.isTaxaSet()) { System.out.println("\t" + record.getName() + ":"); printTaxaSet(record.getTaxa()); } else { // } }// END: printRecord public static void printRecords(PartitionDataList dataList) { for (TreesTableRecord record : dataList.recordsList) { printRecord(record); }// END: record loop }// END: printRecords public static void printPartitionData(PartitionData data) { printRecord(data.record); printDataType(data); printDemographicModel(data); System.out.println("\tFrom: " + data.from); System.out.println("\tTo: " + data.to); System.out.println("\tEvery: " + data.every); printBranchSubstitutionModel(data); 
printSiteRateModel(data); printClockRateModel(data); printFrequencyModel(data); }// END: printPartitionData public static void printPartitionDataList(PartitionDataList dataList) { // System.out.println(dataList.get(0).from + " " + // dataList.get(1).from); if (BeagleSequenceSimulatorApp.DEBUG) { System.out.println("Possible records: "); printRecords(dataList); } System.out.println("\tSite count: " + getSiteCount(dataList)); System.out.println("\tOutput type: " + dataList.outputFormat); if (dataList.setSeed) { System.out.println("\tStarting seed: " + dataList.startingSeed); } int row = 1; for (PartitionData data : dataList) { System.out.println("Partition: " + row); printPartitionData(data); row++; }// END: data list loop }// END: printDataList public static void printTaxonList(PartitionDataList dataList) { System.out.println(taxaToString(dataList.allTaxa, true)); }// END: printTaxonList public static Sequence intArray2Sequence(Taxon taxon, int[] seq, int gapFlag, DataType dataType) { StringBuilder sSeq = new StringBuilder(); int partitionSiteCount = seq.length; if (dataType instanceof Codons) { for (int i = 0; i < partitionSiteCount; i++) { int state = seq[i]; if (state == gapFlag) { sSeq.append(dataType.getTriplet(dataType.getGapState())); } else { sSeq.append(dataType.getTriplet(seq[i])); }// END: gap check }// END: replications loop } else { for (int i = 0; i < partitionSiteCount; i++) { int state = seq[i]; if (state == gapFlag) { sSeq.append(dataType.getCode(dataType.getGapState())); } else { if(dataType instanceof HiddenDataType){ sSeq.append(dataType.getCode(seq[i] % (dataType.getStateCount()/((HiddenDataType) dataType).getHiddenClassCount()))); }else{ sSeq.append(dataType.getCode(seq[i])); } }// END: gap check }// END: replications loop }// END: dataType check return new Sequence(taxon, sSeq.toString()); }// END: intArray2Sequence // ////////////////////// // ---TOSTRING UTILS---// // ////////////////////// public static String taxonToString(Taxon taxon, 
boolean printNames) { String string = null; if (printNames) { string = taxon.getId() + " (" + taxon.getAttribute(Utils.ABSOLUTE_HEIGHT) + "," + taxon.getAttribute(Utils.TREE_FILENAME) + ")"; } else { string = taxon.getId() + " (" + taxon.getAttribute(Utils.ABSOLUTE_HEIGHT) + ")"; } return string; }// END: taxonToString public static String taxaToString(Taxa taxa, boolean printNames) { String string = ""; for (int i = 0; i < taxa.getTaxonCount(); i++) { Taxon taxon = taxa.getTaxon(i); string += taxonToString(taxon, printNames) + ("\n"); } return string; }// END: taxaToString public static String partitionDataToString(PartitionData data, TreeModel simulatedTreeModel // , LinkedHashMap<NodeRef, int[]> sequencesMap ) { String string = ""; // if (data.record.isTreeSet()) { // // string += ("Tree: " + data.record.getTree().toString())+ ("\n"); // // } else if (data.record.isTaxaSet()) { // // string += ("Taxa Set: \n" + taxaToString(data.record.getTaxa(), // false));//+ ("\n"); // // } else { // // // } string += ("Tree model: " + simulatedTreeModel.toString()) + ("\n"); // string += ("Tree model: " // +annotatedTreeModelToString(simulatedTreeModel, sequencesMap, // data.createDataType()) ) + ("\n"); string += ("From: " + data.from) + ("\n"); string += ("To: " + data.to) + ("\n"); string += ("Every: " + data.every) + ("\n"); string += ("Data type: ") + dataTypeToString(data) + ("\n"); string += ("Demographic model: ") + demographicModelToString(data) + ("\n"); string += ("Branch Substitution model: ") + branchSubstitutionModelToString(data) + ("\n"); string += ("Frequency model: ") + frequencyModelToString(data) + ("\n"); string += ("Site Rate model: ") + siteRateModelToString(data) + ("\n"); string += ("Clock Rate model: ") + clockRateModelToString(data) + ("\n"); return string; }// END: partitionDataToString public static String partitionDataListToString(PartitionDataList dataList, // ArrayList<TreeModel> simulatedTreeModelList // 
,LinkedHashMap<Integer,LinkedHashMap<NodeRef, int[]>>
// partitionSequencesMap
) {

    String string = "";
    TreeModel simulatedTreeModel;
    // LinkedHashMap<NodeRef, int[]> sequencesMap;

    // Global summary first: total site count, and the seed only if one was explicitly set.
    string += ("Site count: " + getSiteCount(dataList)) + ("\n");
    if (dataList.setSeed) {
        string += ("Starting seed: " + dataList.startingSeed) + ("\n");
    }

    // One numbered section per partition; the tree for partition i is at index i
    // of simulatedTreeModelList (lists are assumed parallel — TODO confirm at call sites).
    int row = 0;
    for (PartitionData data : dataList) {

        simulatedTreeModel = simulatedTreeModelList.get(row);
        // sequencesMap = partitionSequencesMap.get(row);

        string += ("Partition: " + (row + 1)) + ("\n");
        string += partitionDataToString(data, simulatedTreeModel
        // , sequencesMap
        );
        string += ("\n");
        row++;
    }// END: data list loop

    return string;
}// END: partitionDataListToString

// TODO: doesn't work
/**
 * Renders the tree in Newick format with ancestral-sequence annotations attached
 * via an AncestralSequenceTrait trait provider. Branch lengths are written as time,
 * numbers formatted with an English-locale NumberFormat.
 */
public static String annotatedTreeModelToString(TreeModel treeModel, LinkedHashMap<NodeRef, int[]> sequencesMap, DataType dataType) {

    StringBuffer buffer = new StringBuffer();
    NumberFormat format = NumberFormat.getNumberInstance(Locale.ENGLISH);
    boolean useTipLabels = true;

    AncestralSequenceTrait ancestralSequence = new AncestralSequenceTrait(sequencesMap, dataType);
    TreeTraitProvider[] treeTraitProviders = new TreeTraitProvider[] { ancestralSequence };

    TreeUtils.newick(treeModel, //
        treeModel.getRoot(), //
        useTipLabels, //
        TreeUtils.BranchLengthType.LENGTHS_AS_TIME, //
        format, //
        null, //
        treeTraitProviders, //
        null, buffer);

    return buffer.toString();
}

/** Returns the display name of the partition's data type. */
private static String dataTypeToString(PartitionData data) {
    String string = PartitionData.dataTypes[data.dataTypeIndex];
    return string;
}

/**
 * Returns the demographic model name followed by its parameter values,
 * e.g. "Name ( v1 v2 )".
 * NOTE(review): demographyModelToString below is a byte-for-byte duplicate of
 * this method — one of the two looks redundant; confirm before removing.
 */
public static String demographicModelToString(PartitionData data) {

    String string = PartitionData.demographicModels[data.demographicModelIndex];

    string += (" ( ");
    for (int i = 0; i < PartitionData.demographicParameterIndices[data.demographicModelIndex].length; i++) {
        string += data.demographicParameterValues[PartitionData.demographicParameterIndices[data.demographicModelIndex][i]];
        string += " ";
    }// END: indices loop
    string += ")";

    return string;
}

/**
 * Returns the clock model name and its parameter values; for the LRC model the
 * real/log parameter-space choice is appended as well.
 */
public static String clockRateModelToString(PartitionData data) {

    String string = PartitionData.clockModels[data.clockModelIndex];

    string += (" ( ");
    for (int i = 0; i < PartitionData.clockParameterIndices[data.clockModelIndex].length; i++) {
        string += data.clockParameterValues[PartitionData.clockParameterIndices[data.clockModelIndex][i]];
        string += " ";
    }// END: indices loop
    string += ")";

    // LRC_INDEX is accessed through the instance; presumably a constant on PartitionData.
    if(data.clockModelIndex == data.LRC_INDEX) {
        String space = (data.lrcParametersInRealSpace == true ? "real" : "log");
        string += " ( " + "Parameters in " + space + " space )";
    }

    return string;
}

/**
 * Returns the frequency model name and its parameter values.
 * NOTE(review): unlike the sibling methods, the index table here is read from the
 * instance (data.frequencyParameterIndices) rather than statically from
 * PartitionData — confirm whether that asymmetry is intentional.
 */
public static String frequencyModelToString(PartitionData data) {

    String string = PartitionData.frequencyModels[data.frequencyModelIndex];

    string += (" ( ");
    for (int i = 0; i < data.frequencyParameterIndices[data.frequencyModelIndex].length; i++) {
        string += data.frequencyParameterValues[data.frequencyParameterIndices[data.frequencyModelIndex][i]];
        string += " ";
    }// END: indices loop
    string += ")";

    return string;
}

/** Returns the substitution model name and its parameter values. */
public static String branchSubstitutionModelToString(PartitionData data) {

    String string = PartitionData.substitutionModels[data.substitutionModelIndex];

    string += (" ( ");
    for (int i = 0; i < PartitionData.substitutionParameterIndices[data.substitutionModelIndex].length; i++) {
        string += data.substitutionParameterValues[PartitionData.substitutionParameterIndices[data.substitutionModelIndex][i]];
        string += " ";
    }// END: indices loop
    string += ")";

    return string;
}

/** Returns the site-rate model name and its parameter values. */
public static String siteRateModelToString(PartitionData data) {

    String string = PartitionData.siteRateModels[data.siteRateModelIndex];

    string += (" ( ");
    for (int i = 0; i < PartitionData.siteRateModelParameterIndices[data.siteRateModelIndex].length; i++) {
        string += data.siteRateModelParameterValues[PartitionData.siteRateModelParameterIndices[data.siteRateModelIndex][i]];
        string += " ";
    }// END: indices loop
    string += ")";

    return string;
}

/**
 * Returns the demographic model name and its parameter values.
 * NOTE(review): identical implementation to demographicModelToString above.
 */
public static String demographyModelToString(PartitionData data) {

    String string = PartitionData.demographicModels[data.demographicModelIndex];

    string += (" ( ");
    for (int i = 0; i < PartitionData.demographicParameterIndices[data.demographicModelIndex].length; i++) {
        string += data.demographicParameterValues[PartitionData.demographicParameterIndices[data.demographicModelIndex][i]];
        string += " ";
    }// END: indices loop
    string += ")";

    return string;
}

}// END: class
maxbiostat/beast-mcmc
src/dr/app/bss/Utils.java
214,492
package shared;

import java.io.Serializable;

import util.linalg.DenseVector;
import util.linalg.Vector;

/**
 * The abstract class representing some instance: a data vector, an optional
 * label (itself an Instance), and a weight (defaulting to 1.0).
 * NOTE(review): implements Serializable without declaring a serialVersionUID —
 * consider adding one if serialized form must stay stable.
 * @author Andrew Guillory [email protected]
 * @version 1.0
 */
public class Instance implements Serializable, Copyable {

    /**
     * The label for this instance
     */
    private Instance label;

    /**
     * The vector storing the data
     */
    private Vector data;

    /**
     * The weight of the instance
     */
    private double weight;

    /**
     * Make a new instance from the given data
     * @param data the data itself
     * @param label the label
     * @param weight the weight
     */
    public Instance(Vector data, Instance label, double weight) {
        this.data = data;
        this.label = label;
        this.weight = weight;
    }

    /**
     * Make a new instance from the given data, with weight 1.0
     * @param data the data itself
     * @param label the label
     */
    public Instance(Vector data, Instance label) {
        this.data = data;
        this.label = label;
        this.weight = 1.0;
    }

    /**
     * Make a new unlabeled instance using the given vector
     * @param v the vector of data
     */
    public Instance(Vector v) {
        data = v;
        weight = 1.0;
    }

    /**
     * Make a new instance wrapping the array in a DenseVector
     * @param ds the data
     */
    public Instance(double[] ds) {
        data = new DenseVector(ds);
        weight = 1.0;
    }

    /**
     * Make a new single-component instance with the given value
     * @param val the value
     */
    public Instance(double val) {
        data = new DenseVector(1);
        data.set(0, val);
        weight = 1.0;
    }

    /**
     * Make a new single-component instance with the given value
     * @param val the value
     */
    public Instance(int val) {
        data = new DenseVector(1);
        data.set(0, val);
        weight = 1.0;
    }

    /**
     * Make a new discrete input output instance
     * @param i the input
     * @param o the output (stored as the label)
     */
    public Instance(int i, int o) {
        this(i);
        label = new Instance(o);
    }

    /**
     * Make a new double input discrete output instance
     * @param ds the input
     * @param i the output (stored as the label)
     */
    public Instance(double[] ds, int i) {
        this(ds);
        label = new Instance(i);
    }

    /**
     * Make a new input output instance
     * @param ds the data
     * @param b the label (true maps to 1, false to 0)
     */
    public Instance(double[] ds, boolean b) {
        this(ds);
        label = new Instance(b);
    }

    /**
     * Make a new instance with the given boolean value (1 for true, 0 for false)
     * @param val the value
     */
    public Instance(boolean val) {
        this(val ? 1 : 0);
    }

    /**
     * Get the size (number of components) of the instance
     * @return the size
     */
    public int size() {
        return data.size();
    }

    /**
     * Get the ith continuous value
     * @param i the value to get
     * @return the value
     */
    public double getContinuous(int i) {
        return data.get(i);
    }

    /**
     * Get the ith discrete value (the component rounded to the nearest int)
     * @param i the value to get
     * @return the value
     */
    public int getDiscrete(int i) {
        return (int) Math.round(data.get(i));
    }

    /**
     * Get the continuous value of this instance (component 0)
     * @return the value
     */
    public double getContinuous() {
        return getContinuous(0);
    }

    /**
     * Get the discrete value of this instance (component 0)
     * @return the discrete value
     */
    public int getDiscrete() {
        return getDiscrete(0);
    }

    /**
     * Get a plus or minus value: 1 when the discrete value is 1, otherwise -1
     * @return plus or minus one
     */
    public double getPlusMinus() {
        return getDiscrete() == 1 ? 1 : -1;
    }

    /**
     * Get the boolean value (true iff the discrete value is 1)
     * @return the boolean value
     */
    public boolean getBoolean() {
        return getDiscrete() == 1;
    }

    /**
     * Get the label for this instance
     * @return the label, possibly null
     */
    public Instance getLabel() {
        return label;
    }

    /**
     * Get the data vector
     * @return the data
     */
    public Vector getData() {
        return data;
    }

    /**
     * Get the weight of this instance
     * @return the weight
     */
    public double getWeight() {
        return weight;
    }

    /**
     * Set the data vector
     * @param vector the data vector
     */
    public void setData(Vector vector) {
        data = vector;
    }

    /**
     * Set the label for this instance
     * @param instance the label
     */
    public void setLabel(Instance instance) {
        label = instance;
    }

    /**
     * Set the weight for the instance
     * @param d the new weight
     */
    public void setWeight(double d) {
        weight = d;
    }

    /**
     * Make a deep copy: the data vector and (when present) the label are copied,
     * the weight is shared by value.
     * @return the copy
     */
    public Copyable copy() {
        if (label != null) {
            return new Instance((Vector) data.copy(), (Instance) label.copy(), weight);
        } else {
            return new Instance((Vector) data.copy(), null, weight);
        }
    }

    /**
     * Renders "data [: label] [x weight]"; weight shown only when not 1.0.
     * @see java.lang.Object#toString()
     */
    public String toString() {
        String result = data.toString();
        if (label != null) {
            result += " : " + label.toString();
        }
        if (weight != 1.0) {
            result += " x " + weight;
        }
        return result;
    }
}
gijigae/ABAGAIL
src/shared/Instance.java
214,493
/*
 * Copyright The WildFly Authors
 * SPDX-License-Identifier: Apache-2.0
 */
package org.jboss.as.xts;

import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.ADD;
import static org.jboss.as.xts.XTSSubsystemDefinition.DEFAULT_CONTEXT_PROPAGATION;
import static org.jboss.as.xts.XTSSubsystemDefinition.ASYNC_REGISTRATION;
import static org.jboss.as.xts.XTSSubsystemDefinition.ENVIRONMENT_URL;
import static org.jboss.as.xts.XTSSubsystemDefinition.HOST_NAME;

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;

import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;

import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.operations.common.Util;
import org.jboss.as.controller.parsing.ParseUtils;
import org.jboss.as.controller.persistence.SubsystemMarshallingContext;
import org.jboss.dmr.ModelNode;
import org.jboss.staxmapper.XMLElementReader;
import org.jboss.staxmapper.XMLElementWriter;
import org.jboss.staxmapper.XMLExtendedStreamReader;
import org.jboss.staxmapper.XMLExtendedStreamWriter;

/**
 * StAX reader/writer for the XTS subsystem configuration. Reading produces a
 * single ADD operation for the subsystem; writing marshals each defined
 * attribute back as its own child element.
 *
 * @author <a href="mailto:[email protected]">Andrew Dinn</a>
 * @author <a href="mailto:[email protected]">Tomaz Cerar</a>
 */
class XTSSubsystemParser implements XMLStreamConstants, XMLElementReader<List<ModelNode>>, XMLElementWriter<SubsystemMarshallingContext> {

    /**
     * {@inheritDoc}
     *
     * Parses the subsystem element: no attributes are allowed on the root, each
     * expected child element may appear at most once (tracked via the
     * 'encountered' set), and which children are expected depends on the
     * schema namespace version (see getExpectedElements).
     */
    @Override
    public void readElement(XMLExtendedStreamReader reader, List<ModelNode> list) throws XMLStreamException {
        // no attributes
        if (reader.getAttributeCount() > 0) {
            throw ParseUtils.unexpectedAttribute(reader, 0);
        }

        final ModelNode subsystem = Util.getEmptyOperation(ADD, PathAddress.pathAddress(XTSExtension.SUBSYSTEM_PATH)
                .toModelNode());
        list.add(subsystem);

        final EnumSet<Element> encountered = EnumSet.noneOf(Element.class);
        final List<Element> expected = getExpectedElements(reader);

        while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {
            final Element element = Element.forName(reader.getLocalName());
            // rejects both unknown/out-of-version elements and duplicates
            if (!expected.contains(element) || !encountered.add(element)) {
                throw ParseUtils.unexpectedElement(reader);
            }

            switch (element) {
                case HOST: {
                    parseHostElement(reader, subsystem);
                    break;
                }
                case XTS_ENVIRONMENT: {
                    parseXTSEnvironmentElement(reader,subsystem);
                    break;
                }
                case DEFAULT_CONTEXT_PROPAGATION: {
                    parseDefaultContextPropagationElement(reader, subsystem);
                    break;
                }
                case ASYNC_REGISTRATION: {
                    parseAsyncRegistrationElement(reader, subsystem);
                    break;
                }
                default: {
                    throw ParseUtils.unexpectedElement(reader);
                }
            }
        }
    }

    /**
     * {@inheritDoc}
     *
     * Writes one child element per attribute that is defined on the model;
     * undefined attributes are omitted entirely.
     */
    @Override
    public void writeContent(XMLExtendedStreamWriter writer, SubsystemMarshallingContext context) throws XMLStreamException {
        context.startSubsystemElement(Namespace.CURRENT.getUriString(), false);

        ModelNode node = context.getModelNode();

        if (node.hasDefined(HOST_NAME.getName())) {
            writer.writeStartElement(Element.HOST.getLocalName());
            HOST_NAME.marshallAsAttribute(node, writer);
            writer.writeEndElement();
        }

        if (node.hasDefined(ENVIRONMENT_URL.getName())) {
            writer.writeStartElement(Element.XTS_ENVIRONMENT.getLocalName());
            ENVIRONMENT_URL.marshallAsAttribute(node, writer);
            writer.writeEndElement();
        }

        if (node.hasDefined(DEFAULT_CONTEXT_PROPAGATION.getName())) {
            writer.writeStartElement(Element.DEFAULT_CONTEXT_PROPAGATION.getLocalName());
            DEFAULT_CONTEXT_PROPAGATION.marshallAsAttribute(node, writer);
            writer.writeEndElement();
        }

        if (node.hasDefined(ASYNC_REGISTRATION.getName())) {
            writer.writeStartElement(Element.ASYNC_REGISTRATION.getLocalName());
            ASYNC_REGISTRATION.marshallAsAttribute(node, writer);
            writer.writeEndElement();
        }

        writer.writeEndElement();
    }

    /**
     * Handle the host element: requires a NAME attribute, allows no content.
     */
    private void parseHostElement(XMLExtendedStreamReader reader, ModelNode subsystem) throws XMLStreamException {
        final EnumSet<Attribute> required = EnumSet.of(Attribute.NAME);

        processAttributes(reader, (index, attribute) -> {
            required.remove(attribute);
            final String value = reader.getAttributeValue(index);
            switch (attribute) {
                case NAME:
                    HOST_NAME.parseAndSetParameter(value, subsystem, reader);
                    break;
                default:
                    throw ParseUtils.unexpectedAttribute(reader, index);
            }
        });

        // Handle elements
        ParseUtils.requireNoContent(reader);

        if (!required.isEmpty()) {
            throw ParseUtils.missingRequired(reader, required);
        }
    }

    /**
     * Handle the xts-environment element: optional URL attribute, no content.
     *
     * @param reader the stream reader
     * @param subsystem the subsystem ADD operation being populated
     * @throws javax.xml.stream.XMLStreamException on malformed input
     */
    private void parseXTSEnvironmentElement(XMLExtendedStreamReader reader, ModelNode subsystem) throws XMLStreamException {
        processAttributes(reader, (index, attribute) -> {
            final String value = reader.getAttributeValue(index);
            switch (attribute) {
                case URL:
                    ENVIRONMENT_URL.parseAndSetParameter(value, subsystem, reader);
                    break;
                default:
                    throw ParseUtils.unexpectedAttribute(reader, index);
            }
        });

        // Handle elements
        ParseUtils.requireNoContent(reader);
    }

    /**
     * Handle the default-context-propagation element. Unlike the async variant,
     * the ENABLED value is validated here to be literally "true" or "false"
     * (case-insensitive) before being set.
     *
     * @param reader the stream reader
     * @param subsystem the subsystem ADD operation being populated
     * @throws XMLStreamException on malformed input
     */
    private void parseDefaultContextPropagationElement(XMLExtendedStreamReader reader, ModelNode subsystem) throws XMLStreamException {
        processAttributes(reader, (index, attribute) -> {
            final String value = reader.getAttributeValue(index);
            switch (attribute) {
                case ENABLED:
                    if (value == null || (!value.toLowerCase(Locale.ENGLISH).equals("true")
                            && !value.toLowerCase(Locale.ENGLISH).equals("false"))) {
                        throw ParseUtils.invalidAttributeValue(reader, index);
                    }
                    DEFAULT_CONTEXT_PROPAGATION.parseAndSetParameter(value, subsystem, reader);
                    break;
                default:
                    throw ParseUtils.unexpectedAttribute(reader, index);
            }
        });

        // Handle elements
        ParseUtils.requireNoContent(reader);
    }

    /**
     * Handle the async-registration element: ENABLED attribute, no content.
     */
    private void parseAsyncRegistrationElement(XMLExtendedStreamReader reader, ModelNode subsystem) throws XMLStreamException {
        processAttributes(reader, (index, attribute) -> {
            final String value = reader.getAttributeValue(index);
            switch (attribute) {
                case ENABLED:
                    ASYNC_REGISTRATION.parseAndSetParameter(value, subsystem, reader);
                    break;
                default:
                    throw ParseUtils.unexpectedAttribute(reader, index);
            }
        });

        // Handle elements
        ParseUtils.requireNoContent(reader);
    }

    /**
     * Returns the child elements legal for the document's namespace version:
     * 1.0 allows only xts-environment; 2.0 adds host and
     * default-context-propagation; 3.0 adds async-registration.
     */
    private List<Element> getExpectedElements(final XMLExtendedStreamReader reader) {
        final Namespace namespace = Namespace.forUri(reader.getNamespaceURI());
        final List<Element> elements = new ArrayList<>();
        if (Namespace.XTS_1_0.equals(namespace)) {
            elements.add(Element.XTS_ENVIRONMENT);
        } else if (Namespace.XTS_2_0.equals(namespace)) {
            elements.add(Element.XTS_ENVIRONMENT);
            elements.add(Element.HOST);
            elements.add(Element.DEFAULT_CONTEXT_PROPAGATION);
        } else if (Namespace.XTS_3_0.equals(namespace)) {
            elements.add(Element.XTS_ENVIRONMENT);
            elements.add(Element.HOST);
            elements.add(Element.DEFAULT_CONTEXT_PROPAGATION);
            elements.add(Element.ASYNC_REGISTRATION);
        }
        return elements;
    }

    /**
     * Functional interface providing similar functionality as
     * java.util.function.BiConsumer but with the caught checked exception
     * {@link XMLStreamException} declared.
     */
    @FunctionalInterface
    private interface AttributeProcessor<T, R> {
        void process(T t, R r) throws XMLStreamException;
    }

    /**
     * Iterates over all attributes from the reader, rejecting namespaced
     * attributes, and invokes the callback with each (index, attribute) pair.
     *
     * @param reader reading the parameters from
     * @param attributeProcessorCallback callback being processed for each attribute
     * @throws XMLStreamException troubles parsing xml
     */
    private void processAttributes(final XMLExtendedStreamReader reader, AttributeProcessor<Integer, Attribute> attributeProcessorCallback) throws XMLStreamException {
        final int count = reader.getAttributeCount();
        for (int i = 0; i < count; i++) {
            ParseUtils.requireNoNamespaceAttribute(reader, i);
            // final String value = reader.getAttributeValue(i);
            final Attribute attribute = Attribute.forName(reader.getAttributeLocalName(i));
            attributeProcessorCallback.process(i, attribute);
        }
    }
}
bstansberry/wildfly
xts/src/main/java/org/jboss/as/xts/XTSSubsystemParser.java
214,494
/* * DBeaver - Universal Database Manager * Copyright (C) 2017 Andrew Khitrin ([email protected]) * Copyright (C) 2010-2024 DBeaver Corp and others * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.ui.locks.table; import org.eclipse.jface.viewers.IStructuredContentProvider; import org.eclipse.jface.viewers.Viewer; import org.eclipse.swt.widgets.Composite; import org.eclipse.ui.IWorkbenchSite; import org.jkiss.code.NotNull; import org.jkiss.dbeaver.model.admin.locks.DBAServerLock; import org.jkiss.dbeaver.model.admin.locks.DBAServerLockItem; import org.jkiss.dbeaver.model.admin.locks.DBAServerLockManager; import org.jkiss.dbeaver.model.exec.DBCExecutionContext; import org.jkiss.dbeaver.model.exec.DBCExecutionPurpose; import org.jkiss.dbeaver.model.exec.DBCSession; import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor; import org.jkiss.dbeaver.model.runtime.load.DatabaseLoadService; import org.jkiss.dbeaver.ui.LoadingJob; import org.jkiss.dbeaver.ui.controls.TreeContentProvider; import org.jkiss.dbeaver.ui.navigator.itemlist.DatabaseObjectListControl; import java.lang.reflect.InvocationTargetException; import java.util.Collection; import java.util.List; import java.util.Map; /** * Session table */ public class LockTable extends DatabaseObjectListControl<DBAServerLock> { private DBAServerLockManager<DBAServerLock,DBAServerLockItem> lockManager; public LockTable(Composite parent, int style, IWorkbenchSite site, 
DBAServerLockManager<DBAServerLock,DBAServerLockItem> lockManager) { super(parent, style, site, CONTENT_PROVIDER); this.lockManager = lockManager; } public DBAServerLockManager<DBAServerLock,DBAServerLockItem> getLockManager() { return lockManager; } @NotNull @Override protected String getListConfigId(List<Class<?>> classList) { return "Locks/" + lockManager.getDataSource().getContainer().getDriver().getId(); } @Override protected int getDataLoadTimeout() { return 20000; } @Override protected LoadingJob<Collection<DBAServerLock>> createLoadService(boolean forUpdate) { return LoadingJob.createService( new LoadLocksService(), new ObjectsLoadVisualizer()); } public LoadingJob<Void> createAlterService(DBAServerLock lock, Map<String, Object> options) { return LoadingJob.createService( new KillSessionByLockService(lock, options), new ObjectActionVisualizer()); } public void init(DBAServerLockManager<DBAServerLock, DBAServerLockItem> lockManager) { this.lockManager = lockManager; } private static IStructuredContentProvider CONTENT_PROVIDER = new TreeContentProvider() { // Use Tree provider for the grouping elements support in ObjectListControl @Override public Object[] getElements(Object inputElement) { if (inputElement instanceof Collection) { return ((Collection<?>)inputElement).toArray(); } return null; } @Override public Object[] getChildren(Object parentElement) { return new Object[0]; } @Override public boolean hasChildren(Object element) { return false; } @Override public void dispose() { } @Override public void inputChanged(Viewer viewer, Object oldInput, Object newInput) { } }; private class LoadLocksService extends DatabaseLoadService<Collection<DBAServerLock>> { protected LoadLocksService() { super("Load locks", lockManager.getDataSource()); } @Override public Collection<DBAServerLock> evaluate(DBRProgressMonitor monitor) throws InvocationTargetException, InterruptedException { try { try (DBCExecutionContext isolatedContext = 
lockManager.getDataSource().getDefaultInstance().openIsolatedContext(monitor, "View Locks", null)) { try (DBCSession session = isolatedContext.openSession(monitor, DBCExecutionPurpose.UTIL, "Retrieve server locks")) { return lockManager.getLocks(session, null).values(); } } } catch (Throwable ex) { throw new InvocationTargetException(ex); } } } private class KillSessionByLockService extends DatabaseLoadService<Void> { private final DBAServerLock lock; private final Map<String, Object> options; protected KillSessionByLockService(DBAServerLock lock, Map<String, Object> options) { super("Kill session by lock", lockManager.getDataSource()); this.lock = lock; this.options = options; } @Override public Void evaluate(DBRProgressMonitor monitor) throws InvocationTargetException, InterruptedException { try { try (DBCExecutionContext isolatedContext = lockManager.getDataSource().getDefaultInstance().openIsolatedContext(monitor, "View locks", null)) { try (DBCSession session = isolatedContext.openSession(monitor, DBCExecutionPurpose.UTIL, "Kill server session by lock")) { lockManager.alterSession(session, this.lock, options); return null; } } } catch (Throwable ex) { throw new InvocationTargetException(ex); } } } }
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.ui.locks/src/org/jkiss/dbeaver/ext/ui/locks/table/LockTable.java
214,495
/*
 * Polynomial.java
 *
 * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA  02110-1301  USA
 */

package dr.math;

//import org.apfloat.Apfloat;
//import org.apfloat.ApfloatMath;

import java.math.BigDecimal;
import java.math.MathContext;
import java.util.Arrays;

/**
 * A single-variable polynomial with several interchangeable representations
 * (log-space doubles, BigDecimal) supporting multiplication, integration and
 * evaluation. LogTricks (logSum/logDiff) is a helper defined elsewhere in dr.math.
 *
 * @author Marc A. Suchard
 */
public interface Polynomial extends Cloneable {

    public int getDegree();

    public Polynomial multiply(Polynomial b);

    public Polynomial integrate();

    public double evaluate(double x);

    public double logEvaluate(double x);

    public double logEvaluateHorner(double x);

    // Scales the polynomial; meaning of x depends on the representation
    // (e.g. LogDouble adds x to each log-coefficient).
    public void expand(double x);

    public Polynomial integrateWithLowerBound(double bound);

    public double getCoefficient(int n);

    public String getCoefficientString(int n);

    public void setCoefficient(int n, double x);

    public Polynomial getPolynomial();

    public Polynomial copy();

    /**
     * Skeletal implementation: provides toString() rendering
     * "c_d x^d + ... + c_0 x^0" from highest degree down.
     */
    public abstract class Abstract implements Polynomial {

        public abstract int getDegree();

        public abstract Polynomial multiply(Polynomial b);

        public abstract Polynomial integrate();

        public abstract double evaluate(double x);

        public abstract double getCoefficient(int n);

        public abstract void setCoefficient(int n, double x);

        public abstract Polynomial integrateWithLowerBound(double bound);

        public Polynomial getPolynomial() {
            return this;
        }

        public abstract double logEvaluate(double x);

        public abstract double logEvaluateHorner(double x);

        public abstract void expand(double x);

        public String toString() {
            StringBuffer bf = new StringBuffer();
            for (int n = getDegree(); n >= 0; n--) {
                bf.append(getCoefficientString(n));
                bf.append(X);
                bf.append(n);
                if (n > 0)
                    bf.append(" + ");
            }
            return bf.toString();
        }

        public abstract String getCoefficientString(int n);

        protected static final String FORMAT = "%3.2e";
        private static final String X = " x^";
    }

    /**
     * Coefficients stored as (log-magnitude, sign) pairs to avoid under/overflow;
     * arithmetic is done with LogTricks.logSum/logDiff.
     */
    public class LogDouble extends Abstract {

        // Converts plain coefficients to (log|c|, sign) form; log(0) = -Inf.
        public LogDouble(double[] coefficient) {
            this.logCoefficient = new double[coefficient.length];
            this.positiveCoefficient = new boolean[coefficient.length];
            for(int i=0; i<coefficient.length; i++) {
                if(coefficient[i] < 0) {
                    this.logCoefficient[i] = Math.log(-coefficient[i]);
                    this.positiveCoefficient[i] = false;
                } else {
                    this.logCoefficient[i] = Math.log(coefficient[i]);
                    this.positiveCoefficient[i] = true;
                }
            }
        }

        public double getLogCoefficient(int n) {
            return logCoefficient[n];
        }

        // In log space, scaling by e^x is adding x to each log-coefficient.
        public void expand(double x) {
            final int degree = getDegree();
            for(int i=0; i<=degree; i++)
                logCoefficient[i] = x + logCoefficient[i];
        }

        public String getCoefficientString(int n) {
            return String.format(FORMAT, getCoefficient(n));
        }

        // Takes already-log-space coefficients; null signs means all positive.
        public LogDouble(double[] logCoefficient, boolean[] positiveCoefficient) {
            this.logCoefficient = logCoefficient;
            if (positiveCoefficient != null)
                this.positiveCoefficient = positiveCoefficient;
            else {
                this.positiveCoefficient = new boolean[logCoefficient.length];
                Arrays.fill(this.positiveCoefficient,true);
            }
        }

        public LogDouble copy() {
            return new LogDouble(logCoefficient.clone(), positiveCoefficient.clone());
        }

        public int getDegree() {
            return logCoefficient.length - 1;
        }

        // Polynomial product: convolution of coefficients, accumulated in log
        // space with sign tracking.
        public LogDouble multiply(Polynomial inB) {
            if (!(inB.getPolynomial() instanceof LogDouble))
                throw new RuntimeException("yuck!");
            LogDouble b = (LogDouble) inB.getPolynomial();
            final int degreeA = getDegree();
            final int degreeB = b.getDegree();
            double[] newLogCoefficient = new double[degreeA + degreeB + 1];
            boolean[] newPositiveCoefficient = new boolean[degreeA + degreeB + 1];
            Arrays.fill(newLogCoefficient, java.lang.Double.NEGATIVE_INFINITY);
            Arrays.fill(newPositiveCoefficient, true);
            for (int n = 0; n <= degreeA; n++) {
                for (int m = 0; m <= degreeB; m++) {
                    final double change = logCoefficient[n] + b.logCoefficient[m];
                    final int nm = n + m;
                    final boolean positiveChange = !(positiveCoefficient[n] ^ b.positiveCoefficient[m]);
                    if (newLogCoefficient[nm] == java.lang.Double.NEGATIVE_INFINITY) {
                        // First contribution to this power: just store it.
                        newLogCoefficient[nm] = change;
                        newPositiveCoefficient[nm] = positiveChange;
                    } else {
                        if (change != 0.0) {
                            if (newPositiveCoefficient[nm] ^ positiveChange) {
                                // Sign difference, must subtract
                                if (newLogCoefficient[nm] > change)
                                    newLogCoefficient[nm] = LogTricks.logDiff(newLogCoefficient[nm], change);
                                else {
                                    newLogCoefficient[nm] = LogTricks.logDiff(change, newLogCoefficient[nm]);
                                    newPositiveCoefficient[nm] = !newPositiveCoefficient[nm]; // Switch signs
                                }
                            } else {
                                // Same signs, just add
                                newLogCoefficient[nm] = LogTricks.logSum(newLogCoefficient[nm], change);
                            }
                        }
                    }
                }
            }
            return new LogDouble(newLogCoefficient,newPositiveCoefficient);
        }

        // Antiderivative: coefficient of x^n becomes c_n/(n+1) at power n+1
        // (division is subtraction in log space); constant term set to log(0).
        public LogDouble integrate() {
            final int degree = getDegree();
            double[] newLogCoefficient = new double[degree + 2];
            boolean[] newPositiveCoefficient = new boolean[degree + 2];
            for (int n=0; n<=degree; n++) {
                newLogCoefficient[n+1] = logCoefficient[n] - Math.log(n+1);
                newPositiveCoefficient[n+1] = positiveCoefficient[n];
            }
            newLogCoefficient[0] = java.lang.Double.NEGATIVE_INFINITY;
            newPositiveCoefficient[0] = true;
            return new LogDouble(newLogCoefficient,newPositiveCoefficient);
        }

        public double evaluate(double x) {
            SignedLogDouble result = signedLogEvaluate(x);
            double value = Math.exp(result.value);
            if (!result.positive)
                value = -value;
            return value;
        }

        // Direct real-space evaluation; may under/overflow, unlike evaluate().
        public double evaluateAsReal(double x) {
            double result = 0;
            double xn = 1;
            for (int n = 0; n <= getDegree(); n++) {
                result += xn * getCoefficient(n);
                xn *= x;
            }
            return result;
        }

        // Returns NaN when the polynomial value at x is negative.
        public double logEvaluate(double x) {
            if (x < 0)
                throw new RuntimeException("Negative arguments not yet implemented in Polynomial.LogDouble");
            SignedLogDouble result = signedLogEvaluate(x);
            if (result.positive)
                return result.value;
            return java.lang.Double.NaN;
//            return -result.value;
        }

        public double logEvaluateHorner(double x) {
            if (x < 0)
                throw new RuntimeException("Negative arguments not yet implemented in Polynomial.LogDouble");
            SignedLogDouble result = signedLogEvaluateHorners(x);
            if (result.positive)
                return result.value;
            return java.lang.Double.NaN;
//            return -result.value;
        }

        public SignedLogDouble signedLogEvaluateHorners(double x) {
            // Using Horner's Rule
            final double logX = Math.log(x);
            final int degree = getDegree();
            double logResult = logCoefficient[degree];
            boolean positive = positiveCoefficient[degree];
            for(int n=degree-1; n>=0; n--) {
                logResult += logX;
                if (!(positiveCoefficient[n] ^ positive)) // Same sign
                    logResult = LogTricks.logSum(logResult,logCoefficient[n]);
                else { // Different signs
                    if (logResult > logCoefficient[n])
                        logResult = LogTricks.logDiff(logResult,logCoefficient[n]);
                    else {
                        logResult = LogTricks.logDiff(logCoefficient[n],logResult);
                        positive = !positive;
                    }
                }
            }
            return new SignedLogDouble(logResult,positive);
        }

        // Term-by-term accumulation of n*log(x) + log|c_n| with sign tracking.
        private SignedLogDouble signedLogEvaluate(double x) {
            final double logX = Math.log(x);
            final int degree = getDegree();
            double logResult = logCoefficient[0];
            boolean positive = positiveCoefficient[0];
            for(int n=1; n<=degree; n++) {
//                logResult += logX;
                final double value = n*logX + logCoefficient[n];
                if (!(positiveCoefficient[n] ^ positive)) // Same sign
                    logResult = LogTricks.logSum(logResult,value);
                else { // Different signs
                    if (logResult > value)
                        logResult = LogTricks.logDiff(logResult,value);
                    else {
                        logResult = LogTricks.logDiff(value,logResult);
                        positive = !positive;
                    }
                }
            }
            return new SignedLogDouble(logResult,positive);
        }

        public double getCoefficient(int n) {
            double coef = Math.exp(logCoefficient[n]);
            if (!positiveCoefficient[n])
                coef *= -1;
            return coef;
        }

        public void setCoefficient(int n, double x) {
            if (x < 0) {
                positiveCoefficient[n] = false;
                x = -x;
            } else
                positiveCoefficient[n] = true;
            logCoefficient[n] = Math.log(x);
        }

        // Definite-integral form: antiderivative with its value at 'bound'
        // folded into the constant term with flipped sign.
        public Polynomial integrateWithLowerBound(double bound) {
            LogDouble integrand = integrate();
            SignedLogDouble signedLogDouble = integrand.signedLogEvaluate(bound);
            integrand.logCoefficient[0] = signedLogDouble.value;
            integrand.positiveCoefficient[0] = !signedLogDouble.positive;
            return integrand;
        }

        double[] logCoefficient;       // log-magnitudes of the coefficients
        boolean[] positiveCoefficient; // true = non-negative coefficient

        /** A (log-magnitude, sign) pair representing a signed real value. */
        class SignedLogDouble {
            double value;
            boolean positive;
            SignedLogDouble(double value, boolean positive) {
                this.value = value;
                this.positive = positive;
            }
        }
    }

    /**
     * Arbitrary-precision representation backed by BigDecimal coefficients.
     * expand() is not implemented and throws.
     */
    public class BigDouble extends Abstract {

        // 1000 significant digits for division in integrate().
        private static MathContext precision = new MathContext(1000);

        public BigDouble(double[] doubleCoefficient) {
            this.coefficient = new BigDecimal[doubleCoefficient.length];
            for(int i=0; i<doubleCoefficient.length; i++)
                coefficient[i] = new BigDecimal(doubleCoefficient[i]);
        }

        public BigDouble copy() {
            return new BigDouble(coefficient.clone());
        }

        public String getCoefficientString(int n) {
            return coefficient[n].toString();
        }

        public void expand(double x) {
            throw new RuntimeException("Not yet implement: Polynomial.BigDouble.expand()");
        }

        public BigDouble(BigDecimal[] coefficient) {
            this.coefficient = coefficient;
        }

        public int getDegree() {
            return coefficient.length - 1;
        }

        // Polynomial product via coefficient convolution in BigDecimal arithmetic.
        public BigDouble multiply(Polynomial b) {
            if (!(b.getPolynomial() instanceof BigDouble))
                throw new RuntimeException("Incompatiable polynomial types");
            BigDouble bd = (BigDouble) b.getPolynomial();
            BigDecimal[] newCoefficient = new BigDecimal[getDegree() + bd.getDegree()+1];
            for(int i=0; i<newCoefficient.length; i++)
                newCoefficient[i] = new BigDecimal(0.0);
            for(int n=0; n<=getDegree(); n++) {
                for(int m=0; m<=bd.getDegree(); m++)
                    newCoefficient[n+m] = newCoefficient[n+m].add(coefficient[n].multiply(bd.coefficient[m]));
            }
            return new BigDouble(newCoefficient);
        }

        public BigDouble integrate() {
            BigDecimal[] newCoefficient = new BigDecimal[getDegree()+2];
            for(int n=0; n<=getDegree(); n++) {
                newCoefficient[n+1] = coefficient[n].divide(new BigDecimal(n+1),precision);
            }
            newCoefficient[0] = new BigDecimal(0.0);
            return new BigDouble(newCoefficient);
        }

        public double evaluate(double x) {
            return evaluateBigDecimal(new BigDecimal(x)).doubleValue();
        }

        // BigDecimalUtils is a helper defined elsewhere in this package.
        public double logEvaluate(double x) {
            return BigDecimalUtils.ln(evaluateBigDecimal(new BigDecimal(x)),10).doubleValue();
        }

        // No separate Horner implementation for this representation.
        public double logEvaluateHorner(double x) {
            return logEvaluate(x);
        }

        protected BigDecimal evaluateBigDecimal(BigDecimal x) {
            BigDecimal result = new BigDecimal(0.0);
            BigDecimal xn = new BigDecimal(1.0);
            for(int n=0; n<=getDegree(); n++) {
                result = result.add(coefficient[n].multiply(xn));
                xn = xn.multiply(x);
            }
            return result;
        }

        public double getCoefficient(int n) {
            return coefficient[n].doubleValue();
        }

        public void setCoefficient(int n, double x) {
            coefficient[n] = new BigDecimal(x);
        }

        public Polynomial integrateWithLowerBound(double bound) {
            BigDouble integrand = integrate();
            final BigDecimal x0 = integrand.evaluateBigDecimal(new BigDecimal(bound));
            integrand.coefficient[0] = x0.multiply(new BigDecimal(-1.0));
            return integrand;
        }

        public void setCoefficient(int n, BigDecimal x) {
            coefficient[n] = x;
        }

        BigDecimal[] coefficient;
    }

//    public class APDouble extends Abstract {
//
//        public String getCoefficientString(int n) {
//            return coefficient[n].toString();
//        }
//
//        public APDouble copy() {
//            return new APDouble(coefficient.clone());
//        }
//
//        public APDouble(double[] doubleCoefficient) {
//            this.coefficient = new Apfloat[doubleCoefficient.length];
//            for(int i=0; i<doubleCoefficient.length; i++)
//                coefficient[i] = new Apfloat(doubleCoefficient[i]);
//        }
//
//        public APDouble(Apfloat[] coefficient) {
//            this.coefficient = coefficient;
//        }
//
//        public int getDegree() {
//            return coefficient.length - 1;
//        }
//
//        public APDouble multiply(Polynomial b) {
//            if (!(b.getPolynomial() instanceof APDouble))
//                throw new RuntimeException("Incompatiable polynomial types");
//            APDouble bd = (APDouble) b.getPolynomial();
//            Apfloat[] newCoefficient = new Apfloat[getDegree() + bd.getDegree()+1];
//            for(int i=0; i<newCoefficient.length; i++)
//                newCoefficient[i] = new Apfloat(0.0);
//            for(int n=0; n<=getDegree(); n++) {
//                for(int m=0; m<=bd.getDegree(); m++)
//                    newCoefficient[n+m] = newCoefficient[n+m].add(coefficient[n].multiply(bd.coefficient[m]));
//            }
//            return new APDouble(newCoefficient);
//        }
//
//        public APDouble integrate() {
//            Apfloat[] newCoefficient = new Apfloat[getDegree()+2];
//            for(int n=0; n<=getDegree(); n++) {
//                newCoefficient[n+1] = coefficient[n].divide(new Apfloat(n+1));
//            }
//            newCoefficient[0] = new Apfloat(0.0);
//            return new APDouble(newCoefficient);
//        }
//
//        public double evaluate(double x) {
//            return evaluateAPDouble(new Apfloat(x)).doubleValue();
//        }
//
//        public 
double logEvaluate(double x) { // Apfloat result = evaluateAPDouble(new Apfloat((x))); // if (result.doubleValue() == 0) // return java.lang.Double.NEGATIVE_INFINITY; // return ApfloatMath.log(result).doubleValue(); // } // // public double logEvaluateHorner(double x) { // return logEvaluateInLogSpace(x); // } // // private static double log(Apfloat x) { // double log = ApfloatMath.log(x).doubleValue(); // if (java.lang.Double.isInfinite(log)) // throw new RuntimeException("Still infinite"); // return log; // } // // private static boolean positive(Apfloat x) { // return x.signum() != -1; // } // // public double logEvaluateInLogSpace(double x) { // // Using Horner's Rule // final double logX = Math.log(x); // final int degree = getDegree(); // boolean positive = positive(coefficient[degree]); // double logResult; // if (positive) // logResult = log(coefficient[degree]); // else // logResult = log(coefficient[degree].negate()); // for(int n=degree-1; n>=0; n--) { // logResult += logX; // if (coefficient[n].signum() != 0) { // final boolean nextPositive = positive(coefficient[n]); // double logNextValue; // if (nextPositive) // logNextValue = log(coefficient[n]); // else // logNextValue = log(coefficient[n].negate()); // if(!(nextPositive ^ positive)) // Same sign // logResult = LogTricks.logSum(logResult,logNextValue); // else { // Different signs // if (logResult > logNextValue) // logResult = LogTricks.logDiff(logResult,logNextValue); // else { // logResult = LogTricks.logDiff(logNextValue,logResult); // positive = !positive; // } // } // } // } // if (!positive) // logResult = -logResult; // return logResult; // } // // protected Apfloat evaluateAPDouble(Apfloat x) { // Apfloat result = new Apfloat(0.0); // Apfloat xn = new Apfloat(1.0); // for(int n=0; n<=getDegree(); n++) { // result = result.add(coefficient[n].multiply(xn)); // xn = xn.multiply(x); // } // // TODO Rewrite using Horner's Rule // return result; // } // // public double getCoefficient(int n) { 
// return coefficient[n].doubleValue(); // } // // public void setCoefficient(int n, double x) { // coefficient[n] = new Apfloat(x); // } // // public Polynomial integrateWithLowerBound(double bound) { // APDouble integrand = integrate(); // final Apfloat x0 = integrand.evaluateAPDouble(new Apfloat(bound)); // integrand.coefficient[0] = x0.multiply(new Apfloat(-1.0)); // return integrand; // } // // public void setCoefficient(int n, Apfloat x) { // coefficient[n] = x; // } // // Apfloat[] coefficient; // } public class Double extends Abstract { public Double(double[] coefficient) { this.coefficient = coefficient; } public Double copy() { return new Double(coefficient.clone()); } public Double(Polynomial polynomial) { this.coefficient = new double[polynomial.getDegree() + 1]; for (int n = 0; n <= polynomial.getDegree(); n++) coefficient[n] = polynomial.getCoefficient(n); } public void expand(double x) { final int degree = getDegree(); for(int i=0; i<=degree; i++) coefficient[i] = x * coefficient[i]; } public String getCoefficientString(int n) { return String.format(FORMAT, getCoefficient(n)); } public int getDegree() { return coefficient.length - 1; } public double getCoefficient(int n) { return coefficient[n]; } public double logEvaluate(double x) { return Math.log(evaluate(x)); } public double logEvaluateQuick(double x, int n) { return Math.log(evaluateQuick(x,n)); } public double logEvaluateHorner(double x) { // Uses Horner's Rule in log scale final int degree = getDegree(); final double logX = Math.log(x); boolean positive = coefficient[degree] > 0; double logResult; if (positive) logResult = Math.log(coefficient[degree]); else logResult = Math.log(-coefficient[degree]); for(int n=degree-1; n>=0; n--) { logResult += logX; boolean positiveCoefficient = coefficient[n] > 0; double logCoefficient; if (positiveCoefficient) logCoefficient = Math.log(coefficient[n]); else logCoefficient = Math.log(-coefficient[n]); if (!(positiveCoefficient ^ positive)) // Same sign 
logResult = LogTricks.logSum(logResult,logCoefficient); else { // Different signs if (logResult > logCoefficient) logResult = LogTricks.logDiff(logResult,logCoefficient); else { logResult = LogTricks.logDiff(logCoefficient,logResult); positive = !positive; } } } if (!positive) return java.lang.Double.NaN; return logResult; } public Polynomial.Double multiply(Polynomial b) { double[] newCoefficient = new double[getDegree() + b.getDegree() + 1]; for (int n = 0; n <= getDegree(); n++) { for (int m = 0; m <= b.getDegree(); m++) { newCoefficient[n + m] += coefficient[n] * b.getCoefficient(m); } } return new Double(newCoefficient); } public Polynomial.Double integrate() { double[] newCoefficient = new double[getDegree() + 2]; for (int n = 0; n <= getDegree(); n++) { newCoefficient[n + 1] = coefficient[n] / (n + 1); } return new Double(newCoefficient); } public double evaluateHorners(double x) { // Uses Horner's Rule final int degree = getDegree(); double result = coefficient[degree]; for (int n=degree-1; n>=0; n--) result = result*x + coefficient[n]; return result; } public double evaluateQuick(double x, int n) { int m = getDegree(); double xm = Math.pow(x,m); double result = xm * coefficient[m]; for (int i=n-1; i>0; i--) { xm /= x; m--; result += xm * coefficient[m]; } return result; } public double evaluate(double x) { double result = 0.0; double xn = 1; for (int n = 0; n <= getDegree(); n++) { result += xn * coefficient[n]; xn *= x; } return result; } public Polynomial integrateWithLowerBound(double bound) { Double integrand = integrate(); // System.err.println("integrand = "+integrand); integrand.coefficient[0] = -integrand.evaluate(bound); return integrand; } public void setCoefficient(int n, double x) { coefficient[n] = x; } double[] coefficient; } public enum Type { DOUBLE, APDOUBLE, LOG_DOUBLE, BIG_DOUBLE//, RATIONAL } }
maxbiostat/beast-mcmc
src/dr/math/Polynomial.java
214,496
/* * MathUtils.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.math; import java.text.NumberFormat; import java.text.ParseException; import dr.util.NumberFormatter; /** * Handy utility functions which have some Mathematical relavance. * * @author Matthew Goode * @author Alexei Drummond * @author Gerton Lunter * @version $Id: MathUtils.java,v 1.13 2006/08/31 14:57:24 rambaut Exp $ */ public class MathUtils { private MathUtils() { } /** * A random number generator that is initialized with the clock when this * class is loaded into the JVM. Use this for all random numbers. * Note: This method or getting random numbers in not thread-safe. Since * MersenneTwisterFast is currently (as of 9/01) not synchronized using * this function may cause concurrency issues. Use the static get methods of the * MersenneTwisterFast class for access to a single instance of the class, that * has synchronization. 
*/ private static final MersenneTwisterFast random = MersenneTwisterFast.DEFAULT_INSTANCE; // Chooses one category if a cumulative probability distribution is given public static int randomChoice(double[] cf) { double U = MathUtils.nextDouble(); int s; if (U <= cf[0]) { s = 0; } else { for (s = 1; s < cf.length; s++) { if (U <= cf[s] && U > cf[s - 1]) { break; } } } return s; } /** * @param pdf array of unnormalized probabilities * @return a sample according to an unnormalized probability distribution */ public static int randomChoicePDF(double[] pdf) { double U = MathUtils.nextDouble() * getTotal(pdf); for (int i = 0; i < pdf.length; i++) { U -= pdf[i]; if (U < 0.0) { return i; } } for (int i = 0; i < pdf.length; i++) { System.out.println(i + "\t" + pdf[i]); } throw new Error("randomChoicePDF falls through -- negative, infinite or NaN components in input " + "distribution, or all zeroes?"); } /** * @param logpdf array of unnormalised log probabilities * @return a sample according to an unnormalised probability distribution * <p/> * Use this if probabilities are rounding to zero when converted to real space */ public static int randomChoiceLogPDF(double[] logpdf) { double scalingFactor = Double.NEGATIVE_INFINITY; for (double aLogpdf : logpdf) { if (aLogpdf > scalingFactor) { scalingFactor = aLogpdf; } } if (scalingFactor == Double.NEGATIVE_INFINITY) { throw new Error("randomChoiceLogPDF falls through -- all -INF components in input distribution"); } for (int j = 0; j < logpdf.length; j++) { logpdf[j] = logpdf[j] - scalingFactor; } double[] pdf = new double[logpdf.length]; for (int j = 0; j < logpdf.length; j++) { pdf[j] = Math.exp(logpdf[j]); } return randomChoicePDF(pdf); } /** * @param array to normalize * @return a new double array where all the values sum to 1. * Relative ratios are preserved. 
*/ public static double[] getNormalized(double[] array) { double[] newArray = new double[array.length]; double total = getTotal(array); for (int i = 0; i < array.length; i++) { newArray[i] = array[i] / total; } return newArray; } /** * @param array entries to be summed * @param start start position * @param end the index of the element after the last one to be included * @return the total of a the values in a range of an array */ public static double getTotal(double[] array, int start, int end) { double total = 0.0; for (int i = start; i < end; i++) { total += array[i]; } return total; } /** * @param array to sum over * @return the total of the values in an array */ public static double getTotal(double[] array) { return getTotal(array, 0, array.length); } // ===================== (Synchronized) Static access methods to the private random instance =========== /** * Access a default instance of this class, access is synchronized */ public static long getSeed() { synchronized (random) { return random.getSeed(); } } /** * Access a default instance of this class, access is synchronized */ public static void setSeed(long seed) { synchronized (random) { random.setSeed(seed); } } /** * Access a default instance of this class, access is synchronized */ public static byte nextByte() { synchronized (random) { return random.nextByte(); } } /** * Access a default instance of this class, access is synchronized */ public static boolean nextBoolean() { synchronized (random) { return random.nextBoolean(); } } /** * Access a default instance of this class, access is synchronized */ public static void nextBytes(byte[] bs) { synchronized (random) { random.nextBytes(bs); } } /** * Access a default instance of this class, access is synchronized */ public static char nextChar() { synchronized (random) { return random.nextChar(); } } /** * Access a default instance of this class, access is synchronized */ public static double nextGaussian() { synchronized (random) { return 
random.nextGaussian(); } } //Mean = alpha / lambda //Variance = alpha / (lambda*lambda) public static double nextGamma(double alpha, double lambda) { synchronized (random) { return random.nextGamma(alpha, lambda); } } //Mean = alpha/(alpha+beta) //Variance = (alpha*beta)/(alpha+beta)^2*(alpha+beta+1) public static double nextBeta(double alpha, double beta) { double x = nextGamma(alpha, 1); double y = nextGamma(beta, 1); return x / (x + y); } /** * Access a default instance of this class, access is synchronized * * @return a pseudo random double precision floating point number in [01) */ public static double nextDouble() { synchronized (random) { return random.nextDouble(); } } /** * @return log of random variable in [0,1] */ public static double randomLogDouble() { return Math.log(nextDouble()); } /** * Access a default instance of this class, access is synchronized */ public static double nextExponential(double lambda) { synchronized (random) { return -1.0 * Math.log(1 - random.nextDouble()) / lambda; } } /** * Access a default instance of this class, access is synchronized */ public static double nextInverseGaussian(double mu, double lambda) { synchronized (random) { /* CODE TAKEN FROM WIKIPEDIA. 
TESTING DONE WITH RESULTS GENERATED IN R AND LOOK COMPARABLE */ double v = random.nextGaussian(); // sample from a normal distribution with a mean of 0 and 1 standard deviation double y = v * v; double x = mu + (mu * mu * y) / (2 * lambda) - (mu / (2 * lambda)) * Math.sqrt(4 * mu * lambda * y + mu * mu * y * y); double test = MathUtils.nextDouble(); // sample from a uniform distribution between 0 and 1 if (test <= (mu) / (mu + x)) { return x; } else { return (mu * mu) / x; } } } /** * Access a default instance of this class, access is synchronized */ public static float nextFloat() { synchronized (random) { return random.nextFloat(); } } /** * Access a default instance of this class, access is synchronized */ public static long nextLong() { synchronized (random) { return random.nextLong(); } } /** * Access a default instance of this class, access is synchronized */ public static short nextShort() { synchronized (random) { return random.nextShort(); } } /** * Access a default instance of this class, access is synchronized */ public static int nextInt() { synchronized (random) { return random.nextInt(); } } /** * Access a default instance of this class, access is synchronized */ public static int nextInt(int n) { synchronized (random) { return random.nextInt(n); } } /** * @param low * @param high * @return uniform between low and high */ public static double uniform(double low, double high) { return low + nextDouble() * (high - low); } /** * Shuffles an array. */ public static void shuffle(int[] array) { synchronized (random) { random.shuffle(array); } } /** * Shuffles an array. Shuffles numberOfShuffles times */ public static void shuffle(int[] array, int numberOfShuffles) { synchronized (random) { random.shuffle(array, numberOfShuffles); } } /** * Returns an array of shuffled indices of length l. * * @param l length of the array required. 
*/ public static int[] shuffled(int l) { synchronized (random) { return random.shuffled(l); } } public static int[] sampleIndicesWithReplacement(int length) { synchronized (random) { int[] result = new int[length]; for (int i = 0; i < length; i++) result[i] = random.nextInt(length); return result; } } /** * Permutes an array. */ public static void permute(int[] array) { synchronized (random) { random.permute(array); } } /** * Returns a uniform random permutation of 0,...,l-1 * * @param l length of the array required. */ public static int[] permuted(int l) { synchronized (random) { return random.permuted(l); } } public static double logHyperSphereVolume(int dimension, double radius) { return dimension * (0.5723649429247001 + Math.log(radius)) + -GammaFunction.lnGamma(dimension / 2.0 + 1.0); } /** * Returns sqrt(a^2 + b^2) without under/overflow. */ public static double hypot(double a, double b) { double r; if (Math.abs(a) > Math.abs(b)) { r = b / a; r = Math.abs(a) * Math.sqrt(1 + r * r); } else if (b != 0) { r = a / b; r = Math.abs(b) * Math.sqrt(1 + r * r); } else { r = 0.0; } return r; } /** * return double *.???? * * @param value * @param sf * @return */ public static double round(double value, int sf) { NumberFormatter formatter = new NumberFormatter(sf); try { return NumberFormat.getInstance().parse(formatter.format(value)).doubleValue(); } catch (ParseException e) { return value; } } public static int[] getRandomState() { synchronized (random) { return random.getRandomState(); } } public static void setRandomState(int[] rngState) { synchronized (random) { random.setRandomState(rngState); } } public static boolean isClose(double[] x, double[] y, double tolerance) { if (x.length != y.length) return false; for (int i = 0, dim = x.length; i < dim; ++i) { if (Math.abs(x[i] - y[i]) > tolerance) return false; } return true; } }
acorg/beast-mcmc
src/dr/math/MathUtils.java
214,497
/* * * Copyright (C) Andrew Smith 2013 * * Usage: java MCast [-v] code toaddr port replyport wait * * If any are missing or blank they use the defaults: * * -v means report how long the last reply took * * code = 'FTW' * toaddr = '224.0.0.75' * port = '4028' * replyport = '4027' * wait = '1000' * */ import java.net.*; import java.io.*; import java.util.*; class MCast implements Runnable { static private final String MCAST_CODE = "FTW"; static private final String MCAST_ADDR = "224.0.0.75"; static private final int MCAST_PORT = 4028; static private final int MCAST_REPORT = 4027; static private final int MCAST_WAIT4 = 1000; static private String code = MCAST_CODE; static private String addr = MCAST_ADDR; static private int port = MCAST_PORT; static private int report = MCAST_REPORT; static private int wait4 = MCAST_WAIT4; private InetAddress mcast_addr = null; static private final Integer lock = new Integer(666); static private boolean ready = false; static private Thread listen = null; static public boolean verbose = false; static private Date start = null; static private Date last = null; static boolean got_last = false; static public void usAge() { System.err.println("usAge: java MCast [-v] [code [toaddr [port [replyport [wait]]]]]"); System.err.println(" -v=report elapsed ms to last reply"); System.err.println(" Anything below missing or blank will use it's default"); System.err.println(" code=X in sgminer-X-Port default="+MCAST_CODE); System.err.println(" toaddr=multicast address default="+MCAST_ADDR); System.err.println(" port=multicast port default="+MCAST_PORT); System.err.println(" replyport=local post to listen for replies default="+MCAST_REPORT); System.err.println(" wait=how long to wait for replies default="+MCAST_WAIT4+"ms"); System.exit(1); } private int port(String _port, String name) { int tmp = 0; try { tmp = Integer.parseInt(_port); } catch (NumberFormatException nfe) { System.err.println("Invalid " + name + " - must be a number between 1 and 
65535"); usAge(); System.exit(1); } if (tmp < 1 || tmp > 65535) { System.err.println("Invalid " + name + " - must be a number between 1 and 65535"); usAge(); System.exit(1); } return tmp; } public void set_code(String _code) { if (_code.length() > 0) code = _code; } public void set_addr(String _addr) { if (_addr.length() > 0) { addr = _addr; try { mcast_addr = InetAddress.getByName(addr); } catch (Exception e) { System.err.println("ERR: Invalid multicast address"); usAge(); System.exit(1); } } } public void set_port(String _port) { if (_port.length() > 0) port = port(_port, "port"); } public void set_report(String _report) { if (_report.length() > 0) report = port(_report, "reply port"); } public void set_wait(String _wait4) { if (_wait4.length() > 0) { try { wait4 = Integer.parseInt(_wait4); } catch (NumberFormatException nfe) { System.err.println("Invalid wait - must be a number between 0ms and 60000ms"); usAge(); System.exit(1); } if (wait4 < 0 || wait4 > 60000) { System.err.println("Invalid wait - must be a number between 0ms and 60000ms"); usAge(); System.exit(1); } } } public void run() // listen { byte[] message = new byte[1024]; DatagramSocket socket = null; DatagramPacket packet = null; try { socket = new DatagramSocket(report); packet = new DatagramPacket(message, message.length); synchronized (lock) { ready = true; } while (true) { socket.receive(packet); synchronized (lock) { last = new Date(); } int off = packet.getOffset(); int len = packet.getLength(); System.out.println("Got: '" + new String(message, off, len) + "' from" + packet.getSocketAddress()); } } catch (Exception e) { socket.close(); } } public void sendMCast() { try { String message = new String("sgminer-" + code + "-" + report); MulticastSocket socket = null; DatagramPacket packet = null; socket = new MulticastSocket(); packet = new DatagramPacket(message.getBytes(), message.length(), mcast_addr, port); System.out.println("About to send " + message + " to " + mcast_addr + ":" + port); 
start = new Date(); socket.send(packet); socket.close(); } catch (Exception e) { e.printStackTrace(); } } public void init() { MCast lis = new MCast(); listen = new Thread(lis); listen.start(); while (true) { synchronized (lock) { if (ready) break; } try { Thread.sleep(100); } catch (Exception sl1) { } } try { Thread.sleep(500); } catch (Exception sl2) { } sendMCast(); try { Thread.sleep(wait4); } catch (Exception sl3) { } listen.interrupt(); if (verbose) { try { Thread.sleep(100); } catch (Exception sl4) { } synchronized (lock) { if (last == null) System.out.println("No replies received"); else { long diff = last.getTime() - start.getTime(); System.out.println("Last reply took " + diff + "ms"); } } } System.exit(0); } public MCast() { } public static void main(String[] params) throws Exception { int p = 0; MCast mcast = new MCast(); mcast.set_addr(MCAST_ADDR); if (params.length > p) { if (params[p].equals("-?") || params[p].equalsIgnoreCase("-h") || params[p].equalsIgnoreCase("-help") || params[p].equalsIgnoreCase("--help")) MCast.usAge(); else { if (params[p].equals("-v")) { mcast.verbose = true; p++; } if (params.length > p) { mcast.set_code(params[p++]); if (params.length > p) { mcast.set_addr(params[p++]); if (params.length > p) { mcast.set_port(params[p++]); if (params.length > p) { mcast.set_report(params[p++]); if (params.length > p) mcast.set_wait(params[p]); } } } } } } mcast.init(); } }
tpruvot/sph-sgminer-tribus
MCast.java
214,498
/****************************************************************************** * Top contributors (to current version): * Mudathir Mohamed, Andrew Reynolds, Aina Niemetz * * This file is part of the cvc5 project. * * Copyright (c) 2009-2022 by the authors listed in the file AUTHORS * in the top-level source directory and their institutional affiliations. * All rights reserved. See the file COPYING in the top-level source * directory for licensing information. * **************************************************************************** * * The cvc5 java API. */ package io.github.cvc5; /** * A cvc5 datatype selector. */ public class DatatypeSelector extends AbstractPointer { // region construction and destruction DatatypeSelector(Solver solver, long pointer) { super(solver, pointer); } protected native void deletePointer(long pointer); public long getPointer() { return pointer; } // endregion /** @return The Name of this Datatype selector. */ public String getName() { return getName(pointer); } private native String getName(long pointer); /** * Get the selector term of this datatype selector. * * Selector terms are a class of function-like terms of selector * sort (Sort::isDatatypeSelector), and should be used as the first * argument of Terms of kind APPLY_SELECTOR. * * @return The Selector term. */ public Term getTerm() { long termPointer = getTerm(pointer); return new Term(solver, termPointer); } private native long getTerm(long pointer); /** * Get the updater term of this datatype selector. * * Similar to selectors, updater terms are a class of function-like terms of * updater Sort (Sort::isDatatypeUpdater), and should be used as the first * argument of Terms of kind APPLY_UPDATER. * * @return The Updater term. */ public Term getUpdaterTerm() { long termPointer = getUpdaterTerm(pointer); return new Term(solver, termPointer); } private native long getUpdaterTerm(long pointer); /** @return The Codomain sort of this selector. 
*/ public Sort getCodomainSort() { long sortPointer = getCodomainSort(pointer); return new Sort(solver, sortPointer); } private native long getCodomainSort(long pointer); /** * @return True If this DatatypeSelector is a null object. */ public boolean isNull() { return isNull(pointer); } private native boolean isNull(long pointer); /** * @return A String representation of this datatype selector. */ protected native String toString(long pointer); }
HanielB/cvc5
src/api/java/io/github/cvc5/DatatypeSelector.java
214,499
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe; import java.io.*; import java.net.URI; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Paths; import cc.mallet.types.Instance; import cc.mallet.util.CharSequenceLexer; /** * Pipe that can read from various kinds of text sources * (either URI, File, or Reader) into a CharSequence * * @version $Id: Input2CharSequence.java,v 1.1 2007/10/22 21:37:39 mccallum Exp $ */ public class Input2CharSequence extends Pipe implements Serializable { String encoding = null; public Input2CharSequence () {} public Input2CharSequence( String encoding ) { this.encoding = encoding; } @Override public Instance pipe (Instance carrier) { try { if (carrier.getData() instanceof URI) carrier.setData(pipe ((URI)carrier.getData())); else if (carrier.getData() instanceof File) carrier.setData(pipe ((File)carrier.getData())); else if (carrier.getData() instanceof Reader) carrier.setData(pipe ((Reader)carrier.getData())); else if (carrier.getData() instanceof CharSequence) ; // No conversion necessary else throw new IllegalArgumentException ("Does not handle class "+carrier.getData().getClass()); } catch (java.io.IOException e) { throw new IllegalArgumentException ("IOException " + e); } // System.out.println(carrier.getData().toString()); return carrier; } public CharSequence pipe (URI uri) throws java.io.FileNotFoundException, java.io.IOException { if (! 
uri.getScheme().equals("file")) throw new UnsupportedOperationException ("Only file: scheme implemented."); return pipe (new File (uri.getPath())); } public CharSequence pipe (File file) throws java.io.FileNotFoundException, java.io.IOException { BufferedReader br = null; if (encoding == null) { br = Files.newBufferedReader (file.toPath(), Charset.defaultCharset()); } else { br = new BufferedReader( new InputStreamReader(new FileInputStream(file), encoding) ); } CharSequence cs = pipe(br); br.close(); return cs; } public CharSequence pipe (Reader reader) throws java.io.IOException { final int BUFSIZE = 2048; char[] buf = new char[BUFSIZE]; int count; StringBuilder sb = new StringBuilder (BUFSIZE); do { count = reader.read (buf, 0, BUFSIZE); if (count == -1) break; //System.out.println ("count="+count); sb.append (buf, 0, count); } while (count == BUFSIZE); return sb; } public CharSequence pipe (CharSequence cs) { return cs; } // Serialization private static final long serialVersionUID = 2; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); if (encoding == null) { out.writeObject("null"); } else { out.writeObject(encoding); } } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); this.encoding = (String) in.readObject(); if (encoding.equals("null")) { encoding = null; } } }
lebiathan/Mallet
src/cc/mallet/pipe/Input2CharSequence.java
214,501
/* * MathUtils.java * * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard * * This file is part of BEAST. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership and licensing. * * BEAST is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * BEAST is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with BEAST; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301 USA */ package dr.math; import dr.util.NumberFormatter; import java.text.NumberFormat; import java.text.ParseException; /** * Handy utility functions which have some Mathematical relavance. * * @author Matthew Goode * @author Alexei Drummond * @author Gerton Lunter * @version $Id: MathUtils.java,v 1.13 2006/08/31 14:57:24 rambaut Exp $ */ public class MathUtils { private MathUtils() { } /** * A random number generator that is initialized with the clock when this * class is loaded into the JVM. Use this for all random numbers. * Note: This method or getting random numbers in not thread-safe. Since * MersenneTwisterFast is currently (as of 9/01) not synchronized using * this function may cause concurrency issues. Use the static get methods of the * MersenneTwisterFast class for access to a single instance of the class, that * has synchronization. 
*/ private static final MersenneTwisterFast random = MersenneTwisterFast.DEFAULT_INSTANCE; // Chooses one category if a cumulative probability distribution is given public static int randomChoice(double[] cf) { double U = MathUtils.nextDouble(); int s; if (U <= cf[0]) { s = 0; } else { for (s = 1; s < cf.length; s++) { if (U <= cf[s] && U > cf[s - 1]) { break; } } } return s; } /** * @param pdf array of unnormalized probabilities * @return a sample according to an unnormalized probability distribution */ public static int randomChoicePDF(double[] pdf) { double U = MathUtils.nextDouble() * getTotal(pdf); for (int i = 0; i < pdf.length; i++) { U -= pdf[i]; if (U < 0.0) { return i; } } // fallen through so throw error... StringBuilder sb = new StringBuilder("pdf=["); sb.append(pdf[0]); for (int i = 1; i < pdf.length; i++) { sb.append(","); sb.append(pdf[i]); } sb.append("]"); throw new Error("randomChoicePDF falls through -- negative, infinite or NaN components in input " + "distribution, or all zeroes? " + sb.toString()); } /** * @param logpdf array of unnormalised log probabilities * @return a sample according to an unnormalised probability distribution * <p/> * Use this if probabilities are rounding to zero when converted to real space */ public static int randomChoiceLogPDF(double[] logpdf) { double scalingFactor = Double.NEGATIVE_INFINITY; for (double aLogpdf : logpdf) { if (aLogpdf > scalingFactor) { scalingFactor = aLogpdf; } } if (scalingFactor == Double.NEGATIVE_INFINITY) { throw new Error("randomChoiceLogPDF falls through -- all -INF components in input distribution"); } for (int j = 0; j < logpdf.length; j++) { logpdf[j] = logpdf[j] - scalingFactor; } double[] pdf = new double[logpdf.length]; for (int j = 0; j < logpdf.length; j++) { pdf[j] = Math.exp(logpdf[j]); } return randomChoicePDF(pdf); } /** * @param array to normalize * @return a new double array where all the values sum to 1. * Relative ratios are preserved. 
*/ public static double[] getNormalized(double[] array) { double[] newArray = new double[array.length]; double total = getTotal(array); for (int i = 0; i < array.length; i++) { newArray[i] = array[i] / total; } return newArray; } /** * @param array entries to be summed * @param start start position * @param end the index of the element after the last one to be included * @return the total of a the values in a range of an array */ public static double getTotal(double[] array, int start, int end) { double total = 0.0; for (int i = start; i < end; i++) { total += array[i]; } return total; } /** * @param array to sum over * @return the total of the values in an array */ public static double getTotal(double[] array) { return getTotal(array, 0, array.length); } // ===================== (Synchronized) Static access methods to the private random instance =========== /** * Access a default instance of this class, access is synchronized */ public static long getSeed() { synchronized (random) { return random.getSeed(); } } /** * Access a default instance of this class, access is synchronized */ public static void setSeed(long seed) { synchronized (random) { random.setSeed(seed); } } /** * Access a default instance of this class, access is synchronized */ public static byte nextByte() { synchronized (random) { return random.nextByte(); } } /** * Access a default instance of this class, access is synchronized */ public static boolean nextBoolean() { synchronized (random) { return random.nextBoolean(); } } /** * Access a default instance of this class, access is synchronized */ public static void nextBytes(byte[] bs) { synchronized (random) { random.nextBytes(bs); } } /** * Access a default instance of this class, access is synchronized */ public static char nextChar() { synchronized (random) { return random.nextChar(); } } /** * Access a default instance of this class, access is synchronized */ public static double nextGaussian() { synchronized (random) { return 
random.nextGaussian(); } } //Mean = alpha / lambda //Variance = alpha / (lambda*lambda) public static double nextGamma(double alpha, double lambda) { synchronized (random) { return random.nextGamma(alpha, lambda); } } //Mean = alpha/(alpha+beta) //Variance = (alpha*beta)/(alpha+beta)^2*(alpha+beta+1) public static double nextBeta(double alpha, double beta) { double x = nextGamma(alpha, 1); double y = nextGamma(beta, 1); return x / (x + y); } /** * Access a default instance of this class, access is synchronized * * @return a pseudo random double precision floating point number in [01) */ public static double nextDouble() { synchronized (random) { return random.nextDouble(); } } /** * @return log of random variable in [0,1] */ public static double randomLogDouble() { return Math.log(nextDouble()); } /** * Access a default instance of this class, access is synchronized */ public static double nextExponential(double lambda) { synchronized (random) { return -1.0 * Math.log(1 - random.nextDouble()) / lambda; } } /** * Access a default instance of this class, access is synchronized */ public static double nextInverseGaussian(double mu, double lambda) { synchronized (random) { /* CODE TAKEN FROM WIKIPEDIA. 
TESTING DONE WITH RESULTS GENERATED IN R AND LOOK COMPARABLE */ double v = random.nextGaussian(); // sample from a normal distribution with a mean of 0 and 1 standard deviation double y = v * v; double x = mu + (mu * mu * y) / (2 * lambda) - (mu / (2 * lambda)) * Math.sqrt(4 * mu * lambda * y + mu * mu * y * y); double test = MathUtils.nextDouble(); // sample from a uniform distribution between 0 and 1 if (test <= (mu) / (mu + x)) { return x; } else { return (mu * mu) / x; } } } /** * Access a default instance of this class, access is synchronized */ public static float nextFloat() { synchronized (random) { return random.nextFloat(); } } /** * Access a default instance of this class, access is synchronized */ public static long nextLong() { synchronized (random) { return random.nextLong(); } } /** * Access a default instance of this class, access is synchronized */ public static short nextShort() { synchronized (random) { return random.nextShort(); } } /** * Access a default instance of this class, access is synchronized */ public static int nextInt() { synchronized (random) { return random.nextInt(); } } /** * Access a default instance of this class, access is synchronized */ public static int nextInt(int n) { synchronized (random) { return random.nextInt(n); } } /** * @param low * @param high * @return uniform between low and high */ public static double uniform(double low, double high) { return low + nextDouble() * (high - low); } /** * Shuffles an array. */ public static void shuffle(int[] array) { synchronized (random) { random.shuffle(array); } } /** * Shuffles an array. Shuffles numberOfShuffles times */ public static void shuffle(int[] array, int numberOfShuffles) { synchronized (random) { random.shuffle(array, numberOfShuffles); } } /** * Returns an array of shuffled indices of length l. * * @param l length of the array required. 
*/ public static int[] shuffled(int l) { synchronized (random) { return random.shuffled(l); } } public static int[] sampleIndicesWithReplacement(int length) { synchronized (random) { int[] result = new int[length]; for (int i = 0; i < length; i++) result[i] = random.nextInt(length); return result; } } /** * Permutes an array. */ public static void permute(int[] array) { synchronized (random) { random.permute(array); } } /** * Returns a uniform random permutation of 0,...,l-1 * * @param l length of the array required. */ public static int[] permuted(int l) { synchronized (random) { return random.permuted(l); } } public static double logHyperSphereVolume(int dimension, double radius) { return dimension * (0.5723649429247001 + Math.log(radius)) + -GammaFunction.lnGamma(dimension / 2.0 + 1.0); } /** * Returns sqrt(a^2 + b^2) without under/overflow. */ public static double hypot(double a, double b) { double r; if (Math.abs(a) > Math.abs(b)) { r = b / a; r = Math.abs(a) * Math.sqrt(1 + r * r); } else if (b != 0) { r = a / b; r = Math.abs(b) * Math.sqrt(1 + r * r); } else { r = 0.0; } return r; } /** * return double *.???? 
* * @param value * @param sf * @return */ public static double round(double value, int sf) { NumberFormatter formatter = new NumberFormatter(sf); try { return NumberFormat.getInstance().parse(formatter.format(value)).doubleValue(); } catch (ParseException e) { return value; } } public static int[] getRandomState() { synchronized (random) { return random.getRandomState(); } } public static void setRandomState(int[] rngState) { synchronized (random) { random.setRandomState(rngState); } } public static boolean isClose(double[] x, double[] y, double tolerance) { if (x.length != y.length) return false; for (int i = 0, dim = x.length; i < dim; ++i) { if (Double.isNaN(x[i]) || Double.isNaN(y[i])) return false; if (Math.abs(x[i] - y[i]) > tolerance) return false; } return true; } public static boolean isClose(double x, double y, double tolerance) { return Math.abs(x - y) < tolerance; } public static boolean isRelativelyClose(double[] x, double[] y, double relativeTolerance) { if (x.length != y.length) return false; for (int i = 0, dim = x.length; i < dim; ++i) { if (!isRelativelyClose(x[i], y[i], relativeTolerance)) { return false; } } return true; } public static boolean isRelativelyClose(double x, double y, double relativeTolerance) { double relativeDifference = 2 * (x - y) / (x + y); if (Math.abs(relativeDifference) > relativeTolerance) { return false; } return true; } public static double maximum(double[] array) { double max = array[0]; for (double x : array) { if (x > max) { max = x; } } return max; } }
jsigao/beast-mcmc
src/dr/math/MathUtils.java
214,502
package edu.drexel.psal.anonymouth.engine; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.HashSet; import java.util.Iterator; import java.util.Set; import edu.drexel.psal.anonymouth.utils.Pair; import edu.drexel.psal.jstylo.generics.Logger; /** * Extracts targets * @author Andrew W.E. McDonald * */ public class TargetExtractor { private final String NAME = "( "+this.getClass().getSimpleName()+" ) - "; private static ArrayList<Integer> previousInitialization; private int numMeans; private int numAuthors; private int additionalPartitions = 0; private double min; private double max; private double spread; private int numPartitions; // same as number of clusters (1 partition == 1 cluster) private int originalNumMeans; private boolean isFinished; private double authorAvg; private double authorStdDev; private double authorMin; private double authorMax; private boolean targetSet=false; private double presentValue; private double targetDev; ArrayList<Cluster> thisFeaturesClusters; private ArrayList<String> trainTitlesList; private Pair[] thePairs; private boolean maxCentroidsFound = false; private String featName; /** * Constructor * @param numAuthors the number of authors (not including the user). This defines the starting number of centroids * @param attrib the Attribute to extract a target for */ public TargetExtractor(int numAuthors, Attribute attrib){//, boolean usePreviousInitialization){ this.featName = attrib.getConcatGenNameAndStrInBraces(); //Logger.logln(NAME+"In TargetExtractor extracting targets for "+featName); this.trainTitlesList = DocumentMagician.getTrainTitlesList(); this.numAuthors = numAuthors; this.numMeans = numAuthors-2;// todo maybe remove this. 
(xxx xxx xxx try n-2, n-3, n-1, n, etc xxx xxx xxx) double[] thisFeature = attrib.getTrainVals(); int lenThisFeature = thisFeature.length; int i=0; this.thePairs = new Pair[lenThisFeature]; for(i=0;i<lenThisFeature;i++){ thePairs[i] = new Pair(trainTitlesList.get(i),thisFeature[i]); } this.min = attrib.getTrainMin(); this.max = attrib.getTrainMax(); this.spread = max-min; isFinished = false; this.authorAvg = attrib.getAuthorAvg(); this.authorStdDev = attrib.getAuthorStdDev(); this.presentValue = attrib.getToModifyValue(); } /** * Empty constructor for testing purposes */ public TargetExtractor(){ thisFeaturesClusters = new ArrayList<Cluster>(); } /** * Implementation of k-means++ initialization (seeding) algorithm for k-means */ public void kPlusPlusPrep(){//double[] thisFeature){ // parameter just for testing previousInitialization = new ArrayList<Integer>(); thisFeaturesClusters.clear(); int numFeatures = thePairs.length; MersenneTwisterFast mtfGen = new MersenneTwisterFast(); int firstPick = mtfGen.nextInt(numFeatures); thisFeaturesClusters.add(0,new Cluster(thePairs[firstPick].value)); previousInitialization.add(firstPick); int numClusters = thisFeaturesClusters.size(); int i =0; int j = 0; int k = 0; double smallestDSquared = Integer.MAX_VALUE; // large initial value double tempSmallestDSquared; double currentValue; double currentCentroid; double[] dSquaredRay = new double[numFeatures]; double dSquaredRaySum = 0; double[] probabilities = new double[numFeatures]; double randomChoice; boolean notFound = true; boolean tooManyTries = false; Set<Double> skipSet = new HashSet<Double>(); int numLoops = 0; for(i=0;i<numMeans-1;i++){ for(j=0;j<numFeatures;j++){ currentValue = thePairs[j].value; smallestDSquared = Integer.MAX_VALUE; for(k=0;k<numClusters;k++){ currentCentroid = thisFeaturesClusters.get(k).getCentroid(); tempSmallestDSquared = (currentValue-currentCentroid)*(currentValue-currentCentroid); if(tempSmallestDSquared < smallestDSquared) smallestDSquared = 
tempSmallestDSquared; } dSquaredRay[j]=smallestDSquared; } dSquaredRaySum = 0; for(k=0;k<numFeatures;k++) dSquaredRaySum += dSquaredRay[k]; if(dSquaredRaySum== 0){// this will occur if we have multiple duplicate values. In that case, we can't choose more centroids because the remaining data points are equal. maxCentroidsFound = true; numMeans = i+1; // need to add one because it starts counting from '0', and even if every document has the same value for this feature, there // will still be one centroid chosen because we randomly select the first one before the loop (if ALL values are the same, this will break out of the loop at i==0, and we'll have a single centroid, so, we need to account for it in numMeans) break; } for(k=0;k<numFeatures;k++) probabilities[k]=dSquaredRay[k]/dSquaredRaySum; notFound = true; ArrayList<Double> badRandomsTesting = new ArrayList<Double>(); double thisProb = 0; while(notFound == true){ numLoops += 1; randomChoice = mtfGen.nextDouble(true,true); thisProb = 0; for(k=0;k<numFeatures;k++){ thisProb = thisProb +probabilities[k]; if((randomChoice <= thisProb) && (!skipSet.contains(thePairs[k].value))){ thisFeaturesClusters.add(new Cluster(thePairs[k].value)); skipSet.add(thePairs[k].value); previousInitialization.add(k); notFound = false; break; } } if(notFound == true){ if(skipSet.size() > 10000 || numLoops > 1000){ Logger.logln(NAME+"kPlusPlusPrep reached 10k tries."); tooManyTries = true; break; } Set<Double> cSkipSet = new HashSet<Double>(skipSet); int preSize = cSkipSet.size(); for(k=0;k<numFeatures;k++){ cSkipSet.add(thePairs[k].value); } int postSize = cSkipSet.size(); if(preSize == postSize){ maxCentroidsFound = true; numMeans = thisFeaturesClusters.size(); } badRandomsTesting.add(randomChoice); } if(maxCentroidsFound==true || tooManyTries == true) break; } if(maxCentroidsFound==true) break; if(tooManyTries == true){ kPlusPlusPrep(); break; } } } /** * * Initializes the clustering algorithm by evenly spacing 'numMeans' centroids 
between the features [min,max], * and assigns features to partitions based upon Euclidean distance from centroids (single dimension) */ public boolean initialize(){ //Logger.logln(NAME+"Initializing Clustering, will call kPlusPlusPrep."); kPlusPlusPrep(); int i; int j; double[] temp = new double[2];// temp[0] <=> parition number && temp[1] <=> difference value int partitionToGoTo; numPartitions = numMeans; // create list of all centroids double[] allCentroids = getAllCentroids(); // Initialize cluster element sets based on distance from each centroid double[][] differences = new double[numMeans][thePairs.length]; for(i=0;i<numMeans;i++){ double tempCentroid = allCentroids[i]; for(j=0;j<thePairs.length;j++){ //TODO: squared?? differences[i][j] =Math.abs(thePairs[j].value-tempCentroid); } } for(i=0;i<differences[0].length;i++){//differences array's columns (correspond to 'thisFeature' indices (feature events per document) j=0; temp[0] = j; temp[1] = differences[j][i]; for(j=1;j<differences.length;j++){// differences array's rows (correspond to 'thisFeaturesClusters' cluster indices) if (temp[1]>differences[j][i]){ temp[0] = j; temp[1] =differences[j][i]; } } partitionToGoTo = (int)temp[0]; thisFeaturesClusters.get(partitionToGoTo).addElement(thePairs[i]); } //Logger.logln(NAME+"Initial positions for elements found. Updating Centroids."); return updateCentroids(); } /** * Updates the centroids to be the average of the values contained within their respective partitions. */ public boolean updateCentroids(){ //Logger.logln(NAME+"Begin updating centroids."); // update centroids to be the averages of their respective element lists int i=0; int j = 0; for(i=0;i<numMeans;i++){ double sum= 0; double avg = 0; int count = 0; Pair[] someElements = thisFeaturesClusters.get(i).getElements(); int someElementsLen = someElements.length; for(j=0; j<someElementsLen;j++){ double temp = someElements[j].value; sum+=temp; count += 1; } if (count == 0) avg = -1; // don't divide by zero. 
just set avg to -1 and deal with it later. else avg = sum/count; thisFeaturesClusters.get(i).updateCentroid(avg); } // Once all centroids have been updated, re-organize //Logger.logln(NAME+"Updating centroids complete, will reOrganize"); return reOrganize(); } public double[] getAllCentroids(){ int i; int numClusters = thisFeaturesClusters.size(); double[] allCentroids = new double[numClusters]; for(i=0;i<numClusters;i++) allCentroids[i] = thisFeaturesClusters.get(i).getCentroid(); return allCentroids; } /** * Moves the features to their new nearest centroids */ public boolean reOrganize(){ //Logger.logln(NAME+"Starting reOrganize"); // need to go through all elements, extract data, and check distance agains new centroids // create list of all centroids int i; int j; int k; int m; double[] temp = new double[2];// index '0' holds the centroid number that corresponds to the difference value in index '1' int bestCentroid; boolean movedElement = false; //System.out.println("all centroids: "+getAllCentroids().toString()); //TODO: maybe there is a better way to go about this than casting from Object to Double double[] allCentroids = getAllCentroids(); double[] diffs = new double[allCentroids.length]; Pair[] elementHolder; for(i=0;i<numMeans;i++){// for each cluster elementHolder = thisFeaturesClusters.get(i).getElements(); //get the element list, and change it to a Double[] (from ArrayList<Double>) for(j=0;j<elementHolder.length;j++){ for(k=0;k<numMeans;k++){ //TODO: squared?? diffs[k] = Math.abs(elementHolder[j].value-(Double)allCentroids[k]); } temp[0]=0; temp[1]=diffs[0]; for(m=1;m<diffs.length;m++){ if(temp[1]>diffs[m]){ temp[0]=m; temp[1]=diffs[m]; } } bestCentroid = (int)temp[0]; if(! (bestCentroid == i) ){// if a more fitting centroid was found for the element in question... 
thisFeaturesClusters.get(i).removeElement((Pair)elementHolder[j]); thisFeaturesClusters.get(bestCentroid).addElement((Pair)elementHolder[j]); movedElement = true; } } } boolean noProblems = true; if(movedElement == false ){ //Logger.logln(NAME+"Elements stopped moving - algorithm converged."); int numClusters = thisFeaturesClusters.size(); if(numClusters < 2 && maxCentroidsFound == false){ additionalPartitions++; numMeans = originalNumMeans+additionalPartitions; noProblems = false; } else{ for(i=0;i<numClusters;i++){ if(thisFeaturesClusters.get(i).getElements().length < 3 && maxCentroidsFound == false){ numMeans--; noProblems = false; break; } } } if(noProblems == true){ Logger.logln(NAME+"All is well, clustering complete."); isFinished=true; } else{ noProblems = true; thisFeaturesClusters.clear(); return true; } } else{ //Logger.logln(NAME+"Updating Centroids... something moved"); return updateCentroids(); } return false; } /** * returns the average absolute deviation of the elements in the target cluster from the centroid * @return */ public double getTargetAvgAbsDev(){ if(targetSet==true) return targetDev; else return -1; } /** * Method that runs the modified k-means clustering algorithm, initialized via the k-means++ algorithm */ public void aMeansCluster(){ // a-means-cluster vs k-means-cluster Logger.logln(NAME+"Entered aMeansCluster"); thisFeaturesClusters = new ArrayList<Cluster>(numPartitions); boolean mustRestart = true; while (mustRestart) mustRestart = initialize(); double avgAbsDev; Cluster thisOne; int numRemoved = 0; int i; for(i = 0; i< numMeans; i++){ thisOne = thisFeaturesClusters.get(i); if ((thisOne.getElements().length == 0) && (thisOne.getCentroid() == -1)){ // this may be redundant.. 
we can probably just pick one (I'd keep the first one) thisFeaturesClusters.remove(thisOne); // no reason keeping empty clusters around numMeans--; // if we remove a cluster, we need to reduce the number of means, AND we need to decrement 'i' so that we don't miss the cluster that moves forward into the place of the cluster that was just deleted. i--; } } ArrayList<String> holderForLogger = new ArrayList<String>(10); Iterator<Cluster> clusterIter = thisFeaturesClusters.iterator(); int clusterNumber = 0; Logger.logln(NAME+"Clusters for: "+featName); while(clusterIter.hasNext()){ thisOne = clusterIter.next(); holderForLogger.clear(); Pair[] somePairs = thisOne.getElements(); int numSomePairs = somePairs.length; for(i=0;i<numSomePairs;i++){ holderForLogger.add(somePairs[i].toString()); } Logger.logln(NAME+"Cluster "+clusterNumber+" has its centroid at "+thisOne.getCentroid()+" and has "+thisOne.getElements().length+" elements. They are: "+holderForLogger.toString()); clusterNumber+=1; } Logger.logln(NAME+featName+" has: "+thisFeaturesClusters.size()+" clusters... leaving aMeansCluster"); } /** * Orders the clusters with respect to 'preference'. 
* * preference = (number of elements in cluster)*(positive distance between cluster centroid and user's average) * * @return */ public Cluster[] getPreferredOrdering(){ Logger.logln(NAME+"Getting preferred ordering for clusters"); int i=0; int sizeSum =0; double sizeAvg = 0; double tempClustCent; double tempClustDev; double clustMin; double clustMax; int numClusters = thisFeaturesClusters.size(); int[] sizes = new int[numClusters]; double[] dists = new double[numClusters]; Double[][] preferences = new Double[numClusters][2]; // highest number => most ideal cluster double distSum = 0 ; double distAvg = 0; // collect sizes of all clusters for(i=0;i<numClusters;i++){ sizes[i] = thisFeaturesClusters.get(i).getElements().length; sizeSum += sizes[i]; } sizeAvg = (double)sizeSum/numClusters; for(i=0; i<numClusters;i++){ Cluster tempCluster = thisFeaturesClusters.get(i); tempClustCent = tempCluster.getCentroid(); if(tempClustCent< authorAvg) dists[i] = authorAvg - tempClustCent; else if (tempClustCent > authorMax) dists[i] = tempClustCent - authorAvg; else dists[i] = 0; distSum += dists[i]; } distAvg = distSum/numClusters; for(i = 0; i < numClusters; i++){ preferences[i][0] =(Double)(double)i; preferences[i][1] = (dists[i])*(sizes[i]/sizeAvg); // ( distance)*(cluster size/ average cluster size) } Arrays.sort(preferences, new Comparator<Double[]>(){ public int compare(Double one[], Double two[]){ return one[1].compareTo(two[1]); } }); Cluster[] targets = new Cluster[numClusters]; // can't be more than this. i= 0; for(i=0;i<numClusters;i++){ targets[i]= thisFeaturesClusters.get(preferences[i][0].intValue()); } Logger.logln(NAME+"finished ordering clusters"); return targets; } }
spencermwoo/anonymouth
src/edu/drexel/psal/anonymouth/engine/TargetExtractor.java
214,503
//Copyright (c) 2007-2008 The Board of Trustees of //Tregex/Tsurgeon, DisplayMatchesPanel - a GUI for tree search and modification //The Leland Stanford Junior University. All Rights Reserved. //This program is free software; you can redistribute it and/or //modify it under the terms of the GNU General Public License //as published by the Free Software Foundation; either version 2 //of the License, or (at your option) any later version. //This program is distributed in the hope that it will be useful, //but WITHOUT ANY WARRANTY; without even the implied warranty of //MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //GNU General Public License for more details. //You should have received a copy of the GNU General Public License //along with this program; if not, write to the Free Software //Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. //This code is a GUI interface to Tregex and Tsurgeon (which were //written by Rogey Levy and Galen Andrew). //For more information, bug reports, fixes, contact: //Christopher Manning //Dept of Computer Science, Gates 1A //Stanford CA 94305-9010 //USA // Support/Questions: [email protected] // Licensing: [email protected] //http://www-nlp.stanford.edu/software/tregex.shtml package edu.stanford.nlp.trees.tregex.gui; import java.awt.BorderLayout; import java.awt.Color; import java.awt.Dimension; import java.awt.Graphics2D; import java.awt.datatransfer.StringSelection; import java.awt.datatransfer.Transferable; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.InputEvent; import java.awt.event.KeyEvent; import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.awt.geom.Point2D; import java.awt.image.BufferedImage; import java.io.File; import java.io.IOException; import java.util.List; import javax.imageio.ImageIO; import javax.swing.*; import javax.swing.event.ListSelectionEvent; import javax.swing.event.ListSelectionListener; 
import javax.swing.event.MouseInputAdapter; import javax.swing.filechooser.FileNameExtensionFilter; import edu.stanford.nlp.trees.EnglishGrammaticalStructure; import edu.stanford.nlp.trees.GrammaticalStructureConversionUtils; import edu.stanford.nlp.trees.Tree; import edu.stanford.nlp.trees.UniversalEnglishGrammaticalStructure; import edu.stanford.nlp.util.Pair; /** * Class for creating the panel which shows a graphical version of the tree (as in TreeJPanel) as well * as the file name of the file from which the tree is from. * * @author Anna Rafferty */ @SuppressWarnings("serial") public class DisplayMatchesPanel extends JPanel implements ListSelectionListener { private JScrollPane scroller; private MouseEvent firstMouseEvent = null; private String fontName = ""; private int fontSize = 12; private Color defaultColor = Color.BLACK; private Color matchedColor = Color.RED; private static DisplayMatchesPanel instance = null; private ScrollableTreeJPanel tjp; private List<Point2D.Double> matchedPartCoordinates; private int matchedPartCoordinateIdx = -1; public static synchronized DisplayMatchesPanel getInstance() { if (instance == null) { instance = new DisplayMatchesPanel(); } return instance; } private DisplayMatchesPanel() { //data JPanel spaceholder = new JPanel(); spaceholder.setBackground(Color.white); JTextArea message = new JTextArea("For non-English trees, first set up the tree reader and encoding in Preferences. 
Then load trees from the File menu."); message.setEditable(false); spaceholder.add(message); scroller = new JScrollPane(spaceholder); // Fix slow scrolling on OS X if (TregexGUI.isMacOSX()) { scroller.getVerticalScrollBar().setUnitIncrement(3); scroller.getHorizontalScrollBar().setUnitIncrement(3); } this.setFocusable(true); this.setTransferHandler(new DisplayTransferHandler()); MatchesPanel.getInstance().addListener(this); //layout this.setLayout(new BorderLayout()); this.setBorder(BorderFactory.createTitledBorder(BorderFactory.createEmptyBorder(),"")); this.add(scroller, BorderLayout.CENTER); } private static class DisplayTransferHandler extends TransferHandler { public DisplayTransferHandler() { super(); } protected static String exportString(JComponent c) { if (c instanceof ScrollableTreeJPanel) { ScrollableTreeJPanel tjp = (ScrollableTreeJPanel) c; return tjp.getTree().pennString(); } return ""; } @Override protected Transferable createTransferable(JComponent c) { return new StringSelection(exportString(c)); } @Override public int getSourceActions(JComponent c) { return COPY_OR_MOVE; } } // end class DisplayTransferHandler /** * Used to set the single tree to be displayed in this panel (which should match * the tregex expression) * @param match tree that matches the expression */ public void setMatch(TreeFromFile match, List<Tree> matchedParts) { clearMatches(); if(match != null) addMatch(match, matchedParts); } /** * Remove all trees from the display */ public void clearMatches() { JPanel spaceholder = new JPanel(); spaceholder.setBackground(Color.white); scroller.setViewportView(spaceholder); scroller.validate(); scroller.repaint(); matchedPartCoordinates = null; matchedPartCoordinateIdx = -1; } public class FilenameMouseInputAdapter extends MouseInputAdapter { JTextField textField; public FilenameMouseInputAdapter(JTextField textField) { this.textField = textField; } private boolean dragNDrop = false; @Override public void mousePressed(MouseEvent e) { if 
(MatchesPanel.getInstance().isEmpty()) return; if(firstMouseEvent == null) { firstMouseEvent = e; } e.consume(); if(((e.getModifiersEx()) & InputEvent.SHIFT_DOWN_MASK) == InputEvent.SHIFT_DOWN_MASK) { //shift is being held addHighlight(textField, firstMouseEvent, e); } else if(!HighlightUtils.isInHighlight(e, textField, textField.getHighlighter())) { textField.getHighlighter().removeAllHighlights(); firstMouseEvent = e; dragNDrop = false; textField.repaint(); } else { //in a highlight, if we drag after this, we'll be DnDing dragNDrop = true; } } private boolean addHighlight(JTextField label, MouseEvent mouseEvent1, MouseEvent mouseEvent2) { return HighlightUtils.addHighlight(label, mouseEvent1, mouseEvent2); } @Override public void mouseDragged(MouseEvent e) { if (MatchesPanel.getInstance().isEmpty()) return; if (firstMouseEvent != null) { e.consume(); if(dragNDrop) { if(textField == null) return; if(Point2D.distanceSq(e.getX(), e.getY(), firstMouseEvent.getX(), firstMouseEvent.getY()) > 25) { //do DnD textField.getTransferHandler().exportAsDrag((JComponent) e.getSource(), firstMouseEvent, TransferHandler.COPY); } } else { addHighlight(textField, firstMouseEvent, e); } } } } /** * Adds the given tree to the display without removing already * displayed trees * @param match tree to be added */ private void addMatch(TreeFromFile match, List<Tree> matchedParts) { JPanel treeDisplay = new JPanel(new BorderLayout()); JTextField filename = new JTextField("From file: " + match.getFilename()); filename.setEditable(false); MouseInputAdapter listener = new FilenameMouseInputAdapter(filename); filename.addMouseListener(listener); filename.addMouseMotionListener(listener); treeDisplay.add(filename, BorderLayout.NORTH); if(TregexGUI.getInstance().isTdiffEnabled()) { tjp = getTreeJPanel(match.getDiffDecoratedTree(), matchedParts); tjp.setDiffConstituents(match.getDiffConstituents()); } else { tjp = getTreeJPanel(match.getTree(), matchedParts); } matchedPartCoordinates = 
tjp.getMatchedPartCoordinates(); matchedPartCoordinateIdx = -1; treeDisplay.add(tjp, BorderLayout.CENTER); filename.setOpaque(true); filename.setBackground(tjp.getBackground()); filename.setBorder(BorderFactory.createEmptyBorder(0, 5, 0, 0)); scroller.setViewportView(treeDisplay); this.revalidate(); this.repaint(); } void showPrevMatchedPart() { if (matchedPartCoordinates.size() == 0) return; else if (matchedPartCoordinateIdx <= 0) matchedPartCoordinateIdx = matchedPartCoordinates.size(); matchedPartCoordinateIdx--; showMatchedPart(matchedPartCoordinateIdx); } void showNextMatchedPart() { if (matchedPartCoordinates.size() == 0) return; matchedPartCoordinateIdx = ++matchedPartCoordinateIdx % matchedPartCoordinates.size(); showMatchedPart(matchedPartCoordinateIdx); } private void showMatchedPart(int idx) { Point2D.Double coord = matchedPartCoordinates.get(idx); Dimension treeSize = tjp.getPreferredSize(); JScrollBar horizontal = scroller.getHorizontalScrollBar(); JScrollBar vertical = scroller.getVerticalScrollBar(); int horizontalLength = horizontal.getMaximum() - horizontal.getMinimum(); double x = Math.max(0, (coord.getX() / treeSize.getWidth() * horizontalLength - (scroller.getWidth() / 2.0))); int verticalLength = vertical.getMaximum() - vertical.getMinimum(); double y = Math.max(0, (coord.getY() / treeSize.getHeight() * verticalLength - (scroller.getHeight() / 2.0))); horizontal.setValue((int) x); vertical.setValue((int) y); } private void doExportTree() { JFileChooser chooser = new JFileChooser(); chooser.setSelectedFile(new File("./tree.png")); FileNameExtensionFilter filter = new FileNameExtensionFilter("PNG images", "png"); chooser.setFileFilter(filter); int status = chooser.showSaveDialog(this); if (status != JFileChooser.APPROVE_OPTION) return; Dimension size = tjp.getPreferredSize(); BufferedImage im = new BufferedImage((int) size.getWidth(), (int) size.getHeight(), BufferedImage.TYPE_INT_ARGB); Graphics2D g = im.createGraphics(); tjp.paint(g); try { 
ImageIO.write(im, "png", chooser.getSelectedFile()); } catch (IOException e) { JOptionPane.showMessageDialog(this, "Failed to save the tree image file.\n" + e.getLocalizedMessage(), "Export Error", JOptionPane.ERROR_MESSAGE); } } // BEGIN - sebschu private void showDependencies() { EnglishGrammaticalStructure gs = new EnglishGrammaticalStructure(tjp.getTree()); JOptionPane.showMessageDialog(this, GrammaticalStructureConversionUtils.dependenciesToString(gs, gs.typedDependencies(false), tjp.getTree(), false, false, false), "Dependencies", JOptionPane.INFORMATION_MESSAGE, null); } private void showUniversalDependencies() { UniversalEnglishGrammaticalStructure gs = new UniversalEnglishGrammaticalStructure(tjp.getTree()); JOptionPane.showMessageDialog(this, GrammaticalStructureConversionUtils.dependenciesToString(gs, gs.typedDependencies(false), tjp.getTree(), false, false, false), "Universal dependencies", JOptionPane.INFORMATION_MESSAGE, null); } // END - sebschu private ScrollableTreeJPanel getTreeJPanel(Tree t, List<Tree> matchedParts) { final ScrollableTreeJPanel treeJP = new ScrollableTreeJPanel(SwingConstants.CENTER,SwingConstants.TOP); treeJP.setFontName(fontName); treeJP.setFontSize(fontSize); treeJP.setDefaultColor(defaultColor); treeJP.setMatchedColor(matchedColor); treeJP.setTree(t); treeJP.setMatchedParts(matchedParts); treeJP.setBackground(Color.WHITE); treeJP.setFocusable(true); final JPopupMenu treePopup = new JPopupMenu(); JMenuItem copy = new JMenuItem("Copy"); copy.setActionCommand((String) TransferHandler.getCopyAction() .getValue(Action.NAME)); copy.addActionListener(new TregexGUI.TransferActionListener()); int mask = TregexGUI.isMacOSX() ? 
InputEvent.META_MASK : InputEvent.CTRL_MASK; copy.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_C, mask)); treePopup.add(copy); JMenuItem exportTree = new JMenuItem("Export tree as image"); exportTree.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { doExportTree(); } }); treePopup.add(exportTree); //BEGIN - sebschu JMenuItem showDependencies = new JMenuItem("Show dependencies"); showDependencies.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { showDependencies(); } }); treePopup.add(showDependencies); JMenuItem showUniversalDependencies = new JMenuItem("Show universal dependencies"); showUniversalDependencies.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent e) { showUniversalDependencies(); } }); treePopup.add(showUniversalDependencies); //END - sebschu treeJP.addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { treeJP.requestFocusInWindow(); } private void maybeShowPopup(MouseEvent e) { if (e.isPopupTrigger()) treePopup.show(e.getComponent(), e.getX(), e.getY()); } @Override public void mousePressed(MouseEvent e) { maybeShowPopup(e); } @Override public void mouseReleased(MouseEvent e) { maybeShowPopup(e); } }); DisplayMouseMotionAdapter d = new DisplayMouseMotionAdapter(); treeJP.addMouseMotionListener(d); treeJP.addMouseListener(d); treeJP.setTransferHandler(new DisplayTransferHandler()); InputMap imap = treeJP.getInputMap(); imap.put(KeyStroke.getKeyStroke("ctrl C"), TransferHandler.getCopyAction().getValue(Action.NAME)); ActionMap map = treeJP.getActionMap(); map.put(TransferHandler.getCopyAction().getValue(Action.NAME), TransferHandler.getCopyAction()); return treeJP; } private static class DisplayMouseMotionAdapter extends MouseInputAdapter { /* * Motion listener is based off the Java sun tutorial for DnD transfer */ MouseEvent firstMouseEvent1 = null; @Override public void mousePressed(MouseEvent e) { if 
(MatchesPanel.getInstance().isEmpty()) return; firstMouseEvent1 = e; e.consume(); } @Override public void mouseDragged(MouseEvent e) { if (MatchesPanel.getInstance().isEmpty()) return; if (firstMouseEvent1 != null) { e.consume(); int dx = Math.abs(e.getX() - firstMouseEvent1.getX()); int dy = Math.abs(e.getY() - firstMouseEvent1.getY()); //Arbitrarily define a 5-pixel shift as the //official beginning of a drag. if (dx > 5 || dy > 5) { //This is a drag, not a click. JComponent c = (JComponent)e.getSource(); //Tell the transfer handler to initiate the drag. TransferHandler handler = c.getTransferHandler(); handler.exportAsDrag(c, firstMouseEvent1, TransferHandler.COPY); firstMouseEvent1 = null; } } } @Override public void mouseReleased(MouseEvent e) { firstMouseEvent1 = null; } } // end class DisplayMouseMotionAdapter public void setFontName(String fontName) { this.fontName = fontName; } public void setFontSize(int fontSize) { this.fontSize = fontSize; } public void setFontSizeRepaint(int fontSize) { this.fontSize = fontSize; if (tjp != null) { tjp.setFontSize(fontSize); // cdm 2009: it seems like you need to call revalidate and repaint on precisely these components or it doesn't work ... tricky stuff. tjp.revalidate(); scroller.repaint(); } } public void setDefaultColor(Color defaultColor) { this.defaultColor = defaultColor; } public void setMatchedColor(Color matchedColor) { this.matchedColor = matchedColor; } public void valueChanged(ListSelectionEvent e) { Pair<TreeFromFile, List<Tree>> newMatch = MatchesPanel.getInstance().getSelectedMatch(); if(newMatch == null) clearMatches(); else setMatch(newMatch.first(), newMatch.second()); } }
stanfordnlp/CoreNLP
src/edu/stanford/nlp/trees/tregex/gui/DisplayMatchesPanel.java
214,504
// -*- mode: java; c-basic-offset: 2; -*- // Copyright 2013-2017 MIT, All rights reserved // Released under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 /** * @license * @fileoverview Text blocks for Blockly, modified for MIT App Inventor. * @author [email protected] (Andrew F. McKinney) */ 'use strict'; goog.provide('Blockly.Blocks.text'); goog.require('Blockly.Blocks.Utilities'); Blockly.Blocks['text'] = { // Text value. category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_TEXT_HELPURL, init: function () { var textInput = new Blockly.FieldTextInput(''); textInput.onFinishEditing_ = Blockly.Blocks.text .bumpBlockOnFinishEdit.bind(this); this.setColour(Blockly.TEXT_CATEGORY_HUE); this.appendDummyInput() .appendField(Blockly.Msg.LANG_TEXT_TEXT_LEFT_QUOTE) .appendField(textInput, 'TEXT') .appendField(Blockly.Msg.LANG_TEXT_TEXT_RIGHT_QUOTE); this.setOutput(true, [Blockly.Blocks.text.connectionCheck]); this.setTooltip(Blockly.Msg.LANG_TEXT_TEXT_TOOLTIP); }, errors: [{name:"checkInvalidNumber"}], typeblock: [{translatedName: Blockly.Msg.LANG_CATEGORY_TEXT}] }; Blockly.Blocks.text.connectionCheck = function (myConnection, otherConnection, opt_value) { var otherTypeArray = otherConnection.check_; if (!otherTypeArray) { // Other connection accepts everything. 
return true; } var block = myConnection.sourceBlock_; var shouldIgnoreError = Blockly.mainWorkspace.isLoading; var value = opt_value || block.getFieldValue('TEXT'); for (var i = 0; i < otherTypeArray.length; i++) { if (otherTypeArray[i] == "String") { return true; } else if (otherTypeArray[i] == "Number") { if (shouldIgnoreError) { // Error may be noted by WarningHandler's checkInvalidNumber return true; } else if (Blockly.Blocks.Utilities.NUMBER_REGEX.test(value)) { // Value passes a floating point regex return !isNaN(parseFloat(value)); } } else if (otherTypeArray[i] == "Key") { return true; } else if (otherTypeArray[i] == "Key") { return true; } } return false; }; /** * Bumps the text block out of its connection iff it is connected to a number * input and it no longer contains a number. * @param {string} finalValue The final value typed into the text input. * @this Blockly.Block */ Blockly.Blocks.text.bumpBlockOnFinishEdit = function(finalValue) { var connection = this.outputConnection.targetConnection; if (!connection) { return; } // If the connections are no longer compatible. if (!Blockly.Blocks.text.connectionCheck( this.outputConnection, connection, finalValue)) { connection.disconnect(); connection.sourceBlock_.bumpNeighbours_(); } } Blockly.Blocks['text_join'] = { // Create a string made up of any number of elements of any type. // TODO: (Andrew) Make this handle multiple arguments. 
category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_JOIN_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('ADD0') .appendField(Blockly.Msg.LANG_TEXT_JOIN_TITLE_JOIN); this.appendValueInput('ADD1'); this.setTooltip(Blockly.Msg.LANG_TEXT_JOIN_TOOLTIP); this.setMutator(new Blockly.Mutator(['text_join_item'])); this.emptyInputName = 'EMPTY'; this.repeatingInputName = 'ADD'; this.itemCount_ = 2; }, mutationToDom: Blockly.mutationToDom, domToMutation: Blockly.domToMutation, decompose: function (workspace) { return Blockly.decompose(workspace, 'text_join_item', this); }, compose: Blockly.compose, saveConnections: Blockly.saveConnections, addEmptyInput: function () { this.appendDummyInput(this.emptyInputName) .appendField(Blockly.Msg.LANG_TEXT_JOIN_TITLE_JOIN); }, addInput: function (inputNum) { var input = this.appendValueInput(this.repeatingInputName + inputNum).setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)); if (inputNum === 0) { input.appendField(Blockly.Msg.LANG_TEXT_JOIN_TITLE_JOIN); } return input; }, updateContainerBlock: function (containerBlock) { containerBlock.inputList[0].fieldRow[0].setText(Blockly.Msg.LANG_TEXT_JOIN_TITLE_JOIN); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_JOIN_TITLE_JOIN}] }; Blockly.Blocks['text_join_item'] = { // Add items. init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.appendDummyInput() .appendField(Blockly.Msg.LANG_TEXT_JOIN_ITEM_TITLE_ITEM); this.setPreviousStatement(true); this.setNextStatement(true); this.setTooltip(Blockly.Msg.LANG_TEXT_JOIN_ITEM_TOOLTIP); this.contextMenu = false; } }; Blockly.Blocks['text_length'] = { // String length. 
category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_LENGTH_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("number", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('VALUE') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_LENGTH_INPUT_LENGTH); this.setTooltip(Blockly.Msg.LANG_TEXT_LENGTH_TOOLTIP); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_LENGTH_INPUT_LENGTH}] }; Blockly.Blocks['text_isEmpty'] = { // Is the string null? category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_ISEMPTY_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("boolean", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('VALUE') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_ISEMPTY_INPUT_ISEMPTY); this.setTooltip(Blockly.Msg.LANG_TEXT_ISEMPTY_TOOLTIP); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_ISEMPTY_INPUT_ISEMPTY}] }; Blockly.Blocks['text_compare'] = { // Compare two texts category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_COMPARE_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("boolean", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('TEXT1') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_COMPARE_INPUT_COMPARE); this.appendValueInput('TEXT2') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(new Blockly.FieldDropdown(this.OPERATORS), 'OP'); this.setInputsInline(true); var thisBlock = this; this.setTooltip(function () { var mode = thisBlock.getFieldValue('OP'); return 
Blockly.Blocks.text_compare.TOOLTIPS()[mode]; }); }, typeblock: [{ translatedName: Blockly.Msg.LANG_TEXT_COMPARE_INPUT_COMPARE + Blockly.Msg.LANG_TEXT_COMPARE_LT, dropDown: { titleName: 'OP', value: 'LT' } }, { translatedName: Blockly.Msg.LANG_TEXT_COMPARE_INPUT_COMPARE + Blockly.Msg.LANG_TEXT_COMPARE_EQUAL, dropDown: { titleName: 'OP', value: 'EQUAL' } }, { translatedName: Blockly.Msg.LANG_TEXT_COMPARE_INPUT_COMPARE + Blockly.Msg.LANG_TEXT_COMPARE_NEQ, dropDown: { titleName: 'OP', value: 'NEQ' } }, { translatedName: Blockly.Msg.LANG_TEXT_COMPARE_INPUT_COMPARE + Blockly.Msg.LANG_TEXT_COMPARE_GT, dropDown: { titleName: 'OP', value: 'GT' } }] }; Blockly.Blocks.text_compare.OPERATORS = function () { return [ [Blockly.Msg.LANG_TEXT_COMPARE_LT, 'LT'], [Blockly.Msg.LANG_TEXT_COMPARE_EQUAL, 'EQUAL'], [Blockly.Msg.LANG_TEXT_COMPARE_NEQ, 'NEQ'], [Blockly.Msg.LANG_TEXT_COMPARE_GT, 'GT'] ] }; Blockly.Blocks.text_compare.TOOLTIPS = function () { return { LT: Blockly.Msg.LANG_TEXT_COMPARE_TOOLTIP_LT, EQUAL: Blockly.Msg.LANG_TEXT_COMPARE_TOOLTIP_EQUAL, NEQ: Blockly.Msg.LANG_TEXT_COMPARE_TOOLTIP_NEQ, GT: Blockly.Msg.LANG_TEXT_COMPARE_TOOLTIP_GT } }; Blockly.Blocks['text_trim'] = { // trim string category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_TRIM_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('TEXT') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_TRIM_TITLE_TRIM); this.setTooltip(Blockly.Msg.LANG_TEXT_TRIM_TOOLTIP); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_TRIM_TITLE_TRIM}] }; Blockly.Blocks['text_changeCase'] = { // Change capitalization. 
category: 'Text', helpUrl: function () { var mode = this.getFieldValue('OP'); return Blockly.Blocks.text_changeCase.HELPURLS()[mode]; }, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('TEXT') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(new Blockly.FieldDropdown(this.OPERATORS), 'OP'); var thisBlock = this; this.setTooltip(function () { var mode = thisBlock.getFieldValue('OP'); return Blockly.Blocks.text_changeCase.TOOLTIPS()[mode]; }); }, typeblock: [{ translatedName: Blockly.Msg.LANG_TEXT_CHANGECASE_OPERATOR_UPPERCASE, dropDown: { titleName: 'OP', value: 'UPCASE' } }, { translatedName: Blockly.Msg.LANG_TEXT_CHANGECASE_OPERATOR_DOWNCASE, dropDown: { titleName: 'OP', value: 'DOWNCASE' } }] }; Blockly.Blocks.text_changeCase.OPERATORS = function () { return [ [Blockly.Msg.LANG_TEXT_CHANGECASE_OPERATOR_UPPERCASE, 'UPCASE'], [Blockly.Msg.LANG_TEXT_CHANGECASE_OPERATOR_DOWNCASE, 'DOWNCASE'] ] }; Blockly.Blocks.text_changeCase.TOOLTIPS = function () { return { UPCASE: Blockly.Msg.LANG_TEXT_CHANGECASE_TOOLTIP_UPPERCASE, DOWNCASE: Blockly.Msg.LANG_TEXT_CHANGECASE_TOOLTIP_DOWNCASE } }; Blockly.Blocks.text_changeCase.HELPURLS = function () { return { UPCASE: Blockly.Msg.LANG_TEXT_CHANGECASE_HELPURL_UPPERCASE, DOWNCASE: Blockly.Msg.LANG_TEXT_CHANGECASE_HELPURL_DOWNCASE } }; Blockly.Blocks['text_starts_at'] = { // return index of first occurrence. 
category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_STARTS_AT_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("number", Blockly.Blocks.Utilities.OUTPUT)); var checkTypeText = Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT); this.interpolateMsg(Blockly.Msg.LANG_TEXT_STARTS_AT_INPUT, ['TEXT', checkTypeText, Blockly.ALIGN_RIGHT], ['PIECE', checkTypeText, Blockly.ALIGN_RIGHT], Blockly.ALIGN_RIGHT); this.setTooltip(Blockly.Msg.LANG_TEXT_STARTS_AT_TOOLTIP); this.setInputsInline(false); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_STARTS_AT_INPUT_STARTS_AT}] }; Blockly.Blocks['text_contains'] = { category: 'Text', helpUrl: function() { return Blockly.Blocks.text_contains.HELPURLS()[this.getMode()]; }, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); var utils = Blockly.Blocks.Utilities; var getType = utils.YailTypeToBlocklyType; var dropdown = new Blockly.FieldDropdown( Blockly.Blocks.text_contains.OPERATORS(), Blockly.Blocks.text_contains.adjustToMode.bind(this)); var text = new Blockly.FieldLabel( Blockly.Msg.LANG_TEXT_CONTAINS_INPUT_PIECE); this.setOutput(true, getType("boolean", utils.OUTPUT)); this.interpolateMsg( Blockly.Msg.LANG_TEXT_CONTAINS_INPUT, ['OP', dropdown], ['TEXT', getType('text', utils.INPUT), Blockly.ALIGN_RIGHT], ['PIECE_TEXT', text], ['PIECE', getType('text', utils.INPUT), Blockly.ALIGN_RIGHT], Blockly.ALIGN_RIGHT); this.setInputsInline(false); this.setTooltip(function() { return Blockly.Blocks.text_contains.TOOLTIPS()[this.getMode()]; }.bind(this)); }, // TODO: This can be removed after the blockly update b/c validators are // properly triggered on load from XML. 
domToMutation: function (xmlElement) { var mode = xmlElement.getAttribute('mode'); Blockly.Blocks.text_contains.adjustToMode.call(this, mode); }, mutationToDom: function () { var container = document.createElement('mutation'); container.setAttribute('mode', this.getMode()); return container; }, getMode: function() { return this.getFieldValue('OP'); }, typeblock: [ { translatedName: Blockly.Msg.LANG_TEXT_CONTAINS_OPERATOR_CONTAINS, dropDown: { titleName: 'OP', value: 'CONTAINS' } }, { translatedName: Blockly.Msg.LANG_TEXT_CONTAINS_OPERATOR_CONTAINS_ANY, dropDown: { titleName: 'OP', value: 'CONTAINS_ANY' } }, { translatedName: Blockly.Msg.LANG_TEXT_CONTAINS_OPERATOR_CONTAINS_ALL, dropDown: { titleName: 'OP', value: 'CONTAINS_ALL' } } ] }; /** * Updates the block's PIECE input to reflect the current mode. * @param {string} mode * @this {!Blockly.BlockSvg} */ Blockly.Blocks.text_contains.adjustToMode = function (mode) { var utils = Blockly.Blocks.Utilities; var getType = utils.YailTypeToBlocklyType; if (mode == 'CONTAINS') { this.getInput('PIECE') .setCheck(getType('text', utils.INPUT)); this.setFieldValue( Blockly.Msg.LANG_TEXT_CONTAINS_INPUT_PIECE, 'PIECE_TEXT'); } else { this.getInput('PIECE') .setCheck(getType('list', utils.INPUT)); this.setFieldValue( Blockly.Msg.LANG_TEXT_CONTAINS_INPUT_PIECE_LIST, 'PIECE_TEXT'); } }; // The order here determines the order in the dropdown Blockly.Blocks.text_contains.OPERATORS = function() { return [ [Blockly.Msg.LANG_TEXT_CONTAINS_OPERATOR_CONTAINS, 'CONTAINS'], [Blockly.Msg.LANG_TEXT_CONTAINS_OPERATOR_CONTAINS_ANY, 'CONTAINS_ANY'], [Blockly.Msg.LANG_TEXT_CONTAINS_OPERATOR_CONTAINS_ALL, 'CONTAINS_ALL'], ] }; Blockly.Blocks.text_contains.TOOLTIPS = function() { return { 'CONTAINS': Blockly.Msg.LANG_TEXT_CONTAINS_TOOLTIP_CONTAINS, 'CONTAINS_ANY': Blockly.Msg.LANG_TEXT_CONTAINS_TOOLTIP_CONTAINS_ANY, 'CONTAINS_ALL': Blockly.Msg.LANG_TEXT_CONTAINS_TOOLTIP_CONTAINS_ALL, } }; Blockly.Blocks.text_contains.HELPURLS = function() { return 
{ 'CONTAINS': Blockly.Msg.LANG_TEXT_CONTAINS_HELPURL_CONTAINS, 'CONTAINS_ANY': Blockly.Msg.LANG_TEXT_CONTAINS_HELPURL_CONTAINS_ANY, 'CONTAINS_ALL': Blockly.Msg.LANG_TEXT_CONTAINS_HELPURL_CONTAINS_ALL, } }; Blockly.Blocks['text_split'] = { // This includes all four split variants (modes). The name and type of the 'AT' arg // changes to match the selected mode. category: 'Text', helpUrl: function () { var mode = this.getFieldValue('OP'); return Blockly.Blocks.text_split.HELPURLS()[mode]; }, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("list", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('TEXT') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(new Blockly.FieldDropdown(this.OPERATORS, Blockly.Blocks.text_split.dropdown_onchange), 'OP') .appendField(Blockly.Msg.LANG_TEXT_SPLIT_INPUT_TEXT); this.appendValueInput('AT') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_SPLIT_INPUT_AT, 'ARG2_NAME') .setAlign(Blockly.ALIGN_RIGHT); }, // TODO: This can be removed after the blockly update b/c validators are // properly triggered on load from XML. // adjust for the mode when the block is read in domToMutation: function (xmlElement) { var mode = xmlElement.getAttribute('mode'); Blockly.Blocks.text_split.adjustToMode(mode, this); }, // put the mode in the DOM so it can be read in by domToMutation // Note: All attributes must be 100% lowercase because IE always writes // attributes as lowercase. 
mutationToDom: function () { var container = document.createElement('mutation'); var savedMode = this.getFieldValue('OP'); container.setAttribute('mode', savedMode); return container; }, typeblock: [{ translatedName: Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT, dropDown: { titleName: 'OP', value: 'SPLIT' } }, { translatedName: Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_FIRST, dropDown: { titleName: 'OP', value: 'SPLITATFIRST' } }, { translatedName: Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_ANY, dropDown: { titleName: 'OP', value: 'SPLITATANY' } }, { translatedName: Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_FIRST_OF_ANY, dropDown: { titleName: 'OP', value: 'SPLITATFIRSTOFANY' } }] }; // Change the name and type of ARG2 and set tooltop depending on mode Blockly.Blocks.text_split.adjustToMode = function (mode, block) { if (mode == 'SPLITATFIRST' || mode == 'SPLIT') { block.getInput("AT").setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)); block.setFieldValue(Blockly.Msg.LANG_TEXT_SPLIT_INPUT_AT, 'ARG2_NAME'); } else if (mode == 'SPLITATFIRSTOFANY' || mode == 'SPLITATANY') { block.getInput("AT").setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("list", Blockly.Blocks.Utilities.INPUT)); block.setFieldValue(Blockly.Msg.LANG_TEXT_SPLIT_INPUT_AT_LIST, 'ARG2_NAME'); } ; block.setTooltip(Blockly.Blocks.text_split.TOOLTIPS()[mode]); }; Blockly.Blocks.text_split.dropdown_onchange = function (mode) { Blockly.Blocks.text_split.adjustToMode(mode, this.sourceBlock_) }; // The order here determines the order in the dropdown Blockly.Blocks.text_split.OPERATORS = function () { return [ [Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT, 'SPLIT'], [Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_FIRST, 'SPLITATFIRST'], [Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_ANY, 'SPLITATANY'], [Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_FIRST_OF_ANY, 'SPLITATFIRSTOFANY'] ] }; Blockly.Blocks.text_split.TOOLTIPS = function () { return { 
SPLITATFIRST: Blockly.Msg.LANG_TEXT_SPLIT_TOOLTIP_SPLIT_AT_FIRST, SPLITATFIRSTOFANY: Blockly.Msg.LANG_TEXT_SPLIT_TOOLTIP_SPLIT_AT_FIRST_OF_ANY, SPLIT: Blockly.Msg.LANG_TEXT_SPLIT_TOOLTIP_SPLIT, SPLITATANY: Blockly.Msg.LANG_TEXT_SPLIT_TOOLTIP_SPLIT_AT_ANY } }; Blockly.Blocks.text_split.HELPURLS = function () { return { SPLITATFIRST: Blockly.Msg.LANG_TEXT_SPLIT_HELPURL_SPLIT_AT_FIRST, SPLITATFIRSTOFANY: Blockly.Msg.LANG_TEXT_SPLIT_HELPURL_SPLIT_AT_FIRST_OF_ANY, SPLIT: Blockly.Msg.LANG_TEXT_SPLIT_HELPURL_SPLIT, SPLITATANY: Blockly.Msg.LANG_TEXT_SPLIT_HELPURL_SPLIT_AT_ANY } }; Blockly.Blocks['text_split_at_spaces'] = { // Split at spaces category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_SPLIT_AT_SPACES_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("list", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('TEXT') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_SPLIT_AT_SPACES_TITLE); this.setTooltip(Blockly.Msg.LANG_TEXT_SPLIT_AT_TOOLTIP); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_SPLIT_AT_SPACES_TITLE}] }; Blockly.Blocks['text_segment'] = { // Create text segment category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_SEGMENT_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); var checkTypeText = Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT); var checkTypeNumber = Blockly.Blocks.Utilities.YailTypeToBlocklyType("number", Blockly.Blocks.Utilities.INPUT); this.interpolateMsg(Blockly.Msg.LANG_TEXT_SEGMENT_INPUT, ['TEXT', checkTypeText, Blockly.ALIGN_RIGHT], ['START', checkTypeNumber, Blockly.ALIGN_RIGHT], ['LENGTH', checkTypeNumber, Blockly.ALIGN_RIGHT], Blockly.ALIGN_RIGHT); 
this.setTooltip(Blockly.Msg.LANG_TEXT_SEGMENT_AT_TOOLTIP); this.setInputsInline(false); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_SEGMENT_TITLE_SEGMENT}] }; Blockly.Blocks['text_replace_all'] = { // Replace all occurrences of text category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_REPLACE_ALL_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); var checkTypeText = Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT); this.interpolateMsg(Blockly.Msg.LANG_TEXT_REPLACE_ALL_INPUT, ['TEXT', checkTypeText, Blockly.ALIGN_RIGHT], ['SEGMENT', checkTypeText, Blockly.ALIGN_RIGHT], ['REPLACEMENT', checkTypeText, Blockly.ALIGN_RIGHT], Blockly.ALIGN_RIGHT); this.setTooltip(Blockly.Msg.LANG_TEXT_REPLACE_ALL_TOOLTIP); this.setInputsInline(false); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_REPLACE_ALL_TITLE_REPLACE_ALL}] }; Blockly.Blocks['obfuscated_text'] = { // Text value. 
category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_TEXT_OBFUSCATE_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); var label = Blockly.Msg.LANG_TEXT_TEXT_OBFUSCATE + " " + Blockly.Msg.LANG_TEXT_TEXT_LEFT_QUOTE var textInput = new Blockly.FieldTextBlockInput(''); textInput.onFinishEditing_ = Blockly.Blocks.text .bumpBlockOnFinishEdit.bind(this); this.appendDummyInput() .appendField(label) .appendField(textInput,'TEXT') .appendField(Blockly.Msg.LANG_TEXT_TEXT_RIGHT_QUOTE); this.setOutput(true, [Blockly.Blocks.text.connectionCheck]); this.setTooltip(Blockly.Msg.LANG_TEXT_TEXT_OBFUSCATE_TOOLTIP); this.confounder = Math.random().toString(36).replace(/[^a-z]+/g, '').substr(0, 8); }, domToMutation: function(xmlElement) { var confounder = xmlElement.getAttribute('confounder'); this.confounder = confounder; }, mutationToDom: function() { var container = document.createElement('mutation') container.setAttribute('confounder', this.confounder); return container; }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_TEXT_OBFUSCATE}] }; Blockly.Blocks['text_is_string'] = { category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_TEXT_IS_STRING_HELPURL, init: function() { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.appendValueInput('ITEM') .appendField(Blockly.Msg.LANG_TEXT_TEXT_IS_STRING_TITLE) .appendField(Blockly.Msg.LANG_TEXT_TEXT_IS_STRING_INPUT_THING); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("boolean", Blockly.Blocks.Utilities.OUTPUT)); this.setTooltip(Blockly.Msg.LANG_TEXT_TEXT_IS_STRING_TOOLTIP); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_TEXT_IS_STRING_TITLE}] }; Blockly.Blocks['text_reverse'] = { // String reverse. 
category: 'Text', helpUrl: Blockly.Msg.LANG_TEXT_REVERSE_HELPURL, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); this.appendValueInput('VALUE') .setCheck(Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT)) .appendField(Blockly.Msg.LANG_TEXT_REVERSE_INPUT); this.setTooltip(Blockly.Msg.LANG_TEXT_REVERSE_TOOLTIP); }, typeblock: [{translatedName: Blockly.Msg.LANG_TEXT_REVERSE_INPUT}] }; Blockly.Blocks['text_replace_mappings'] = { // Replace all occurrences in mappings with their corresponding replacement category: 'Text', helpUrl: function () { var mode = this.getFieldValue('OP'); return Blockly.Blocks.text_replace_mappings.HELPURLS()[mode]; }, init: function () { this.setColour(Blockly.TEXT_CATEGORY_HUE); this.setOutput(true, Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.OUTPUT)); var checkTypeText = Blockly.Blocks.Utilities.YailTypeToBlocklyType("text", Blockly.Blocks.Utilities.INPUT); var checkTypeMap = Blockly.Blocks.Utilities.YailTypeToBlocklyType("dictionary", Blockly.Blocks.Utilities.INPUT); this.appendValueInput('MAPPINGS') .setCheck(checkTypeMap) .appendField(Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_TITLE) .setAlign(Blockly.ALIGN_RIGHT) this.appendValueInput('TEXT') .setCheck(checkTypeText) .appendField(Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_INPUT_TEXT) .setAlign(Blockly.ALIGN_RIGHT) this.appendDummyInput() .appendField(Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_INPUT_ORDER_PREFIX) .appendField(new Blockly.FieldDropdown(this.OPERATORS, Blockly.Blocks.text_replace_mappings.onchange), 'OP') .appendField(Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_INPUT_ORDER) .setAlign(Blockly.ALIGN_RIGHT) this.setInputsInline(false); // Assign 'this' to a variable for use in the closures below. 
var thisBlock = this; this.setTooltip(function() { var mode = thisBlock.getFieldValue('OP'); return Blockly.Blocks.text_replace_mappings.TOOLTIPS()[mode]; }); }, typeblock: [{ translatedName: Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_OPERATOR_LONGEST_STRING_FIRST, dropDown: { titleName: 'OP', value: 'LONGEST_STRING_FIRST' } }, { translatedName: Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_OPERATOR_DICTIONARY_ORDER, dropDown: { titleName: 'OP', value: 'DICTIONARY_ORDER' } } /*{ translatedName : Blockly.Msg.LANG_TEXT_SPLIT_OPERATOR_SPLIT_AT_FIRST, dropDown: { titleName: 'OP', value: 'EARLIEST_OCCURRENCE' } }*/ ] }; // The order here determines the order in the dropdown Blockly.Blocks.text_replace_mappings.OPERATORS = function () { return [ [Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_OPERATOR_LONGEST_STRING_FIRST, 'LONGEST_STRING_FIRST'], [Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_OPERATOR_DICTIONARY_ORDER, 'DICTIONARY_ORDER'] //['earliest occurrence', 'EARLIEST_OCCURRENCE'] ] }; Blockly.Blocks.text_replace_mappings.TOOLTIPS = function () { return { LONGEST_STRING_FIRST : Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_TOOLTIP_LONGEST_STRING_FIRST, DICTIONARY_ORDER : Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_TOOLTIP_DICTIONARY_ORDER //EARLIEST_OCCURRENCE : "tooltip" } }; Blockly.Blocks.text_replace_mappings.HELPURLS = function () { return { LONGEST_STRING_FIRST : Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_HELPURL_LONGEST_STRING_FIRST, DICTIONARY_ORDER : Blockly.Msg.LANG_TEXT_REPLACE_ALL_MAPPINGS_HELPURL_DICTIONARY_ORDER //EARLIEST_OCCURRENCE : "help" } }
mit-cml/appinventor-sources
appinventor/blocklyeditor/src/blocks/text.js
214,505
/* * Copyright (C) 2012 Andrew Neal * Copyright (C) 2014 The CyanogenMod Project * Copyright (C) 2015 Naman Dwivedi * Licensed under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law * or agreed to in writing, software distributed under the License is * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the specific language * governing permissions and limitations under the License. */ package com.naman14.timber.dataloaders; import android.content.Context; import android.database.Cursor; import android.provider.MediaStore; import android.provider.MediaStore.Audio.AudioColumns; import com.naman14.timber.models.Song; import com.naman14.timber.utils.PreferencesUtility; import java.util.ArrayList; import java.util.List; public class LastAddedLoader { private static Cursor mCursor; public static List<Song> getLastAddedSongs(Context context) { ArrayList<Song> mSongList = new ArrayList<>(); mCursor = makeLastAddedCursor(context); if (mCursor != null && mCursor.moveToFirst()) { do { long id = mCursor.getLong(0); String title = mCursor.getString(1); String artist = mCursor.getString(2); String album = mCursor.getString(3); int duration = mCursor.getInt(4); int trackNumber = mCursor.getInt(5); long artistId = mCursor.getInt(6); long albumId = mCursor.getLong(7); final Song song = new Song(id, albumId, artistId, title, artist, album, duration, trackNumber); mSongList.add(song); } while (mCursor.moveToNext()); } if (mCursor != null) { mCursor.close(); mCursor = null; } return mSongList; } public static final Cursor makeLastAddedCursor(final Context context) { //four weeks ago long fourWeeksAgo = (System.currentTimeMillis() / 1000) - (4 * 3600 * 24 * 7); long cutoff = 
PreferencesUtility.getInstance(context).getLastAddedCutoff(); // use the most recent of the two timestamps if (cutoff < fourWeeksAgo) { cutoff = fourWeeksAgo; } final StringBuilder selection = new StringBuilder(); selection.append(AudioColumns.IS_MUSIC + "=1"); selection.append(" AND " + AudioColumns.TITLE + " != ''"); selection.append(" AND " + MediaStore.Audio.Media.DATE_ADDED + ">"); selection.append(cutoff); return context.getContentResolver().query(MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, new String[]{"_id", "title", "artist", "album", "duration", "track", "artist_id", "album_id"}, selection.toString(), null, MediaStore.Audio.Media.DATE_ADDED + " DESC"); } }
naman14/Timber
app/src/main/java/com/naman14/timber/dataloaders/LastAddedLoader.java
214,506
404: Not Found
HanielB/cvc5
src/api/java/io/github/cvc5/SymbolManager.java
214,507
/**
 * Copyright © 2016-2024 The Thingsboard Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.thingsboard.server.dao.component;

import com.fasterxml.jackson.databind.JsonNode;
import com.github.fge.jsonschema.core.exceptions.ProcessingException;
import com.github.fge.jsonschema.core.report.ProcessingReport;
import com.github.fge.jsonschema.main.JsonSchemaFactory;
import com.github.fge.jsonschema.main.JsonValidator;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.id.ComponentDescriptorId;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.page.PageData;
import org.thingsboard.server.common.data.page.PageLink;
import org.thingsboard.server.common.data.plugin.ComponentDescriptor;
import org.thingsboard.server.common.data.plugin.ComponentScope;
import org.thingsboard.server.common.data.plugin.ComponentType;
import org.thingsboard.server.dao.exception.DataValidationException;
import org.thingsboard.server.dao.exception.IncorrectParameterException;
import org.thingsboard.server.dao.service.DataValidator;
import org.thingsboard.server.dao.service.Validator;

import java.util.Optional;

/**
 * DAO-backed implementation of {@link ComponentDescriptorService}.
 * Thin delegation layer: validates inputs, then forwards to
 * {@link ComponentDescriptorDao}.
 *
 * @author Andrew Shvayka
 */
@Service
@Slf4j
public class BaseComponentDescriptorService implements ComponentDescriptorService {

    @Autowired
    private ComponentDescriptorDao componentDescriptorDao;

    @Autowired
    private DataValidator<ComponentDescriptor> componentValidator;

    /**
     * Persists the descriptor if it is not already stored; when a descriptor
     * with the same clazz already exists, returns the stored one instead.
     */
    @Override
    public ComponentDescriptor saveComponent(TenantId tenantId, ComponentDescriptor component) {
        // Components are system-wide, so validation always runs against the system tenant.
        componentValidator.validate(component, data -> TenantId.SYS_TENANT_ID);
        return componentDescriptorDao.saveIfNotExist(tenantId, component)
                .orElseGet(() -> componentDescriptorDao.findByClazz(tenantId, component.getClazz()));
    }

    /** Looks a descriptor up by its id after a basic id sanity check. */
    @Override
    public ComponentDescriptor findById(TenantId tenantId, ComponentDescriptorId componentId) {
        Validator.validateId(componentId, "Incorrect component id for search request.");
        return componentDescriptorDao.findById(tenantId, componentId);
    }

    /** Looks a descriptor up by its fully-qualified class name. */
    @Override
    public ComponentDescriptor findByClazz(TenantId tenantId, String clazz) {
        Validator.validateString(clazz, "Incorrect clazz for search request.");
        return componentDescriptorDao.findByClazz(tenantId, clazz);
    }

    /** Pages through descriptors of the given type. */
    @Override
    public PageData<ComponentDescriptor> findByTypeAndPageLink(TenantId tenantId, ComponentType type, PageLink pageLink) {
        Validator.validatePageLink(pageLink);
        return componentDescriptorDao.findByTypeAndPageLink(tenantId, type, pageLink);
    }

    /** Pages through descriptors matching both scope and type. */
    @Override
    public PageData<ComponentDescriptor> findByScopeAndTypeAndPageLink(TenantId tenantId, ComponentScope scope, ComponentType type, PageLink pageLink) {
        Validator.validatePageLink(pageLink);
        return componentDescriptorDao.findByScopeAndTypeAndPageLink(tenantId, scope, type, pageLink);
    }

    /** Deletes the descriptor registered under the given class name. */
    @Override
    public void deleteByClazz(TenantId tenantId, String clazz) {
        Validator.validateString(clazz, "Incorrect clazz for delete request.");
        componentDescriptorDao.deleteByClazz(tenantId, clazz);
    }

    /**
     * Validates a component configuration against the JSON schema stored in
     * the descriptor's "schema" property.
     *
     * @return true when the configuration satisfies the schema
     * @throws DataValidationException if the descriptor has no "schema" property
     * @throws IncorrectParameterException if schema processing itself fails
     */
    @Override
    public boolean validate(TenantId tenantId, ComponentDescriptor component, JsonNode configuration) {
        JsonValidator jsonValidator = JsonSchemaFactory.byDefault().getValidator();
        try {
            JsonNode configurationDescriptor = component.getConfigurationDescriptor();
            if (!configurationDescriptor.has("schema")) {
                throw new DataValidationException("Configuration descriptor doesn't contain schema property!");
            }
            ProcessingReport validationReport = jsonValidator.validate(configurationDescriptor.get("schema"), configuration);
            return validationReport.isSuccess();
        } catch (ProcessingException e) {
            // Preserve the cause so the schema-processing failure is traceable.
            throw new IncorrectParameterException(e.getMessage(), e);
        }
    }
}
thingsboard/thingsboard
dao/src/main/java/org/thingsboard/server/dao/component/BaseComponentDescriptorService.java
214,508
package edu.drexel.psal.anonymouth.utils;

import java.io.Serializable;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

import edu.drexel.psal.ANONConstants;
import edu.drexel.psal.anonymouth.engine.Attribute;
import edu.drexel.psal.anonymouth.engine.DataAnalyzer;
import edu.drexel.psal.anonymouth.gooie.GUIMain;
import edu.drexel.psal.jstylo.generics.Logger;
import edu.drexel.psal.jstylo.generics.Logger.LogOut;
import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.process.Tokenizer;
import edu.stanford.nlp.trees.PennTreebankLanguagePack;
import edu.stanford.nlp.trees.TreebankLanguagePack;

// Grammatical tense of a sentence.
enum TENSE {PAST,PRESENT,FUTURE};
// Point of view of a sentence.
enum POV {FIRST_PERSON,SECOND_PERSON,THIRD_PERSON};
// Verb conjugation form of a sentence.
enum CONJ {SIMPLE,PROGRESSIVE,PERFECT,PERFECT_PROGRESSIVE};

/**
 * Holds all instances of our TaggedSentences, which then makes this our
 * "TaggedDocument". Provides means to create whole backend tagged documents
 * based on plain string text, methods to manipulate and access
 * TaggedSentences, etc.<br><br>
 *
 * Since the SpecialCharTracker is specific to each individual TaggedDocument
 * instance (for example, a version on the undo stack may have EOS characters
 * in a different location than the one in the current TaggedDocument
 * instance does), we keep our SpecialCharTracker instances here so they are
 * backed up in addition to the other variables for undo/redo.
 *
 * @author Andrew W.E. McDonald
 * @author Marc Barrowclift
 * @author Joe Muoio
 */
public class TaggedDocument implements Serializable {

	private static final long serialVersionUID = 2258415935896292619L;

	// Prefix identifying this class in Logger output.
	private final String NAME = "( "+this.getClass().getSimpleName()+" ) - ";
	// Sentinel passed to removeAndReplace(int, String) meaning "the user
	// deleted this sentence" (see that method's DELETE_STRING branch).
	private final String DELETE_STRING = "///";

	/**
	 * Our "document" of TaggedSentences
	 */
	protected ArrayList<TaggedSentence> taggedSentences;

	// Tracks EOS characters, quotes, parentheses, brackets, and braces for
	// THIS document version; backed up together with it for undo/redo.
	public SpecialCharTracker specialCharTracker;

	// Main GUI handle; also supplies the sentence maker via main.editorDriver.
	private GUIMain main;

	protected String documentTitle = "None";
	protected String documentAuthor = "None";

	// Stanford NLP helpers. Marked transient: re-created rather than serialized.
	protected transient TreebankLanguagePack tlp = new PennTreebankLanguagePack();
	protected transient List<? extends HasWord> sentenceTokenized;
	protected transient Tokenizer<? extends HasWord> toke;

	/**
	 * The way we have taggedSentences structured, while easy in most cases,
	 * breaks whenever the user tries to type after the last sentence. That's
	 * why we need to actively detect when they are doing so and create an
	 * empty tagged sentence for their new writing to go into every caret
	 * event that calls for it.<br><br>
	 *
	 * We create this blank tagged sentence whenever they are typing at the
	 * end of a document OR when they click to the end of document from
	 * somewhere else and there isn't already a blank sentence in place ready
	 * to accept the new text. This flag records whether that extra blank
	 * sentence currently exists, so we know whether one must be created.
	 */
	public boolean endSentenceExists;

	/**
	 * Instead of having to call size() on taggedSentences every time we want
	 * to know the size, we can keep track of it ourselves and keep things
	 * constant time.
	 */
	public int numOfSentences = 0;

	/**
	 * Instead of having to call length() on all taggedSentences every time
	 * we want to know the length of the document, we can keep track of it
	 * ourselves and keep things constant time.
	 */
	public int length = 0;

	/**
	 * Greater than -1 when we are supposed to be keeping an eye out for a
	 * specific EOS character that we are as of yet unsure whether or not
	 * it's an EOS or just an abbreviation, ellipsis, etc. Should be equal to
	 * the index of the EOS character we're watching so we can use it to set
	 * ignore to false if necessary.
	 */
	public int watchForEOS = -1;

	/**
	 * Greater than -1 when we are supposed to be keeping an eye out for a
	 * specific EOS character at the very end of the document that we are as
	 * of yet unsure whether or not it's an EOS or just an abbreviation,
	 * ellipsis, etc. Should be equal to the index of the EOS character
	 * (length of the document) we're watching so we can use it to set ignore
	 * to false if necessary.
	 */
	public int watchForLastSentenceEOS = -1;

	// Set true by removeAndReplace(int, String) when a whole sentence was deleted.
	public boolean userDeletedSentence = false;

	//=======================================================================
	//* CONSTRUCTORS / INITIALIZES *
	//=======================================================================

	/**
	 * Constructor, creates a blank taggedDocument.
	 *
	 * @param main
	 *        GUIMain instance
	 */
	public TaggedDocument(GUIMain main) {
		this.main = main;
		specialCharTracker = new SpecialCharTracker(main);
		taggedSentences = new ArrayList<TaggedSentence>(ANONConstants.EXPECTED_NUM_OF_SENTENCES);
		endSentenceExists = false;
	}

	/**
	 * Constructor, accepts an untagged string (a whole document) and prepares
	 * this TaggedDocument from it.
	 *
	 * @param main
	 *        GUIMain instance
	 * @param untaggedDocument
	 *        The String of the document you want to tag.
	 * @param initTracker
	 *        When true, initializes the SpecialCharTracker and the document
	 *        length from the text; when false, only stores the state and
	 *        leaves length unset (-1) — the caller is expected to run
	 *        makeAndTagSentences later.
*/
	public TaggedDocument(GUIMain main, String untaggedDocument, boolean initTracker) {
		this.main = main;
		untaggedDocument = formatDocument(untaggedDocument);
		specialCharTracker = new SpecialCharTracker(main);
		taggedSentences = new ArrayList<TaggedSentence>(ANONConstants.EXPECTED_NUM_OF_SENTENCES);
		endSentenceExists = false;

		if (!initTracker) {
			// Length unknown until the document is actually tagged.
			length = -1;
			// makeAndTagSentences(untaggedDocument, true);
		} else {
			initSpecialCharTracker(untaggedDocument);
			setDocumentLength(untaggedDocument);
		}
	}

	/**
	 * Constructor, accepts an untagged string (a whole document), and makes
	 * sentence tokens out of it to create a full Tagged Document.
	 *
	 * @param main
	 *        GUIMain instance
	 * @param untaggedDocument
	 *        The String of the document you want to tag.
	 * @param docTitle
	 *        The title of the document.
	 * @param author
	 *        The author of the document.
	 */
	public TaggedDocument(GUIMain main, String untaggedDocument, String docTitle, String author) {
		this.main = main;
		this.documentTitle = docTitle;
		this.documentAuthor = author;
		untaggedDocument = formatDocument(untaggedDocument);
		specialCharTracker = new SpecialCharTracker(main);
		taggedSentences = new ArrayList<TaggedSentence>(ANONConstants.EXPECTED_NUM_OF_SENTENCES);
		setDocumentLength(untaggedDocument);
		endSentenceExists = false;
		makeAndTagSentences(untaggedDocument, true);
	}

	/**
	 * Constructor, accepts another TaggedDocument instance and initiates a
	 * deep copy of it (so we're not just creating another pointer like with
	 * == but creating an actual copy)
	 *
	 * @param td
	 *        The TaggedDocument you want to initiate a deep copy for
	 */
	public TaggedDocument(TaggedDocument td) {
		this.main = td.main;
		int numTaggedSents = td.taggedSentences.size();
		numOfSentences = numTaggedSents;
		taggedSentences = new ArrayList<TaggedSentence>(ANONConstants.EXPECTED_NUM_OF_SENTENCES);

		//Copy all TaggedSentences
		for (int i = 0; i < numTaggedSents; i++)
			taggedSentences.add(new TaggedSentence(td.taggedSentences.get(i)));

		//Copy document author and title (Strings are immutable)
		documentAuthor = td.documentAuthor;
		documentTitle = td.documentTitle;

		//Then the total number of sentences (could probably chuck this)
		numOfSentences = td.numOfSentences;

		//Finally, copy the SpecialCharTracker
		specialCharTracker = new SpecialCharTracker(td.specialCharTracker);
		setDocumentLength(td.getUntaggedDocument());
		endSentenceExists = td.endSentenceExists;
	}

	/**
	 * Initializes the SpecialCharTracker by adding all EOSes currently
	 * present in the passed document (to be used ONLY when using the
	 * "waitToTag" flag in the appropriate constructor)
	 *
	 * @param document
	 *        The String that this TaggedDocument instance will represent
	 */
	private void initSpecialCharTracker(String document) {
		char[] docToAnonymize = document.toCharArray();
		int numChars = docToAnonymize.length;
		for (int i = 0; i < numChars; i++) {
			if (specialCharTracker.isEOS(docToAnonymize[i])) {
				// "false": not yet confirmed as a real sentence end
				// (could be an abbreviation, ellipsis, etc.).
				specialCharTracker.addEOS(docToAnonymize[i], i, false);
			} else if (specialCharTracker.isQuote(docToAnonymize[i])) {
				specialCharTracker.addQuote(i);
			} else if (specialCharTracker.isParenthesis(docToAnonymize[i])) {
				specialCharTracker.addParenthesis(i, docToAnonymize[i]);
			} else if (specialCharTracker.isBracket(docToAnonymize[i])) {
				specialCharTracker.addBracket(i, docToAnonymize[i]);
			} else if (specialCharTracker.isSquiggly(docToAnonymize[i])) {
				specialCharTracker.addSquiggly(i, docToAnonymize[i]);
			}
		}
	}

	/**
	 * Replace Unicode format characters that will ruin the regular
	 * expressions (because non-printable characters in the document still
	 * take up indices, but you won't know they're there until you
	 * "arrow" through the document and have to hit the same arrow twice to
	 * move past a certain point). Note that we must use "Cf" rather than
	 * "C". If we use "C" or "Cc" (which includes control characters), we
	 * remove our newline characters and this screws up the document. "Cf"
	 * is "other, format". "Cc" is "other, control". Using "C" will match
	 * both of them.
 * @param text
 *        The document String you want to format
 */
	private String formatDocument(String text) {
		text = text.trim();
		// Shield real newlines so the \p{C} strip below cannot remove them.
		text = text.replaceAll("\n", "<<<NEWLINE>>>");
		text = text.replaceAll("\u201c","\""); //Unicode left quotation mark
		text = text.replaceAll("\u201d","\""); //Unicode right quotation mark
		text = text.replaceAll("\\u2026", "..."); //Unicode ellipsis
		text = text.replaceAll("\\p{C}", "");
		text = text.replaceAll("<<<NEWLINE>>>", "\n");
		return text;
	}

	//=======================================================================
	//* BOOKKEEPING *
	//=======================================================================

	/**
	 * Increases or decreases the length of this document based on the given
	 * amount.
	 *
	 * @param amount
	 *        The amount to change the length by (negative for decrease,
	 *        positive for increase)
	 */
	public void incrementDocumentLength(int amount) {
		length += amount;
	}

	/**
	 * Sets the initial length for this TaggedDocument, should be called only
	 * during initialization.
	 *
	 * @param document
	 *        The string of the document you are tagging.
	 */
	public void setDocumentLength(String document) {
		length = document.length();
	}

	/**
	 * Clears all saved translation attributes from every TaggedSentence in
	 * this instance. This is to be used for undo/redo, since backing up the
	 * translations as well grinds the editor to a halt.<br><br>
	 *
	 * TODO: Possibly a way to thread it so this doesn't happen?
	 */
	public void clearAllTranslations() {
		for (int i = 0; i < numOfSentences; i++) {
			taggedSentences.get(i).getTranslations().clear();
		}
	}

	/**
	 * Consolidates features for an ArrayList of TaggedSentences (does both
	 * word level and sentence level features)
	 *
	 * @param alts
	 *        The TaggedSentences whose features should be packed.
	 */
	public void consolidateFeatures(ArrayList<TaggedSentence> alts){
		for (TaggedSentence ts:alts) {
			ConsolidationStation.featurePacker(ts);
		}
	}

	/**
	 * Consolidates features for a single TaggedSentence object
	 *
	 * @param ts
	 *        The TaggedSentence whose features should be packed.
	 */
	public void consolidateFeatures(TaggedSentence ts){
		ConsolidationStation.featurePacker(ts);
	}

	//=======================================================================
	//* MAIN TAGGEDSENTENCE MANIPULATION METHODS *
	//=======================================================================

	//================ CREATE TAGGED SENTENCES ================

	/**
	 * Takes a String of sentences (can be an entire document), breaks it up
	 * into individual sentences (sentence tokens), breaks those up into
	 * tokens, and then tags them (via MaxentTagger). Each tagged sentence is
	 * saved into a TaggedSentence object, along with its untagged
	 * counterpart.
 * @param untagged
 *        String containing sentences to tag
 * @param appendTaggedSentencesToGlobalArrayList
 *        if true, appends the TaggedSentence objects to the TaggedDocument's
 *        arraylist of TaggedSentences
 *
 * @return
 *        An ArrayList of the completed TaggedSentences
 */
	public ArrayList<TaggedSentence> makeAndTagSentences(String untagged, boolean appendTaggedSentencesToGlobalArrayList) {
		boolean fullDocument = false;

		/*
		 * If our length variable is 0, that means that the constructor
		 * with NO INITIAL DOCUMENT TEXT was run, therefore we set the
		 * length of the string as the TaggedDocument length and format
		 * the String since it's our main document
		 */
		if (length == 0 ) {
			untagged = formatDocument(untagged);
			setDocumentLength(untagged);
			initSpecialCharTracker(untagged);
			fullDocument = true;
		}

		ArrayList<String> untaggedSents = main.editorDriver.sentenceMaker.makeSentences(untagged, fullDocument);
		ArrayList<TaggedSentence> taggedSentences = new ArrayList<TaggedSentence>(untaggedSents.size());
		Iterator<String> strRayIter = untaggedSents.iterator();
		String tempSent;

		if (untagged.matches("\\s\\s*")) {
			// Whitespace-only input: one TaggedSentence, no NLP tagging needed.
			TaggedSentence taggedSentence = new TaggedSentence(untagged);
			taggedSentences.add(taggedSentence);
		} else if (untagged.matches("")) {
			// Empty input: likewise a single (empty) TaggedSentence.
			TaggedSentence taggedSentence = new TaggedSentence(untagged);
			taggedSentences.add(taggedSentence);
		} else {
			while (strRayIter.hasNext()) {
				tempSent = strRayIter.next();
				TaggedSentence taggedSentence = new TaggedSentence(tempSent);
				// Tokenize with the Penn Treebank tokenizer, then POS-tag.
				toke = tlp.getTokenizerFactory().getTokenizer(new StringReader(tempSent));
				sentenceTokenized = toke.tokenize();
				taggedSentence.setTaggedSentence(Tagger.mt.tagSentence(sentenceTokenized));
				consolidateFeatures(taggedSentence);
				// todo: put stuff here
				taggedSentences.add(taggedSentence);
			}
		}

		if (appendTaggedSentencesToGlobalArrayList == true) {
			int i = 0;
			int len = taggedSentences.size();
			for (i = 0; i < len; i++) {
				numOfSentences++;
				this.taggedSentences.add(taggedSentences.get(i));
			}
		}
		return taggedSentences;
	}

	/**
	 * Adds sentToAdd at placeToAdd in this TaggedDocument
	 *
	 * @param sentToAdd
	 *        The TaggedSentence you want to add to the TaggedDocument
	 * @param placeToAdd
	 *        The index in which you want to add it (0, 1, 2, etc.)
	 */
	public void addTaggedSentence(TaggedSentence sentToAdd, int placeToAdd) {
		taggedSentences.add(placeToAdd,sentToAdd);
	}

	/**
	 * Creates a new sentence at the very end of the document with the
	 * given text (usually whitespace or most likely "")
	 *
	 * @param text
	 *        The text you want the new TaggedSentence to be made with
	 *        (most likely "" or whitespace)
	 */
	public void makeNewEndSentence(String text) {
		TaggedSentence newSentence = new TaggedSentence(text);
		taggedSentences.add(newSentence);
		numOfSentences++;
	}

	//================ CONCAT TAGGED SENTENCES ================

	/**
	 * Accepts a variable number of TaggedSentences and returns a single
	 * TaggedSentence, preserving all original Word objects.<br><br>
	 *
	 * Note that the sentences will be concatenated together in the order
	 * that they are passed into the method.<br><br>
	 *
	 * NOTE(review): unlike the ArrayList overload, this version does NOT
	 * merge sentenceLevelFeaturesFound — confirm whether that is intentional.
	 *
	 * @param taggedSentences
	 *        A variable number of TaggedSentences
	 *
	 * @return
	 *        A single tagged sentence with the properties of all the
	 *        sentences in the list.
	 */
	public TaggedSentence concatSentences(TaggedSentence ... taggedSentences) {
		TaggedSentence toReturn = new TaggedSentence(taggedSentences[0]);
		int numSents = taggedSentences.length;
		for (int i = 1; i < numSents; i++) {
			toReturn.wordsInSentence.addAll(taggedSentences[i].wordsInSentence);
			toReturn.untagged += taggedSentences[i].untagged;
		}
		return toReturn;
	}

	/**
	 * Accepts an ArrayList of TaggedSentences and returns a single
	 * TaggedSentence, preserving all original Word objects.<br><br>
	 *
	 * Note that the sentences will be concatenated together in the order
	 * that they are passed into the method.
	 *
	 * @param taggedSentences
	 *        An ArrayList of TaggedSentences
	 *
	 * @return
	 *        A single tagged sentence with the properties of all the
	 *        sentences in the list.
*/ public TaggedSentence concatSentences(ArrayList<TaggedSentence> taggedSentences) { TaggedSentence toReturn =new TaggedSentence(taggedSentences.get(0)); TaggedSentence thisTaggedSent; int size = taggedSentences.size(); for (int i = 1; i < size; i++) { thisTaggedSent = taggedSentences.get(i); toReturn.wordsInSentence.addAll(thisTaggedSent.wordsInSentence); toReturn.untagged += thisTaggedSent.untagged; toReturn.sentenceLevelFeaturesFound.merge(thisTaggedSent.sentenceLevelFeaturesFound); } return toReturn; } /** * Merges the TaggedSentences specified by the indices in * 'taggedSentenceIndicesToConcat' into one TaggedSentence.<br><br> * * Note that the sentences will be concatenated together in the order that * they are passed into the method. * * @param taggedSentenceIndicesToConcat * * @return * The TaggedSentence that resulted from the merging */ public TaggedSentence concatSentences(int[] taggedSentenceIndicesToConcat) { TaggedSentence toReturn =new TaggedSentence(taggedSentences.get(taggedSentenceIndicesToConcat[0])); TaggedSentence thisTaggedSent; for (int i = 1; i < numOfSentences; i++) { thisTaggedSent = taggedSentences.get(taggedSentenceIndicesToConcat[i]); toReturn.wordsInSentence.addAll(thisTaggedSent.wordsInSentence); toReturn.untagged += thisTaggedSent.untagged; toReturn.sentenceLevelFeaturesFound.merge(thisTaggedSent.sentenceLevelFeaturesFound); } return toReturn; } /** * Concatenates the two TaggedSentences (in order), removes the second * TaggedSentence, and replaces the first with the concatenated * TaggedSentence Takes care of bookkeeping. 
* * @param taggedSentenceOne * The first TaggedSentence * @param tsOneIndex * The first TaggedSentence's index in the TaggedDocument * @param taggedSentenceTwo * The second TaggedSentence * @param tsTwoIndex * The second TaggedSentence's index in the TaggedDocument * * @return * The completed and combined TaggedSentence */ public TaggedSentence concatRemoveAndReplace(TaggedSentence taggedSentenceOne, int tsOneIndex, TaggedSentence taggedSentenceTwo, int tsTwoIndex) { TaggedSentence replaceWith = concatSentences(taggedSentenceOne, taggedSentenceTwo); removeAndReplace(tsTwoIndex, DELETE_STRING); //Delete the second sentence Logger.logln(NAME+"*** Replacing: \""+taggedSentenceOne.getUntagged()+"\"\n" + NAME + "*** With: \""+replaceWith.getUntagged() + "\""); return removeAndReplace(tsOneIndex,replaceWith); } //================ REMOVE TAGGED SENTENCES ================ /** * removes TaggedSentence at indexToRemove from this TaggedDocument. Does * NOT take care of any bookkeeping issues -- should only be called by * methods that do (removeAndReplace) * * @param indexToRemove * The index where you want to remove a taggedSentence from * * @return * The removed TaggedSentence. Returns a null TaggedSentence if * no TaggedSentence existed at the given index or if the index * was not within acceptable bounds (< 0 or >= numOfSentences) */ private TaggedSentence removeTaggedSentence(int indexToRemove) { TaggedSentence returnSentence = null; try { returnSentence = taggedSentences.remove(indexToRemove); } catch (Exception e) { Logger.logln(NAME+"Attemp to access TaggedSentence in an unacceptable index = " + indexToRemove, LogOut.STDERR); } return returnSentence; } /** * Removes all tagged sentences at every given index. * * @param indicesToRemove * An integer array of the indices you wish to * remove (0, 1, 2, etc.) 
*/
	public void removeTaggedSentences(int[] indicesToRemove) {
		int numToRemove = indicesToRemove.length;
		for (int i = 0; i < numToRemove; i++)
			removeAndReplace(indicesToRemove[i], DELETE_STRING);
	}

	/**
	 * Removes the existing TaggedSentence at the given index and replaces it
	 * with a new TaggedSentence made from the given text.
	 *
	 * @param sentNumber
	 *        The index of the TaggedSentence being replaced
	 * @param sentsToAdd
	 *        A String representing the sentence(s) from the editBox, or
	 *        DELETE_STRING to signal that the sentence was deleted
	 */
	public void removeAndReplace(int sentNumber, String sentsToAdd) {//, int indexToRemove, int placeToAdd){
		TaggedSentence toReplace = taggedSentences.get(sentNumber);
		Logger.logln(NAME+"Removing: \""+toReplace.getUntagged() + "\"");
		Logger.logln(NAME+"Adding: \""+sentsToAdd + "\"");

		if (sentsToAdd.equals(DELETE_STRING)) {//checks to see if the user deleted the current sentence
			//CALL COMPARE
			TaggedSentence wasReplaced = removeTaggedSentence(sentNumber);
			Logger.logln(NAME+"User deleted a sentence.");
			updateReferences(toReplace,new TaggedSentence(""));//all features must be deleted
			userDeletedSentence = true;
			numOfSentences--;
			wasReplaced.delete();
			wasReplaced = null;
			return;
		}

		// The replacement text may tokenize into several sentences; tag them
		// all and insert them (in order) where the old sentence was.
		ArrayList<TaggedSentence> taggedSentsToAdd = makeAndTagSentences(sentsToAdd,false);
		TaggedSentence wasReplaced = removeTaggedSentence(sentNumber);
		numOfSentences--;
		//call compare
		int len = taggedSentsToAdd.size();
		for (int i = 0; i < len; i++) {
			addTaggedSentence(taggedSentsToAdd.get(i),sentNumber);
			sentNumber++;
			numOfSentences++;
		}

		// Feature deltas are computed against the concatenation of all
		// newly-inserted sentences.
		TaggedSentence concatted = concatSentences(taggedSentsToAdd);
		updateReferences(toReplace,concatted);
		wasReplaced.delete();
		wasReplaced = null;
	}

	/**
	 * Removes multiple sentences and replaces them with a single
	 * TaggedSentence. To be used with the right-click menu item "combine
	 * sentences".
	 *
	 * @param sentsToRemove
	 *        An ArrayList of TaggedSentences to remove
	 * @param sentToAdd
	 *        The TaggedSentence to want to replace them all with.
	 *
	 * @return
	 *        The index (0-based) of the first sentence removed.
	 */
	public int removeMultipleAndReplace(ArrayList<TaggedSentence> sentsToRemove, TaggedSentence sentToAdd) {
		int size = sentsToRemove.size();
		int startingSentence = 0;

		for (int i = 0; i < size; i++) {
			if (i == 0) {
				// Remember where the first removed sentence was so the
				// replacement can be inserted at that position.
				startingSentence = taggedSentences.indexOf(sentsToRemove.get(i));
			}
			taggedSentences.remove(sentsToRemove.get(i));
			numOfSentences--;
		}

		addTaggedSentence(sentToAdd, startingSentence);
		numOfSentences++;

		//TODO: check if this okay to do
		for (int j = 0; j < size; j++) {
			updateReferences(sentsToRemove.get(j), sentToAdd);
		}

		return startingSentence;
	}

	/**
	 * Removes the TaggedSentence at 'sentNumber', and switches in 'toAdd' in
	 * its place. Takes care of all bookkeeping issues.
	 *
	 * @param sentNumber
	 *        The number of the TaggedSentence to remove and replace
	 * @param toAdd
	 *        The TaggedSentence to want to replace the removed TaggedSentence with
	 *
	 * @return
	 *        The TaggedSentence that was removed (may be null if removal failed)
	 */
	public TaggedSentence removeAndReplace(int sentNumber, TaggedSentence toAdd) {
		TaggedSentence toReplace = taggedSentences.get(sentNumber);
		Logger.logln(NAME+"Removing: "+toReplace.toString());
		Logger.logln(NAME+"Adding: "+toAdd.getUntagged());

		if (toAdd.getUntagged().matches("^\\s*$")) {//checks to see if the user deleted the current sentence
			//CALL COMPARE
			TaggedSentence wasReplaced = removeTaggedSentence(sentNumber);
			Logger.logln(NAME+"User deleted a sentence.");
			updateReferences(toReplace,new TaggedSentence(""));//all features must be deleted
			numOfSentences--;
			return wasReplaced;
		}

		// no need to subtract one from numOfSentences when removing a sentence, because we are putting a new sentence in its place immediately
		TaggedSentence wasReplaced = removeTaggedSentence(sentNumber);
		addTaggedSentence(toAdd,sentNumber);
		Logger.logln(NAME+"TaggedSent to add: "+toAdd.toString());
		Logger.logln(NAME+"TaggedSent to remove: "+toReplace.toString());
		updateReferences(toReplace,toAdd);
		return wasReplaced;
	}

	//=======================================================================
	//* ASSORTED *
//=======================================================================

/**
 * Checks all sentences in the tagged document and returns whether or not
 * they are all translated.
 *
 * @return
 * 		true only when every TaggedSentence reports isTranslated()
 */
public boolean isTranslated() {
	boolean result = true;

	for (int i = 0; i < numOfSentences; i++) {
		if (!taggedSentences.get(i).isTranslated()) {
			result = false;
			break;
		}
	}

	return result;
}

/**
 * Our custom toString() method that allows us to print out a nice, formatted
 * version of our tagged document when printed using standard output.
 *
 * @return
 * 		The formatted string to print
 */
@Override
public String toString() {
	String toReturn = "Document Title: "+documentTitle+" Author: "+documentAuthor+"\n";

	for (int i = 0; i < numOfSentences; i++){
		toReturn += taggedSentences.get(i).toString()+"\n";
	}

	return toReturn;
}

/**
 * Updates the referenced Attributes 'toModifyValue's (present value) with
 * the amount that must be added/subtracted from each respective value.
 *
 * Mutates the global DataAnalyzer.topAttributes array in place; only
 * straight "count" style features can be adjusted by simple addition, so
 * Percentage/Average features are logged as errors and skipped.
 *
 * @param oldSentence
 * 		The pre-editing version of the sentence(s)
 * @param newSentence
 * 		The post-editing version of the sentence(s)
 */
private void updateReferences(TaggedSentence oldSentence, TaggedSentence newSentence){
	//Logger.logln(NAME+"Old Sentence: "+oldSentence.toString()+"\nNew Sentence: "+newSentence.toString());
	// Sparse per-feature deltas between the old and new version of the text.
	SparseReferences updatedValues = newSentence.getOldToNewDeltas(oldSentence);
	//Logger.logln(NAME+updatedValues.toString());

	for(Reference ref:updatedValues.references){
		//Logger.logln(NAME+"Attribute: "+DataAnalyzer.topAttributes[ref.index].getFullName()+" pre-update value: "+DataAnalyzer.topAttributes[ref.index].getToModifyValue());
		// Feature kind is detected by name — see note in the javadoc above.
		if(DataAnalyzer.topAttributes[ref.index].getFullName().contains("Percentage")){
			//then it is a percentage.
			Logger.logln(NAME+"Attribute: "+DataAnalyzer.topAttributes[ref.index].getFullName()+"Is a percentage! ERROR!",Logger.LogOut.STDERR);
		}
		else if(DataAnalyzer.topAttributes[ref.index].getFullName().contains("Average")){
			//then it is an average
			Logger.logln(NAME+"Attribute: "+DataAnalyzer.topAttributes[ref.index].getFullName()+"Is an average! ERROR!",Logger.LogOut.STDERR);
		}
		else{
			DataAnalyzer.topAttributes[ref.index].setToModifyValue((DataAnalyzer.topAttributes[ref.index].getToModifyValue() + ref.value));
			//Logger.logln(NAME+"Updated attribute: "+DataAnalyzer.topAttributes[ref.index].getFullName());
		}
		//Logger.logln(NAME+"Attribute: "+DataAnalyzer.topAttributes[ref.index].getFullName()+" post-update value: "+DataAnalyzer.topAttributes[ref.index].getToModifyValue());
	}
}

//=======================================================================
//* GET METHODS                                                         *
//=======================================================================

/**
 * Returns the number of words in the tagged document.
 *
 * @return
 * 		The number of words (>= 0)
 */
public int getWordCount() {
	int wordCount = 0;

	for (TaggedSentence ts : taggedSentences) {
		wordCount += ts.size();
	}

	return wordCount;
}

/**
 * Returns all words in the tagged document
 *
 * @return
 * 		An ArrayList of Word objects
 */
public ArrayList<Word> getWords() {
	int numWords = getWordCount();
	ArrayList<Word> theWords = new ArrayList<Word>(numWords);

	for (TaggedSentence ts: taggedSentences) {
		theWords.addAll(ts.wordsInSentence);
	}

	return theWords;
}

/**
 * Returns all the words in the given tagged sentence
 *
 * @param sentence
 * 		The TaggedSentence instance you want to obtain words
 * 		from
 *
 * @return
 * 		An array of strings representing all the words in the
 * 		given sentence.
*/ public String[] getWordsInSentence(TaggedSentence sentence) { ArrayList<Word> theWords = sentence.getWordsInSentence(); int size = theWords.size(); String[] words = new String[size]; for (int i = 0; i < size; i++) { words[i] = theWords.get(i).word; } return words; } /** * Returns all words for each unique word in the given tagged sentence (no * duplicates) * * @param sentence * The TaggedSentence instance you want all unique words from the * TaggedSentence * * @return * An array of strings representing all unique words from the * TaggedSentence */ public String[] getWordsInSentenceNoDups(TaggedSentence[] sentences) { ArrayList<Word> unfiltered = new ArrayList<Word>(); int length = sentences.length; for (int i = 0; i < length; i++) { unfiltered.addAll(sentences[i].getWordsInSentence()); } int size = unfiltered.size(); HashSet<String> wordList = new HashSet<String>(size); String curWord; String[] words = new String[size]; for (int i = 0; i < size; i++) { curWord = unfiltered.get(i).word; if (wordList.contains(curWord)) { continue; } words[i] = unfiltered.get(i).word; wordList.add(words[i]); } return words; } /** * Returns this TaggedDocument (The ArrayList of TaggedSentences) * * @return * An ArrayList of TaggedSentences */ public ArrayList<TaggedSentence> getTaggedDocument() { return taggedSentences; } /** * Returns this document (The ArrayList of untagged strings) * * @return * An ArrayList of strings */ public ArrayList<String> getUntaggedSentences() { ArrayList<String> sentences = new ArrayList<String>(); for (int i=0;i<taggedSentences.size();i++) sentences.add(taggedSentences.get(i).getUntagged()); return sentences; } /** * Returns this document as a single untagged String * * @return * The complete String of the document (untagged) */ public String getUntaggedDocument() { String str = ""; for (int i = 0; i < numOfSentences; i++){ str += taggedSentences.get(i).getUntagged(); } return str; } /** * Returns the lengths of each sentence. 
(0 for sentence 1 length, 1 for * sentence 2 length, etc.) * * @return * An integer array representing the lengths for each sentence */ public int[] getSentenceLengths() { int numSents = taggedSentences.size(); int[] lengthsToReturn = new int[numSents]; for (int i = 0; i < numSents; i++) { lengthsToReturn[i] = taggedSentences.get(i).getLength(); } return lengthsToReturn; } /** * Returns TaggedSentence number at this index * * @param number * The index for the TaggedSentence you want (0, 1, 2, etc.) * * @return * The TaggedSentence at that index. Returns a null TaggedSentence * if none exist at the given index (or if it was an unacceptable * position < 0 or >= size) */ public TaggedSentence getSentenceNumber(int number) { TaggedSentence returnSentence = null; try { returnSentence = taggedSentences.get(number); } catch (Exception e) { Logger.logln(NAME+"Attemp to access TaggedSentence in an unacceptable index = " + number, LogOut.STDERR); } return returnSentence; } /** * Essentially the same thing as getSentenceNumAt(), except instead of * accepting a sentence number and finding the taggedSentence that corresponds * to that number it accepts an index (caret position), and finds the * taggedSentence that corresponds to that index. * * @param index * The position in the document text. * * @return * The TaggedSentence found at the index. If none exists, null is returned. */ public TaggedSentence getTaggedSentenceAtIndex(int index) { int newIndex = 0; int pastIndex = 0; int length = 0; TaggedSentence returnValue = null; for (int i = 0; i < numOfSentences; i++) { length = taggedSentences.get(i).getUntagged().length(); newIndex = length + pastIndex; if (index >= pastIndex && index < newIndex) { returnValue = taggedSentences.get(i); break; } else { pastIndex = newIndex; } } return returnValue; } /** * Calculates and returns the current change needed for the document to * reach it's optimal feature values. 
* * @return * The double value for the current change needed, to get the actual * "percent to the goal", simply take this return value, subtract it * from the saved value from getMaxChangeNeeded(), then divide by * the saved value from getMaxChangeNeeded(). This should all be handled * within AnonymityBar.java */ public double getCurrentChangeNeeded() { int numAttribs = DataAnalyzer.topAttributes.length; double currentChangeNeeded = 0; Attribute tempAttrib; for(int i = 0; i < numAttribs; i++) { tempAttrib = DataAnalyzer.topAttributes[i]; // not really sure how to handle this... if (tempAttrib.getFullName().contains("Percentage") || tempAttrib.getFullName().contains("Average")) continue; if (tempAttrib.getToModifyValue() <= 0) continue; currentChangeNeeded += tempAttrib.getPercentChangeNeeded(false,false,true); } return currentChangeNeeded; } /** * Calculates and returns the anonymity index of the document if all the * document features were equal to their target values. This is the ideal * scenario (though it never happens in the real world since features rely * so much on one another and can easily change each other).<br><br> * * This should only need to be called ONCE, the return value should be saved * and references for the future percent change calculations. * * @return * The beginning where getCurrentChangeNeeded() started from, acts as the * denominator for the percent change needed calculations in AnonymityBar */ public double getMaxChangeNeeded() { int numAttribs = DataAnalyzer.topAttributes.length; double maxChange = 0; Attribute tempAttrib; for(int i = 0; i < numAttribs; i++) { tempAttrib = DataAnalyzer.topAttributes[i]; // not really sure how to handle this... if(tempAttrib.getFullName().contains("Percentage") || tempAttrib.getFullName().contains("Average")) continue; if(tempAttrib.getToModifyValue() <= 0) continue; maxChange += Math.abs(tempAttrib.getFeatureBaselinePercentChangeNeeded()) / 100; } return maxChange; } }
spencermwoo/anonymouth
src/edu/drexel/psal/anonymouth/utils/TaggedDocument.java
214,509
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2024 DBeaver Corp and others * Copyright (C) 2017-2018 Andrew Khitrin ([email protected]) * Copyright (C) 2017-2018 Alexander Fedorov ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.postgresql.debug.internal.impl; import org.jkiss.dbeaver.debug.DBGStackFrame; public class PostgreDebugStackFrame implements DBGStackFrame { private final int level; private final String name; private final int oid; private final int lineNo; private final String args; public PostgreDebugStackFrame(int level, String name, int oid, int lineNo, String args) { super(); this.level = level; this.name = name; this.oid = oid; this.lineNo = lineNo; this.args = args; } public int getLevel() { return level; } @Override public Object getSourceIdentifier() { return getOid(); } public int getOid() { return oid; } @Override public int getLineNumber() { return lineNo; } public String getArgs() { return args; } @Override public String getName() { return name; } @Override public String toString() { return "PostgreDebugStackFrame [level=" + level + ", name=" + name + ", oid=" + oid + ", lineNo=" + lineNo + ", args=" + args + "]"; } }
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.postgresql.debug.core/src/org/jkiss/dbeaver/ext/postgresql/debug/internal/impl/PostgreDebugStackFrame.java
214,510
/*
 * Arguments.java
 *
 * Copyright (c) 2002-2015 Alexei Drummond, Andrew Rambaut and Marc Suchard
 *
 * This file is part of BEAST.
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership and licensing.
 *
 * BEAST is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * BEAST is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with BEAST; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301 USA
 */

package dr.app.util;

import java.util.StringTokenizer;
import java.util.Arrays;
import java.util.ArrayList;

/**
 * Simple command-line argument parser. Options are declared up front as
 * typed Option subclasses ('-label value' style, prefixed with
 * ARGUMENT_CHARACTER); parseArguments() then fills in their values and
 * collects trailing non-option arguments as "leftover" arguments.
 */
public class Arguments {

	public static final String ARGUMENT_CHARACTER = "-";

	/** Thrown for any malformed, duplicate, unknown or out-of-range argument. */
	public static class ArgumentException extends Exception {
		private static final long serialVersionUID = -3229759954341228233L;

		public ArgumentException() {
			super();
		}

		public ArgumentException(String message) {
			super(message);
		}
	}

	/** A flag-style option (no value); also the base class for typed options. */
	public static class Option {

		public Option(String label, String description) {
			this.label = label;
			this.description = description;
		}

		String label;
		String description;
		// Set true by parseArguments() when the option was seen on the command line.
		boolean isAvailable = false;
	}

	/** An option carrying a free-form or enumerated string value. */
	public static class StringOption extends Option {
		/**
		 * @param label Option name:
		 * @param tag Descriptive name of option argument.
		 * Example - tag "file-name" will show '-save <file-name>' in the usage.
		 * @param description
		 */
		public StringOption(String label, String tag, String description) {
			super(label, description);
			this.tag = tag;
		}

		// Alternate form: value must be one of the given options.
		public StringOption(String label, String[] options, boolean caseSensitive, String description) {
			super(label, description);
			this.options = options;
			this.caseSensitive = caseSensitive;
		}

		String[] options = null;
		String tag = null;
		boolean caseSensitive = false;
		String value = null;
	}

	/** An option carrying a single int, optionally range-checked. */
	public static class IntegerOption extends Option {

		public IntegerOption(String label, String description) {
			super(label, description);
		}

		public IntegerOption(String label, int minValue, int maxValue, String description) {
			super(label, description);
			this.minValue = minValue;
			this.maxValue = maxValue;
		}

		int minValue = Integer.MIN_VALUE;
		int maxValue = Integer.MAX_VALUE;
		int value = 0;
	}

	/** An option carrying a fixed-size array of ints (comma/space separated). */
	public static class IntegerArrayOption extends IntegerOption {

		public IntegerArrayOption(String label, String description) {
			this(label, 0, Integer.MIN_VALUE, Integer.MAX_VALUE, description);
		}

		public IntegerArrayOption(String label, int count, String description) {
			this(label, count, Integer.MIN_VALUE, Integer.MAX_VALUE, description);
		}

		public IntegerArrayOption(String label, int minValue, int maxValue, String description) {
			this(label, 0, minValue, maxValue, description);
		}

		public IntegerArrayOption(String label, int count, int minValue, int maxValue, String description) {
			super(label, minValue, maxValue, description);
			this.count = count;
		}

		int count;
		int[] values = null;
	}

	/** An option carrying a single long, optionally range-checked. */
	public static class LongOption extends Option {

		public LongOption(String label, String description) {
			super(label, description);
		}

		public LongOption(String label, long minValue, long maxValue, String description) {
			super(label, description);
			this.minValue = minValue;
			this.maxValue = maxValue;
		}

		long minValue = Long.MIN_VALUE;
		long maxValue = Long.MAX_VALUE;
		long value = 0;
	}

	/** An option carrying a single double, optionally range-checked. */
	public static class RealOption extends Option {

		public RealOption(String label, String description) {
			super(label, description);
		}

		public RealOption(String label, double minValue, double maxValue, String description) {
			super(label, description);
			this.minValue = minValue;
			this.maxValue = maxValue;
		}

		double minValue = Double.NEGATIVE_INFINITY;
		double maxValue = Double.POSITIVE_INFINITY;
		double value = 0;
	}

	/** An option carrying an array of doubles; count == -1 means any length. */
	public static class RealArrayOption extends RealOption {
//        public RealArrayOption(String label, String description) {
//            this(label, 0, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, description);
//        }

		// A count of -1 means any length
		public RealArrayOption(String label, int count, String description) {
			this(label, count, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, description);
		}

//        public RealArrayOption(String label, double minValue, double maxValue, String description) {
//            this(label, 0, minValue, maxValue, description);
//        }

		public RealArrayOption(String label, int count, double minValue, double maxValue, String description) {
			super(label, minValue, maxValue, description);
			this.count = count;
		}

		private int count;
		double[] values = null;
	}

	/**
	 * Parse a list of arguments ready for accessing
	 */
	public Arguments(Option[] options) {
		this.options = options;
	}

	public Arguments(Option[] options, boolean caseSensitive) {
		this.options = options;
		this.caseSensitive = caseSensitive;
	}

	/**
	 * Inserts a new flag-style option immediately before the option named
	 * 'positionLabel' in the declared option order.
	 */
	public void addOption(String label, String description, String positionLabel) {
		ArrayList<Option> optionsList = new ArrayList<Option>(Arrays.asList(this.options));
		optionsList.add(findOption(positionLabel), new Arguments.Option(label, description));
		this.options = new Option[optionsList.size()];
		optionsList.toArray(this.options);
		;
	}

	/**
	 * Parse a list of arguments ready for accessing.
	 *
	 * optionIndex[k] records which declared option claimed arguments[k]
	 * (-1 = unclaimed); it is used to detect options whose values overlap.
	 *
	 * @return the number of leftover (non-option, trailing) arguments
	 * @throws ArgumentException on duplicate, malformed, out-of-range or
	 *         unrecognized arguments
	 */
	public int parseArguments(String[] arguments) throws ArgumentException {

		int[] optionIndex = new int[arguments.length];
		for (int i = 0; i < optionIndex.length; i++) {
			optionIndex[i] = -1;
		}

		for (int i = 0; i < options.length; i++) {
			Option option = options[i];
			int index = findArgument(arguments, option.label);

			if (index != -1) {

				if (optionIndex[index] != -1) {
					throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
				}

				// the first value may be appended to the option label (e.g., '-t1.0'):
				String arg = arguments[index].substring(option.label.length() + 1);

				optionIndex[index] = i;
				option.isAvailable = true;

				if (option instanceof IntegerArrayOption) {

					IntegerArrayOption o = (IntegerArrayOption) option;
					o.values = new int[o.count];

					// Values may be split across several following arguments;
					// keep consuming tokens until o.count ints are collected.
					int k = index;
					int j = 0;
					while (j < o.count) {
						if (arg.length() > 0) {
							StringTokenizer tokenizer = new StringTokenizer(arg, ",\t ");
							while (tokenizer.hasMoreTokens()) {
								String token = tokenizer.nextToken();
								if (token.length() > 0) {
									try {
										o.values[j] = Integer.parseInt(token);
									} catch (NumberFormatException nfe) {
										throw new ArgumentException("Argument, " + arguments[index] + " has a bad integer value: " + token);
									}
									if (o.values[j] > o.maxValue || o.values[j] < o.minValue) {
										throw new ArgumentException("Argument, " + arguments[index] + " has a bad integer value: " + token);
									}
									j++;
								}
							}
						}

						k++;
						if (j < o.count) {
							if (k >= arguments.length) {
								throw new ArgumentException("Argument, " + arguments[index] + " is missing one or more values: expecting " + o.count + " integers");
							}

							if (optionIndex[k] != -1) {
								throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
							}

							arg = arguments[k];
							optionIndex[k] = i;
						}
					}

				} else if (option instanceof IntegerOption) {

					IntegerOption o = (IntegerOption) option;
					// Value not appended to the label — take the next argument.
					if (arg.length() == 0) {
						int k = index + 1;
						if (k >= arguments.length) {
							throw new ArgumentException("Argument, " + arguments[index] + " is missing its value: expecting an integer");
						}

						if (optionIndex[k] != -1) {
							throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
						}

						arg = arguments[k];
						optionIndex[k] = i;
					}

					try {
						o.value = Integer.parseInt(arg);
					} catch (NumberFormatException nfe) {
						throw new ArgumentException("Argument, " + arguments[index] + " has a bad integer value: " + arg);
					}
					if (o.value > o.maxValue || o.value < o.minValue) {
						throw new ArgumentException("Argument, " + arguments[index] + " has a bad integer value: " + arg);
					}

				} else if (option instanceof LongOption) {

					LongOption o = (LongOption) option;
					if (arg.length() == 0) {
						int k = index + 1;
						if (k >= arguments.length) {
							throw new ArgumentException("Argument, " + arguments[index] + " is missing its value: expecting a long integer");
						}

						if (optionIndex[k] != -1) {
							throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
						}

						arg = arguments[k];
						optionIndex[k] = i;
					}

					try {
						o.value = Long.parseLong(arg);
					} catch (NumberFormatException nfe) {
						throw new ArgumentException("Argument, " + arguments[index] + " has a bad integer value: " + arg);
					}
					if (o.value > o.maxValue || o.value < o.minValue) {
						throw new ArgumentException("Argument, " + arguments[index] + " has a bad long integer value: " + arg);
					}

				} else if (option instanceof RealArrayOption) {
					// I fixed only the real case to handle a variable sized array
					// I don't have the time to figure out the right way, so I duplicated some code so
					// that I do not break code by mistake
					RealArrayOption o = (RealArrayOption) option;

					if (o.count >= 0) {
						// Fixed-size: same consuming loop as IntegerArrayOption.
						final int count = o.count;
						o.values = new double[count];

						int k = index;
						int j = 0;
						while (j < count) {
							if (arg.length() > 0) {
								StringTokenizer tokenizer = new StringTokenizer(arg, ",\t ");
								while (tokenizer.hasMoreTokens()) {
									String token = tokenizer.nextToken();
									if (token.length() > 0) {
										try {
											o.values[j] = Double.parseDouble(token);
										} catch (NumberFormatException nfe) {
											throw new ArgumentException("Argument, " + arguments[index] + " has a bad real value: " + token);
										}
										if (o.values[j] > o.maxValue || o.values[j] < o.minValue) {
											throw new ArgumentException("Argument, " + arguments[index] + " has a bad real value: " + token);
										}
										j++;
									}
								}
							}

							k++;
							if (j < count) {
								if (k >= arguments.length) {
									throw new ArgumentException("Argument, " + arguments[index] + " is missing one or more values: expecting " + count + " integers");
								}

								if (optionIndex[k] != -1) {
									throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
								}

								arg = arguments[k];
								optionIndex[k] = i;
							}
						}
					} else {
						// Variable-size (count == -1): all values must be in
						// the single next argument; capped at 100 entries.
						double[] values = new double[100];
						index += 1;
						arg = arguments[index];
						optionIndex[index] = i;
						int j = 0;
						if (arg.length() > 0) {
							StringTokenizer tokenizer = new StringTokenizer(arg, ",\t ");
							while (tokenizer.hasMoreTokens()) {
								String token = tokenizer.nextToken();
								if (token.length() > 0) {
									try {
										values[j] = Double.parseDouble(token);
									} catch (NumberFormatException nfe) {
										throw new ArgumentException("Argument, " + arguments[index] + " has a bad real value: " + token);
									}
									if (values[j] > o.maxValue || values[j] < o.minValue) {
										throw new ArgumentException("Argument, " + arguments[index] + " has a bad real value: " + token);
									}
									j++;
								}
							}
						}
						o.values = new double[j];
						System.arraycopy(values, 0, o.values, 0, j);
					}

				} else if (option instanceof RealOption) {

					RealOption o = (RealOption) option;
					if (arg.length() == 0) {
						int k = index + 1;
						if (k >= arguments.length) {
							throw new ArgumentException("Argument, " + arguments[index] + " is missing its value: expecting a real number");
						}

						if (optionIndex[k] != -1) {
							throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
						}

						arg = arguments[k];
						optionIndex[k] = i;
					}

					try {
						o.value = Double.parseDouble(arg);
					} catch (NumberFormatException nfe) {
						throw new ArgumentException("Argument, " + arguments[index] + " has a bad real value: " + arg);
					}
					if (o.value > o.maxValue || o.value < o.minValue) {
						throw new ArgumentException("Argument, " + arguments[index] + " has a bad real value: " + arg);
					}

				} else if (option instanceof StringOption) {

					StringOption o = (StringOption) option;
					if (arg.length() == 0) {
						int k = index + 1;
						if (k >= arguments.length) {
							throw new ArgumentException("Argument, " + arguments[index] + " is missing its value: expecting a string");
						}
						if (optionIndex[k] != -1) {
							throw new ArgumentException("Argument, " + arguments[index] + " overlaps with another argument");
						}
						arg = arguments[k];
						optionIndex[k] = i;
					}

					o.value = arg;
					// Enumerated form: value must match one of the declared options.
					if (o.options != null) {
						boolean found = false;
						for (String option1 : o.options) {
							if ((!caseSensitive && option1.equalsIgnoreCase(o.value)) || option1.equals(o.value)) {
								found = true;
								break;
							}
						}
						if (!found) {
							throw new ArgumentException("Argument, " + arguments[index] + " has a bad string value: " + arg);
						}
					}

				} else {
					// is simply an Option - nothing to do...
				}
			}
		}

		// Count trailing arguments that were never claimed by any option and
		// do not look like options themselves; these become "leftovers".
		int n = 0;
		int i = arguments.length - 1;
		while (i >= 0 && optionIndex[i] == -1 && !arguments[i].startsWith(ARGUMENT_CHARACTER)) {
			n++;
			i--;
		}

		leftoverArguments = new String[n];
		for (i = 0; i < n; i++) {
			leftoverArguments[i] = arguments[arguments.length - n + i];
		}

		// Anything before the leftovers that no option claimed is an error.
		for (i = 0; i < arguments.length - n; i++) {
			if (optionIndex[i] == -1) {
				throw new ArgumentException("Unrecognized argument: " + arguments[i]);
			}
		}

		return n;
	}

	/**
	 * Returns the index of the first argument whose text (after the leading
	 * ARGUMENT_CHARACTER) matches 'label', or -1 when absent. Honors the
	 * caseSensitive flag. NOTE(review): despite the appended-value comment in
	 * parseArguments, this compares the *entire* remainder against the label,
	 * so '-t1.0' does not match label 't' here.
	 */
	private int findArgument(String[] arguments, String label) {
		for (int i = 0; i < arguments.length; i++) {
			if (arguments[i].length() - 1 >= label.length()) {
				if (arguments[i].startsWith(ARGUMENT_CHARACTER)) {
//                    String l = arguments[i].substring(1, label.length() + 1);
//                    String l = arguments[i];
					String l = arguments[i].substring(1, arguments[i].length());
					if ((!caseSensitive && label.equalsIgnoreCase(l)) || label.equals(l)) {
						return i;
					}
				}
			}
		}
		return -1;
	}

	/**
	 * Does an argument with label exist?
*/
	public boolean hasOption(String label) {
		int n = findOption(label);
		if (n == -1) {
			return false;
		}

		return options[n].isAvailable;
	}

	/**
	 * Return the value of an integer option.
	 * NOTE(review): all of these typed getters index options[findOption(label)]
	 * without checking for -1, so asking for an undeclared label throws an
	 * ArrayIndexOutOfBoundsException — call hasOption() first.
	 */
	public int getIntegerOption(String label) {
		IntegerOption o = (IntegerOption) options[findOption(label)];
		return o.value;
	}

	/**
	 * Return the value of an integer array option
	 */
	public int[] getIntegerArrayOption(String label) {
		IntegerArrayOption o = (IntegerArrayOption) options[findOption(label)];
		return o.values;
	}

	/**
	 * Return the value of a long integer option
	 */
	public long getLongOption(String label) {
		LongOption o = (LongOption) options[findOption(label)];
		return o.value;
	}

	/**
	 * Return the value of a real number option
	 */
	public double getRealOption(String label) {
		RealOption o = (RealOption) options[findOption(label)];
		return o.value;
	}

	/**
	 * Return the value of a real array option
	 */
	public double[] getRealArrayOption(String label) {
		RealArrayOption o = (RealArrayOption) options[findOption(label)];
		return o.values;
	}

	/**
	 * Return the value of a string option
	 */
	public String getStringOption(String label) {
		StringOption o = (StringOption) options[findOption(label)];
		return o.value;
	}

	/**
	 * Return any arguments leftover after the options
	 */
	public String[] getLeftoverArguments() {
		return leftoverArguments;
	}

	/**
	 * Prints a one-line usage summary followed by a per-option description
	 * list, using each option's type to decide the value placeholder(s).
	 */
	public void printUsage(String name, String commandLine) {

		System.out.print(" Usage: " + name);
		for (Option option : options) {
			System.out.print(" [-" + option.label);

			if (option instanceof IntegerArrayOption) {
				IntegerArrayOption o = (IntegerArrayOption) option;
				for (int j = 1; j <= o.count; j++) {
					System.out.print(" <i" + j + ">");
				}
				System.out.print("]");
			} else if (option instanceof IntegerOption) {
				System.out.print(" <i>]");
			} else if (option instanceof RealArrayOption) {
				RealArrayOption o = (RealArrayOption) option;
				for (int j = 1; j <= o.count; j++) {
					System.out.print(" <r" + j + ">");
				}
				System.out.print("]");
			} else if (option instanceof RealOption) {
				System.out.print(" <r>]");
			} else if (option instanceof StringOption) {
				StringOption o = (StringOption) option;
				if (o.options != null) {
					// Enumerated form: show the allowed values joined by '|'.
					System.out.print(" <" + o.options[0]);
					for (int j = 1; j < o.options.length; j++) {
						System.out.print("|" + o.options[j]);
					}
					System.out.print(">]");
				} else {
					System.out.print(" <" + o.tag + ">]");
				}
			} else {
				// Plain flag option — no value placeholder.
				System.out.print("]");
			}
		}
		System.out.println(" " + commandLine);

		for (Option option : options) {
			System.out.println(" -" + option.label + " " + option.description);
		}
	}

	/**
	 * Returns the index of the declared option with the given label
	 * (honoring the caseSensitive flag), or -1 when none matches.
	 */
	private int findOption(String label) {
		for (int i = 0; i < options.length; i++) {
			String l = options[i].label;
			if ((!caseSensitive && label.equalsIgnoreCase(l)) || label.equals(l)) {
				return i;
			}
		}
		return -1;
	}

	private Option[] options = null;
	// Trailing non-option arguments collected by parseArguments().
	private String[] leftoverArguments = null;
	private boolean caseSensitive = false;
}
maxbiostat/beast-mcmc
src/dr/app/util/Arguments.java
214,511
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2017 Andrew Khitrin ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ext.ui.locks.graph;

import org.eclipse.draw2d.ConnectionAnchor;
import org.eclipse.draw2d.GridData;
import org.eclipse.draw2d.IFigure;
import org.eclipse.gef.ConnectionEditPart;
import org.eclipse.gef.EditPolicy;
import org.eclipse.gef.Request;
import org.eclipse.gef.editparts.AbstractGraphicalEditPart;
import org.eclipse.gef.editpolicies.NonResizableEditPolicy;
import org.jkiss.dbeaver.model.impl.admin.locks.LockGraph;
import org.jkiss.dbeaver.model.impl.admin.locks.LockGraphEdge;
import org.jkiss.dbeaver.model.impl.admin.locks.LockGraphNode;

import java.util.List;

/**
 * GEF edit part for a single node in the lock wait-for graph. Creates the
 * node figure, exposes its incoming/outgoing edges to the framework, and
 * positions the figure inside the parent graph's grid layout.
 */
public class LockGraphNodeEditPart extends AbstractGraphicalEditPart {

	// Shared anchors: all source/target connections attach to the same
	// figure-centered anchor.
	private LockGraphConnectionAnchor sourceAnchor;

	private LockGraphConnectionAnchor targetAnchor;

	@Override
	public boolean isSelectable() {
		return true;
	}

	/**
	 * Builds the node figure; the currently-selected node of the parent
	 * graph is rendered in its highlighted form.
	 */
	@Override
	protected IFigure createFigure() {
		LockGraphNode node = (LockGraphNode) getModel();
		LockGraph graph = (LockGraph) getParent().getModel();
		LockGraphNodeFigure nodeFigure = new LockGraphNodeFigure(node.getTitle(),(node == graph.getSelection()));
		this.targetAnchor = new LockGraphConnectionAnchor(nodeFigure);
		this.sourceAnchor = new LockGraphConnectionAnchor(nodeFigure);
		return nodeFigure;
	}

	@Override
	protected List<LockGraphEdge> getModelSourceConnections() {
		return ((LockGraphNode) getModel()).getSourceEdges();
	}

	@Override
	protected List<LockGraphEdge> getModelTargetConnections() {
		return ((LockGraphNode) getModel()).getTargetEdges();
	}

	public ConnectionAnchor getSourceConnectionAnchor(
			ConnectionEditPart connection) {
		return this.sourceAnchor;
	}

	public ConnectionAnchor getSourceConnectionAnchor(Request request) {
		return this.sourceAnchor;
	}

	public ConnectionAnchor getTargetConnectionAnchor(
			ConnectionEditPart connection) {
		return this.targetAnchor;
	}

	public ConnectionAnchor getTargetConnectionAnchor(Request request) {
		return this.targetAnchor;
	}

	/**
	 * Re-applies this node's GridData constraint in the parent layout,
	 * spreading the node over extra columns so each graph level stays
	 * centered relative to the widest level.
	 */
	@Override
	protected void refreshVisuals() {
		LockGraphNode node = (LockGraphNode) getModel();
		LockGraph lgraph = (LockGraph)((LockGraphEditPart) getParent()).getModel();
		LockGraphNodeFigure nodeFigure = (LockGraphNodeFigure) getFigure();
		LockGraphEditPart graph = (LockGraphEditPart) getParent();

		GridData gridData = new GridData(55,30);
		gridData.horizontalAlignment = GridData.CENTER;
		gridData.verticalAlignment = GridData.CENTER;
		gridData.verticalSpan = 10;
		gridData.grabExcessHorizontalSpace = true;
		gridData.grabExcessVerticalSpace = true;

		// Distribute the row's spare columns; the rightmost node of a level
		// also absorbs the division remainder.
		int span = lgraph.getMaxWidth() / node.getSpan();
		int spanMod = lgraph.getMaxWidth() % node.getSpan();

		gridData.horizontalSpan = 0 ;

		if (span > 1 && node.getLevelPosition() != LockGraphNode.LevelPosition.RIGHT) {
			gridData.horizontalSpan = span;
		} else if (spanMod > 0 && node.getLevelPosition() == LockGraphNode.LevelPosition.RIGHT) {
			gridData.horizontalSpan = span + spanMod;
		}

		graph.setLayoutConstraint(this, nodeFigure,gridData);
	}

	@Override
	protected void createEditPolicies() {
		// Nodes are selectable but not draggable.
		SelectionPolicy selectionPolicy = new SelectionPolicy();
		selectionPolicy.setDragAllowed(false);
		installEditPolicy(EditPolicy.SELECTION_FEEDBACK_ROLE, selectionPolicy);
	}

	/** Selection feedback policy; visual feedback is intentionally disabled. */
	static class SelectionPolicy extends NonResizableEditPolicy {

		@Override
		protected void hideSelection() {
		}

		@Override
		protected void showSelection() {
/*
			LockManagerViewer viewer = ((LockGraph)getHost().getParent().getModel()).getLockManagerViewer();
			if (viewer != null) {
				viewer.setTableLockSelect(((LockGraphNode)getHost().getModel()).getLock());
			}
*/
		}
	}
}
dbeaver/dbeaver
plugins/org.jkiss.dbeaver.ext.ui.locks/src/org/jkiss/dbeaver/ext/ui/locks/graph/LockGraphNodeEditPart.java
214,512
package edu.drexel.psal.anonymouth.utils; import java.io.Serializable; import java.util.ArrayList; import java.util.HashSet; import edu.drexel.psal.anonymouth.gooie.GUIMain; import edu.drexel.psal.anonymouth.utils.TextWrapper; import edu.drexel.psal.jstylo.generics.Logger; /** * Tracks all EOS characters in the document to anonymize. This includes ALL * instances of '.', '!', and '?' in a document, so it's not restricted to * just sentence ends but rather any time those appear (so abbreviations, * ellipses, etc. all are kept here as well). We use this to determine which * EOS characters are "real". Instead of using swap characters we are instead * opting to preserve the original text for the tagged sentences and backend * and instead keep an "ignore" boolean tied to each index of an EOS * character, and SentenceMaker will then skip over these since they aren't * actually and EOS.<br><br> * * Now also tracks all TextWrappers in the similar way with a few additional * stuff like extend highlights and whatnot. For more details on what the hell * TextWrappers are and why they exist at all, see TextWrapper.java * * @author Marc Barrowclift * @author Andrew W.E. 
McDonald */
public class SpecialCharTracker implements Serializable {

    // Log prefix, e.g. "( SpecialCharTracker ) - ".
    public final String NAME = "( " + this.getClass().getSimpleName() +" ) - ";
    private static final long serialVersionUID = -5900337779583604917L;

    /** The characters we acknowledge as possible ways to end a sentence. */
    private final HashSet<Character> EOS;
    private GUIMain main;

    /** The character we acknowledge as a quote. */
    private final char QUOTE = '"';
    /** Open/close pairs: index 0 = opening char, index 1 = closing char. */
    private final String PARENTHESIS = "()";
    private final String BRACKETS = "[]";
    private final String SQUIGGLIES = "{}";

    /** Every tracked EOS character occurrence, real sentence end or not. */
    public ArrayList<EOS> eoses;
    /** Tracked quote pairs. */
    public ArrayList<TextWrapper> quotes;
    private ArrayList<TextWrapper> parenthesis;
    private ArrayList<TextWrapper> brackets;
    private ArrayList<TextWrapper> squigglies;

    // allTextWrappers.get(k) and allTextWrapperSizes[k] are parallel:
    // 0 = quotes, 1 = parenthesis, 2 = brackets, 3 = squigglies.
    // NOTE(review): sizes are tracked manually instead of using List.size();
    // the two must be kept in lock-step everywhere below.
    private ArrayList<ArrayList<TextWrapper>> allTextWrappers;
    private int[] allTextWrapperSizes;
    private final int NUM_OF_TEXT_WRAPPERS = 4;

    /** The number of EOSes we are currently tracking. */
    public int eosSize;

    /**
     * Constructor.
     *
     * @param main the main GUI window; used later to reach the editor driver.
     */
    public SpecialCharTracker(GUIMain main) {
        EOS = new HashSet<Character>(3);
        EOS.add('.');
        EOS.add('!');
        EOS.add('?');
        eoses = new ArrayList<EOS>(100);
        quotes = new ArrayList<TextWrapper>(50);
        parenthesis = new ArrayList<TextWrapper>(50);
        brackets = new ArrayList<TextWrapper>(50);
        squigglies = new ArrayList<TextWrapper>(50);
        eosSize = 0;
        initAllTextWrappers();
        initAllTextWrapperSizes();
        this.main = main;
    }

    /**
     * Copy constructor: deep-copies the EOS list of another tracker, and
     * copies over its TextWrapper entries (see {@link #deepCopy} note).
     *
     * @param specialCharTracker the instance to copy
     */
    public SpecialCharTracker(SpecialCharTracker specialCharTracker) {
        EOS = new HashSet<Character>(3);
        EOS.add('.');
        EOS.add('!');
        EOS.add('?');
        this.main = specialCharTracker.main;
        quotes = new ArrayList<TextWrapper>(50);
        parenthesis = new ArrayList<TextWrapper>(50);
        brackets = new ArrayList<TextWrapper>(50);
        squigglies = new ArrayList<TextWrapper>(50);
        initAllTextWrappers();
        initAllTextWrapperSizes();

        // EOS characters: element-wise deep copy (new EOS objects).
        int tempSize = specialCharTracker.eosSize;
        eoses = new ArrayList<EOS>(tempSize);
        eosSize = 0;
        for (int i = 0; i < tempSize; i++) {
            if (specialCharTracker.eoses.get(i) == null) {
                break;
            }
            eosSize++;
            eoses.add(new EOS(specialCharTracker.eoses.get(i)));
        }

        // Text Wrappers
        for (int i = 0; i < NUM_OF_TEXT_WRAPPERS; i++) {
            deepCopy(specialCharTracker, i);
        }
    }

    /**
     * Copies one TextWrapper list (quotes, parenthesis, brackets or
     * squigglies) from another tracker into this one.
     *
     * NOTE(review): despite its name this is a SHALLOW copy — the same
     * TextWrapper objects are added, unlike the EOS copy above which clones
     * each element. Also, the null check tests the whole list rather than
     * element {@code i}; both look unintended — confirm before relying on
     * copied trackers being independent.
     *
     * @param specialCharTracker the instance we're copying from
     * @param curIndex which wrapper list to copy (0-3, see allTextWrappers)
     */
    private void deepCopy(SpecialCharTracker specialCharTracker, int curIndex) {
        int tempSize = specialCharTracker.allTextWrapperSizes[curIndex];
        for (int i = 0; i < tempSize; i++) {
            if (specialCharTracker.allTextWrappers.get(curIndex) == null) {
                break;
            }
            allTextWrapperSizes[curIndex]++;
            allTextWrappers.get(curIndex).add(specialCharTracker.allTextWrappers.get(curIndex).get(i));
        }
    }

    /** Initializes allTextWrappers with the four per-type wrapper lists. */
    private void initAllTextWrappers() {
        allTextWrappers = new ArrayList<ArrayList<TextWrapper>>(NUM_OF_TEXT_WRAPPERS);
        allTextWrappers.add(quotes);       // 0: Quotes
        allTextWrappers.add(parenthesis);  // 1: Parenthesis
        allTextWrappers.add(brackets);     // 2: Brackets
        allTextWrappers.add(squigglies);   // 3: Squigglies
    }

    /** Initializes every per-type wrapper count to 0. */
    private void initAllTextWrapperSizes() {
        allTextWrapperSizes = new int[NUM_OF_TEXT_WRAPPERS];
        allTextWrapperSizes[0] = 0; // Quotes
        allTextWrapperSizes[1] = 0; // Parenthesis
        allTextWrapperSizes[2] = 0; // Brackets
        allTextWrapperSizes[3] = 0; // Squigglies
    }

    /**
     * Shifts every tracked EOS and TextWrapper index at or past the given
     * document index by the given amount (negative shifts left). Called when
     * text is inserted or removed before tracked positions.
     *
     * @param index positions &gt;= this index are shifted
     * @param shiftAmount how far to shift (negative for left)
     */
    public void shiftAll(int index, int shiftAmount) {
        Logger.logln(NAME+"Shifting all EOS character from " + index + " by " + shiftAmount);

        // EOS characters
        for (int i = 0; i < eosSize; i++) {
            if (eoses.get(i).location >= index)
                eoses.get(i).location += shiftAmount;
        }

        // Text Wrappers
        for (int i = 0; i < NUM_OF_TEXT_WRAPPERS; i++) {
            shift(allTextWrappers.get(i), allTextWrapperSizes[i], index, shiftAmount);
        }
    }

    /** Shifts one wrapper list; endIndex only moves for closed wrappers. */
    private void shift(ArrayList<TextWrapper> curTextWrapper, int curSize, int index, int shiftAmount) {
        for (int i = 0; i < curSize; i++) {
            if (curTextWrapper.get(i).startIndex >= index)
                curTextWrapper.get(i).startIndex += shiftAmount;
            if (curTextWrapper.get(i).closed == true && curTextWrapper.get(i).endIndex >= index)
                curTextWrapper.get(i).endIndex += shiftAmount;
        }
    }

    //================ RESET METHODS ================

    /** Clears all EOS characters so we can recalculate them. */
    public void resetEOSCharacters() {
        eoses.clear();
        eosSize = 0;
    }

    /** Clears all quotes so we can recalculate them. */
    public void resetQuotes() {
        quotes.clear();
        allTextWrapperSizes[0] = 0;
    }

    /** Clears all parenthesis so we can recalculate them. */
    public void resetParenthesis() {
        parenthesis.clear();
        allTextWrapperSizes[1] = 0;
    }

    /** Clears all brackets so we can recalculate them. */
    public void resetBrackets() {
        brackets.clear();
        allTextWrapperSizes[2] = 0;
    }

    /** Clears all squigglies so we can recalculate them. */
    public void resetSquigglies() {
        squigglies.clear();
        allTextWrapperSizes[3] = 0;
    }

    /** Resets the entire tracker by clearing all tracked characters. */
    public void resetAll() {
        resetEOSCharacters();
        resetQuotes();
        resetParenthesis();
        resetBrackets();
        resetSquigglies();
    }

    //================ ASSORTED ================

    /** Multi-line dump of every tracked EOS and wrapper, for debugging. */
    public String toString() {
        String toReturn = NAME+"EOSES:\n";
        for (int i = 0; i < eosSize; i++) {
            toReturn += NAME+ " " + eoses.get(i) + "\n";
        }
        toReturn += NAME+"QUOTES:\n";
        for (int i = 0; i < allTextWrapperSizes[0]; i++) {
            toReturn += NAME+ " " + quotes.get(i) + "\n";
        }
        toReturn += NAME+"PARENTHESIS:\n";
        for (int i = 0; i < allTextWrapperSizes[1]; i++) {
            toReturn += NAME+ " " + parenthesis.get(i) + "\n";
        }
        toReturn += NAME+"BRACKETS:\n";
        for (int i = 0; i < allTextWrapperSizes[2]; i++) {
            toReturn += NAME+ " " + brackets.get(i) + "\n";
        }
        toReturn += NAME+"SQUIGGLIES:\n";
        for (int i = 0; i < allTextWrapperSizes[3]; i++) {
            toReturn += NAME+ " " + squigglies.get(i) + "\n";
        }
        return toReturn;
    }

    //=======================================================================
    //*                  TEXT WRAPPER GENERAL METHODS                       *
    //=======================================================================

    /**
     * Extends a sentence highlight so it covers every tagged sentence that
     * falls inside the same text wrapper (quote/paren/bracket/squiggly) pair,
     * giving the illusion of one continuous sentence. Called by
     * EditorDriver's getSentenceIndices() whenever the highlight changes.
     *
     * @param start the start index of the sentence
     * @param end the end index of the sentence
     * @param allSentIndices per-sentence end indices from getSentenceIndices()
     * @param sentenceLengths per-sentence lengths
     * @param selectedSentence the selected sentence number
     *
     * @return int[4]: [0] highlight start, [1] highlight end,
     *         [2] extra sentences absorbed to the LEFT,
     *         [3] extra sentences absorbed to the RIGHT.
     *         The last two are used by the words-to-remove highlighter,
     *         which needs sentence numbers rather than indices.
     */
    public int[] extendHighlights(int start, int end, int[] allSentIndices, int[] sentenceLengths, int selectedSentence) {
        int[] highlightIndices = {start, end, 0, 0};

        // Run once per wrapper type, feeding each result into the next pass.
        for (int i = 0; i < NUM_OF_TEXT_WRAPPERS; i++) {
            highlightIndices = extendHighlights(highlightIndices[0], highlightIndices[1], allSentIndices, sentenceLengths, selectedSentence, allTextWrappers.get(i), allTextWrapperSizes[i], highlightIndices);
        }

        return highlightIndices;
    }

    /**
     * Single-wrapper-type pass of extendHighlights. Mutates and returns
     * {@code highlightIndices}.
     *
     * NOTE(review): the first two parameters ({@code asdf}, {@code endgf})
     * are never read — highlightIndices[0]/[1] are used instead. They look
     * like leftovers and could be dropped on the next interface change.
     */
    private int[] extendHighlights(int asdf, int endgf, int[] allSentIndices, int[] sentenceLengths, int selectedSentence, ArrayList<TextWrapper> curTextWrapper, int curSize, int[] highlightIndices) {
        for (int t = 0; t < curSize; t++) {
            if (curTextWrapper.get(t).closed) {
                // Full text wrapper within sentence: nothing to extend.
                if (curTextWrapper.get(t).startIndex > highlightIndices[0] && curTextWrapper.get(t).endIndex < highlightIndices[1]) {
                    continue;
                // Whole sentence between text wrapper: extend both ways.
                } else if (highlightIndices[0] >= curTextWrapper.get(t).startIndex && highlightIndices[1] <= curTextWrapper.get(t).endIndex) {
                    highlightIndices[0] = curTextWrapper.get(t).startIndex;
                    // Walk left to the sentence containing the new start.
                    for (int sent = selectedSentence; sent >= 0; sent--) {
                        if (highlightIndices[0] > (allSentIndices[sent] - sentenceLengths[sent]) && highlightIndices[0] < allSentIndices[sent]) {
                            highlightIndices[0] = allSentIndices[sent] - sentenceLengths[sent];
                            highlightIndices[2] = selectedSentence - sent;
                            break;
                        }
                    }
                    highlightIndices[1] = curTextWrapper.get(t).endIndex;
                    // Walk right to the sentence containing the new end.
                    for (int sent = selectedSentence; sent < main.editorDriver.taggedDoc.numOfSentences; sent++) {
                        if (highlightIndices[1] > (allSentIndices[sent] - sentenceLengths[sent]) && highlightIndices[1] < allSentIndices[sent]) {
                            highlightIndices[1] = allSentIndices[sent];
                            highlightIndices[3] = sent - selectedSentence;
                            break;
                        }
                    }
                // Start of text wrapper in sentence: extend right only.
                } else if (highlightIndices[0] < curTextWrapper.get(t).startIndex && highlightIndices[1] > curTextWrapper.get(t).startIndex) {
                    highlightIndices[1] = curTextWrapper.get(t).endIndex;
                    for (int sent = selectedSentence; sent < main.editorDriver.taggedDoc.numOfSentences; sent++) {
                        if (highlightIndices[1] > (allSentIndices[sent] - sentenceLengths[sent]) && highlightIndices[1] < allSentIndices[sent]) {
                            highlightIndices[1] = allSentIndices[sent];
                            highlightIndices[3] = sent - selectedSentence;
                            break;
                        }
                    }
                // End of text wrapper in sentence: extend left only.
                } else if (highlightIndices[0] <= curTextWrapper.get(t).endIndex && highlightIndices[1] > curTextWrapper.get(t).endIndex) {
                    highlightIndices[0] = curTextWrapper.get(t).startIndex;
                    for (int sent = selectedSentence; sent >= 0; sent--) {
                        if (highlightIndices[0] > (allSentIndices[sent] - sentenceLengths[sent]) && highlightIndices[0] < allSentIndices[sent]) {
                            highlightIndices[0] = allSentIndices[sent] - sentenceLengths[sent];
                            highlightIndices[2] = selectedSentence - sent;
                            break;
                        }
                    }
                }
            }
        }
        return highlightIndices;
    }

    /**
     * Removes any TextWrapper objects in the range [lowerBound, upperBound)
     * — inclusive start, exclusive end. A wrapper with only one end inside
     * the range is re-opened (that end reset to -1) rather than removed.
     *
     * @param lowerBound start of the range (included)
     * @param upperBound end of the range (excluded)
     */
    public void removeTextWrappersInRange(int lowerBound, int upperBound) {
        Logger.logln(NAME+"Removing Text Wrappers in range " + lowerBound + " - " + upperBound);

        for (int i = 0; i < NUM_OF_TEXT_WRAPPERS; i++) {
            removeTextWrappersInRange(lowerBound, upperBound, allTextWrappers.get(i), i);
        }
    }

    /**
     * Range-removal pass over one wrapper list.
     *
     * NOTE(review): {@code curSize} is actually the wrapper-TYPE index into
     * allTextWrapperSizes (0-3), not a size — the name is misleading.
     */
    public void removeTextWrappersInRange(int lowerBound, int upperBound, ArrayList<TextWrapper> curTextWrapper, int curSize) {
        for (int i = 0; i < allTextWrapperSizes[curSize]; i++) {
            if (curTextWrapper.get(i).startIndex >= lowerBound && curTextWrapper.get(i).startIndex < upperBound) {
                if (curTextWrapper.get(i).closed) {
                    if (curTextWrapper.get(i).endIndex >= lowerBound && curTextWrapper.get(i).endIndex < upperBound) {
                        // Both ends inside the range: drop the wrapper.
                        Logger.logln(NAME+"Removed TextWrapper Object at " + curTextWrapper.get(i).startIndex + " - " + curTextWrapper.get(i).endIndex);
                        curTextWrapper.remove(i);
                        i--; // decrement 'i' so that we don't miss the object that shifts down into the spot just freed.
                        allTextWrapperSizes[curSize]--; // also decrement quoteSize
                    } else {
                        // Only the start is inside: re-open the wrapper.
                        curTextWrapper.get(i).startIndex = -1;
                        curTextWrapper.get(i).closed = false;
                    }
                } else {
                    Logger.logln(NAME+"Removed TextWrapper Object at " + curTextWrapper.get(i).startIndex + " - " + curTextWrapper.get(i).endIndex);
                    curTextWrapper.remove(i);
                    i--; // decrement 'i' so that we don't miss the object that shifts down into the spot just freed.
                    allTextWrapperSizes[curSize]--; // also decrement quoteSize
                }
            } else if (curTextWrapper.get(i).closed) {
                if (curTextWrapper.get(i).endIndex >= lowerBound && curTextWrapper.get(i).endIndex < upperBound) {
                    // Only the end is inside: re-open the wrapper.
                    curTextWrapper.get(i).endIndex = -1;
                    curTextWrapper.get(i).closed = false;
                }
            }
        }
    }

    //=======================================================================
    //*                              QUOTES                                 *
    //=======================================================================

    /**
     * Adds a quote to the tracker; call EVERY time a quote character is
     * typed. If the closest un-closed quote wrapper exists, this index
     * closes it; otherwise a new open TextWrapper is created at this index.
     *
     * @param index the index of the newly added quote
     */
    public void addQuote(int index) {
        int closingQuote = -1;
        for (int i = 0; i < allTextWrapperSizes[0]; i++) {
            if (!quotes.get(i).closed) {
                // Wrapper missing its end index
                if (quotes.get(i).endIndex == -1) {
                    // Close the closest available unclosed wrapper.
                    if (closingQuote == -1) {
                        closingQuote = i;
                    } else {
                        if (quotes.get(i).startIndex > quotes.get(closingQuote).startIndex) {
                            closingQuote = i;
                        }
                    }
                // Wrapper missing its start index
                } else if (quotes.get(i).startIndex == -1) {
                    if (closingQuote == -1) {
                        closingQuote = i;
                    } else {
                        if (quotes.get(i).endIndex < quotes.get(closingQuote).endIndex) {
                            closingQuote = i;
                        }
                    }
                }
            }
        }

        // New quote: create a new TextWrapper with this as the start index.
        if (closingQuote == -1) {
            quotes.add(new TextWrapper(index, '"'));
            allTextWrapperSizes[0]++;
        // Closing quote: set this index as that wrapper's end.
        } else {
            quotes.get(closingQuote).setClosingWrapper(index);
        }
    }

    /**
     * Checks if a given character is a quote.
     *
     * @param unknownChar the character to test
     * @return true if it is a quote
     */
    public boolean isQuote(char unknownChar) {
        boolean result = false;
        if (QUOTE == unknownChar) {
            result = true;
        }
        return result;
    }

    //=======================================================================
    //*                           PARENTHESIS                               *
    //=======================================================================

    /**
     * Checks if a given character is a parenthesis.
     *
     * @param unknownChar the character to test
     * @return true if it is '(' or ')'
     */
    public boolean isParenthesis(char unknownChar) {
        boolean result = false;
        if (PARENTHESIS.charAt(0) == unknownChar || PARENTHESIS.charAt(1) == unknownChar) {
            result = true;
        }
        return result;
    }

    /**
     * @param paren the paren char to check
     * @return true if it is the closing paren ')'
     */
    private boolean isClosingParenthesis(char paren) {
        boolean result = false;
        if (PARENTHESIS.charAt(1) == paren) {
            result = true;
        }
        return result;
    }

    /**
     * Adds a parenthesis to the tracker; call EVERY time one is typed.
     * A closing ')' completes the closest wrapper missing an end; an opening
     * '(' completes the closest wrapper missing a start; otherwise a new
     * wrapper is created at this index.
     *
     * @param index the index of the newly added parenthesis
     * @param paren the parenthesis character
     */
    public void addParenthesis(int index, char paren) {
        int parenthesisOfInterest = -1;
        for (int i = 0; i < allTextWrapperSizes[1]; i++) {
            if (!parenthesis.get(i).closed) {
                // Wrapper missing its end, and the new char is a closer
                if (parenthesis.get(i).endIndex == -1 && isClosingParenthesis(paren)) {
                    // Close the closest available unclosed wrapper.
                    if (parenthesisOfInterest == -1) {
                        parenthesisOfInterest = i;
                    } else {
                        if (parenthesis.get(i).startIndex > parenthesis.get(parenthesisOfInterest).startIndex) {
                            parenthesisOfInterest = i;
                        }
                    }
                // Wrapper missing its start, and the new char is an opener
                } else if (parenthesis.get(i).startIndex == -1 && !isClosingParenthesis(paren)) {
                    if (parenthesisOfInterest == -1) {
                        parenthesisOfInterest = i;
                    } else {
                        if (parenthesis.get(i).endIndex < parenthesis.get(parenthesisOfInterest).endIndex) {
                            parenthesisOfInterest = i;
                        }
                    }
                }
            }
        }

        // No existing wrapper fits: create a new one.
        if (parenthesisOfInterest == -1) {
            parenthesis.add(new TextWrapper(index, paren));
            allTextWrapperSizes[1]++;
        // Completes (with either a start or end) an existing wrapper.
        } else {
            parenthesis.get(parenthesisOfInterest).setClosingWrapper(index);
        }
    }

    //=======================================================================
    //*                             BRACKETS                                *
    //=======================================================================

    /**
     * Checks if a given character is a bracket.
     *
     * @param unknownChar the character to test
     * @return true if it is '[' or ']'
     */
    public boolean isBracket(char unknownChar) {
        boolean result = false;
        if (BRACKETS.charAt(0) == unknownChar || BRACKETS.charAt(1) == unknownChar) {
            result = true;
        }
        return result;
    }

    /**
     * @param bracket the bracket char to check
     * @return true if it is the closing bracket ']'
     */
    private boolean isClosingBracket(char bracket) {
        boolean result = false;
        if (BRACKETS.charAt(1) == bracket) {
            result = true;
        }
        return result;
    }

    /**
     * Adds a bracket to the tracker; call EVERY time one is typed.
     * Same matching rules as {@link #addParenthesis(int, char)}.
     *
     * @param index the index of the newly added bracket
     * @param bracket the bracket character
     */
    public void addBracket(int index, char bracket) {
        int bracketOfInterest = -1;
        for (int i = 0; i < allTextWrapperSizes[2]; i++) {
            if (!brackets.get(i).closed) {
                // Wrapper missing its end, and the new char is a closer
                if (brackets.get(i).endIndex == -1 && isClosingBracket(bracket)) {
                    if (bracketOfInterest == -1) {
                        bracketOfInterest = i;
                    } else {
                        if (brackets.get(i).startIndex > brackets.get(bracketOfInterest).startIndex) {
                            bracketOfInterest = i;
                        }
                    }
                // Wrapper missing its start, and the new char is an opener
                } else if (brackets.get(i).startIndex == -1 && !isClosingBracket(bracket)) {
                    if (bracketOfInterest == -1) {
                        bracketOfInterest = i;
                    } else {
                        if (brackets.get(i).endIndex < brackets.get(bracketOfInterest).endIndex) {
                            bracketOfInterest = i;
                        }
                    }
                }
            }
        }

        // No existing wrapper fits: create a new one.
        if (bracketOfInterest == -1) {
            brackets.add(new TextWrapper(index, bracket));
            allTextWrapperSizes[2]++;
        // Completes (with either a start or end) an existing wrapper.
        } else {
            brackets.get(bracketOfInterest).setClosingWrapper(index);
        }
    }

    //=======================================================================
    //*                            SQUIGGLIES                               *
    //=======================================================================

    /**
     * Checks if a given character is a squiggly (curly brace).
     *
     * @param unknownChar the character to test
     * @return true if it is '{' or '}'
     */
    public boolean isSquiggly(char unknownChar) {
        boolean result = false;
        if (SQUIGGLIES.charAt(0) == unknownChar || SQUIGGLIES.charAt(1) == unknownChar) {
            result = true;
        }
        return result;
    }

    /**
     * @param squiggly the squiggly char to check
     * @return true if it is the closing squiggly '}'
     */
    private boolean isClosingSquiggly(char squiggly) {
        boolean result = false;
        if (SQUIGGLIES.charAt(1) == squiggly) {
            result = true;
        }
        return result;
    }

    /**
     * Adds a squiggly to the tracker; call EVERY time one is typed.
     * Same matching rules as {@link #addParenthesis(int, char)}.
     *
     * @param index the index of the newly added squiggly
     * @param squiggly the squiggly character
     */
    public void addSquiggly(int index, char squiggly) {
        int squigglyOfInterest = -1;
        for (int i = 0; i < allTextWrapperSizes[3]; i++) {
            if (!squigglies.get(i).closed) {
                // Wrapper missing its end, and the new char is a closer
                if (squigglies.get(i).endIndex == -1 && isClosingSquiggly(squiggly)) {
                    if (squigglyOfInterest == -1) {
                        squigglyOfInterest = i;
                    } else {
                        if (squigglies.get(i).startIndex > squigglies.get(squigglyOfInterest).startIndex) {
                            squigglyOfInterest = i;
                        }
                    }
                // Wrapper missing its start, and the new char is an opener
                } else if (squigglies.get(i).startIndex == -1 && !isClosingSquiggly(squiggly)) {
                    if (squigglyOfInterest == -1) {
                        squigglyOfInterest = i;
                    } else {
                        if (squigglies.get(i).endIndex < squigglies.get(squigglyOfInterest).endIndex) {
                            squigglyOfInterest = i;
                        }
                    }
                }
            }
        }

        // No existing wrapper fits: create a new one.
        if (squigglyOfInterest == -1) {
            squigglies.add(new TextWrapper(index, squiggly));
            allTextWrapperSizes[3]++;
        // Completes (with either a start or end) an existing wrapper.
        } else {
            squigglies.get(squigglyOfInterest).setClosingWrapper(index);
        }
    }

    //=======================================================================
    //*                            EOS METHODS                              *
    //=======================================================================

    /**
     * Adds an EOS character to the tracker; call EVERY time one is typed,
     * whether it is a real sentence end, an abbreviation, an ellipsis, etc.
     *
     * @param eosChar the EOS character to track
     * @param location the index of the EOS character
     * @param ignore whether to skip this character as an actual sentence end
     */
    public void addEOS(char eosChar, int location, boolean ignore) {
        Logger.logln(NAME+"EOS character added at " + location);
        eoses.add(new EOS(eosChar, location, ignore));
        eosSize++;
    }

    /**
     * Finds the EOS character at the given index and sets its ignore flag.
     *
     * @param index the index of the EOS character to edit
     * @param ignore whether the character represents a real sentence end
     * @return whether an EOS character was found at the given index
     */
    public boolean setIgnoreEOS(int index, boolean ignore) {
        boolean found = false;

        for (int i = 0; i < eosSize; i++) {
            if (index == eoses.get(i).location) {
                Logger.logln(NAME+"Will ignore EOS character at " + index + ": " + ignore);
                eoses.get(i).ignore = ignore;
                found = true;
                break;
            }
        }

        return found;
    }

    /**
     * Checks if a given character could end a sentence ('.', '!' or '?').
     *
     * @param unknownChar the character to test
     * @return true if it is an end-of-sentence character
     */
    public boolean isEOS(char unknownChar) {
        boolean result = false;

        if (EOS.contains(unknownChar)) {
            result = true;
        }

        return result;
    }

    /**
     * Checks whether a sentence ends at the given index: a tracked EOS
     * character exists there AND is not being ignored.
     *
     * @param index the index to check for a sentence end
     * @return whether a sentence ends at the given index
     */
    public boolean isSentenceEndAtIndex(int index) {
        boolean result = false;

        /**
         * For first processing (TODO and possibly reprocessing, if this is
         * the case then this check will only work for the first process), we
         * don't want the code below to run and check everything, just return
         * true every time. (Just trust me, it works).
         */
        if (eosSize == 0) {
            result = true;
        } else {
            for (int i = 0; i < eosSize; i++) {
                if (index == eoses.get(i).location) {
                    if (!eoses.get(i).ignore) {
                        result = true;
                    }
                    break;
                }
            }
        }

        return result;
    }

    /**
     * Document location of the index-th tracked EOS character.
     * NOTE(review): no bounds check; callers must pass index &lt; eosSize.
     */
    public int getLocation(int index) {
        return eoses.get(index).location;
    }

    /**
     * Removes any EOS objects in the range [lowerBound, upperBound) —
     * inclusive start, exclusive end.
     *
     * @param lowerBound start of the range (included)
     * @param upperBound end of the range (excluded)
     * @return whether any EOSes were removed from the given range
     */
    public boolean removeEOSesInRange(int lowerBound, int upperBound) {
        Logger.logln(NAME+"Removing EOSes in range " + lowerBound + " - " + upperBound);
        int location;
        boolean removed = false;

        for (int i = 0; i < eosSize; i++) {
            location = eoses.get(i).location;
            if (location >= lowerBound && location < upperBound) {
                Logger.logln(NAME+"EOS removed at " + location);
                eoses.remove(i);
                i--; // decrement 'i' so that we don't miss the object that shifts down into the spot just freed.
                eosSize--; // also decrement eosSize
                removed = true;
            }
        }

        return removed;
    }
}

/**
 * Holds one EOS character occurrence and its location within the document
 * (0-based from the start of the document).
 *
 * @author Andrew W.E. McDonald
 * @author Marc Barrowclift
 */
class EOS implements Serializable {

    private static final long serialVersionUID = -3147071940148952343L;

    public char eos;        // the EOS character itself
    public int location;    // 0-based document index of the character
    public boolean ignore;  // true = not a real sentence end

    /**
     * Constructor.
     *
     * @param eos the EOS character to track
     * @param location the location of the EOS character
     * @param ignore whether to skip it as a real sentence end
     */
    public EOS( char eos, int location, boolean ignore) {
        this.eos = eos;
        this.location = location;
        this.ignore = ignore;
    }

    /**
     * Copy constructor.
     *
     * @param eosObj an EOS instance to copy
     */
    public EOS( EOS eosObj) {
        this.eos = eosObj.eos;
        this.location = eosObj.location;
        this.ignore = eosObj.ignore;
    }

    /**
     * Debug-friendly representation of this EOS character.
     *
     * @return e.g. "[ . : 42, is end of sentence = true ]"
     */
    @Override
    public String toString() {
        return "[ "+eos+" : "+location+", is end of sentence = " + !ignore + " ]";
    }
}
spencermwoo/anonymouth
src/edu/drexel/psal/anonymouth/utils/SpecialCharTracker.java
214,514
/*
 * Copyright IBM Corp. and others 2006
 *
 * This program and the accompanying materials are made available under
 * the terms of the Eclipse Public License 2.0 which accompanies this
 * distribution and is available at https://www.eclipse.org/legal/epl-2.0/
 * or the Apache License, Version 2.0 which accompanies this distribution and
 * is available at https://www.apache.org/licenses/LICENSE-2.0.
 *
 * This Source Code may also be made available under the following
 * Secondary Licenses when the conditions for such availability set
 * forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
 * General Public License, version 2 with the GNU Classpath
 * Exception [1] and GNU General Public License, version 2 with the
 * OpenJDK Assembly Exception [2].
 *
 * [1] https://www.gnu.org/software/classpath/license.html
 * [2] https://openjdk.org/legal/assembly-exception.html
 *
 * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0 OR GPL-2.0-only WITH OpenJDK-assembly-exception-1.0
 */
package com.ibm.j9ddr.corereaders.tdump.zebedee.mvs;

import java.util.logging.*;

/**
 * This class represents a set of registers for a failed thread.
 */
public class RegisterSet {

    /** The 16 general-purpose register values. */
    long[] registers = new long[16];
    /** The Program Status Word; 0 until {@link #setPSW(long)} is called. */
    long psw;
    /** Human-readable description of where the registers were found. */
    String whereFound;

    /** Logger (now final: it is never reassigned). */
    private static final Logger log = Logger.getLogger(com.ibm.j9ddr.corereaders.ICoreFileReader.J9DDR_CORE_READERS_LOGGER_NAME);

    /**
     * Return an array of the register values.
     * Note: this is the internal array, not a copy; callers must not mutate it.
     */
    public long[] getRegisters() {
        return registers;
    }

    /**
     * Get the value of the specified register.
     *
     * @param index the register number (0-15)
     */
    public long getRegister(int index) {
        return registers[index];
    }

    /**
     * Get the value of the specified register for use as an address.
     * Currently returns the raw register value; the address-mode masking
     * below was disabled because a valid PSW is not always available.
     */
    public long getRegisterAsAddress(int index) {
        /* I think this was added by Andrew J? Not sure about this, we don't always
         * have a valid PSW. Null out for now. XXX */
        /*
        int addressMode = (int)(this.getPSW() >>> 31) & 3;
        switch (addressMode) {
        case 0:
            return registers[index] & 0xffffff;
        case 1:
            return registers[index] & 0x7fffffff;
        case 2:
            assert false;
        case 3:
        }
        */
        return registers[index];
    }

    /**
     * Sets the specified register.
     *
     * @param index the register whose value is to be set
     * @param value the value to set it to
     */
    public void setRegister(int index, long value) {
        registers[index] = value;
        // Guard the log call so the message string is only built when FINE
        // logging is actually enabled (this runs once per register).
        if (log.isLoggable(Level.FINE)) {
            log.fine("set register " + index + " to 0x" + hex(value));
        }
    }

    /**
     * Returns the PSW. XXX How big is the PSW on a 64-bit machine?
     * NOTE(review): asserts psw != 0, yet getRegisterAsAddress admits a valid
     * PSW is not always available — callers must setPSW first, TODO confirm.
     */
    public long getPSW() {
        assert psw != 0;
        return psw;
    }

    /**
     * Sets the PSW.
     */
    public void setPSW(long psw) {
        this.psw = psw;
    }

    /**
     * Sets the whereFound string.
     */
    public void setWhereFound(String whereFound) {
        this.whereFound = whereFound;
    }

    /**
     * Returns a string indicating where the registers were found. This is mainly for
     * debugging purposes.
     */
    public String whereFound() {
        return whereFound;
    }

    /* The unused private hex(int) overload was removed; all callers pass long. */
    private static String hex(long i) {
        return Long.toHexString(i);
    }
}
eclipse-openj9/openj9
debugtools/DDR_VM/src/com/ibm/j9ddr/corereaders/tdump/zebedee/mvs/RegisterSet.java
214,515
package hex.nb;

import hex.FrameTask.DataInfo;
import water.*;
import water.api.DocGen;
import water.fvec.*;
import water.util.RString;
import water.util.Utils;

/**
 * Naive Bayes
 * This is an algorithm for computing the conditional a-posterior probabilities of a categorical
 * response from independent predictors using Bayes rule.
 * <a href = "http://en.wikipedia.org/wiki/Naive_Bayes_classifier">Naive Bayes on Wikipedia</a>
 * <a href = "http://cs229.stanford.edu/notes/cs229-notes2.pdf">Lecture Notes by Andrew Ng</a>
 * @author anqi_fu
 *
 */
public class NaiveBayes extends Job.ModelJobWithoutClassificationField {
  static final int API_WEAVER = 1;
  static public DocGen.FieldDoc[] DOC_FIELDS;
  static final String DOC_GET = "naive bayes";

  @API(help = "Laplace smoothing parameter", filter = Default.class, lmin = 0, lmax = 100000, json = true)
  public int laplace = 0;

  @API(help = "Min. standard deviation to use for observations with not enough data", filter = Default.class, dmin = 1e-10, json = true)
  public double min_std_dev = 1e-3;

  @API(help = "Drop columns with more than 20% missing values", filter = Default.class)
  public boolean drop_na_cols = true;

  /**
   * Job entry point: prepares the frame, runs a single MapReduce pass (NBTask) to gather
   * counts/sums, then turns those statistics into model probabilities via buildModel.
   */
  @Override protected void execImpl() {
    long before = System.currentTimeMillis();
    Frame fr = DataInfo.prepareFrame(source, response, ignored_cols, false, true /*drop const*/, drop_na_cols);
    DataInfo dinfo = new DataInfo(fr, 1, false, true, DataInfo.TransformType.NONE, DataInfo.TransformType.NONE);
    NBTask tsk = new NBTask(this, dinfo).doAll(dinfo._adaptedFrame);
    NBModel myModel = buildModel(dinfo, tsk, laplace, min_std_dev);
    myModel.start_training(before);
    myModel.stop_training();
    myModel.delete_and_lock(self());
    myModel.unlock(self());
  }

  /**
   * Validates user parameters before the job runs: response must be categorical,
   * laplace must be non-negative, and min_std_dev must exceed the API's dmin floor.
   */
  @Override protected void init() {
    super.init();
    if(!response.isEnum()) throw new IllegalArgumentException("Response must be a categorical column");
    if (laplace < 0) throw new IllegalArgumentException("Laplace smoothing must be an integer >= 0.");
    if (min_std_dev <= 1e-10) throw new IllegalArgumentException("Min. standard deviation must be at least 1e-10.");
  }

  @Override protected Response redirect() {
    return NBProgressPage.redirect(this, self(), dest());
  }

  /**
   * Builds an HTML link to the NaiveBayes query page with the source key pre-filled.
   * Used by the web UI.
   */
  public static String link(Key src_key, String content) {
    RString rs = new RString("<a href='/2/NaiveBayes.query?%key_param=%$key'>%content</a>");
    rs.replace("key_param", "source");
    rs.replace("key", src_key.toString());
    rs.replace("content", content);
    return rs.toString();
  }

  /**
   * Converts the raw counts/sums accumulated by NBTask into model parameters:
   * a-priori class probabilities, per-level conditional probabilities for categorical
   * predictors, and (mean, sd) pairs for numeric predictors.
   *
   * @param dinfo       column metadata for the adapted frame
   * @param tsk         completed counting task (joint counts and response counts)
   * @param laplace     Laplace smoothing constant applied to counts
   * @param min_std_dev lower bound on standard deviation, stored in the model
   * @return the assembled NBModel
   */
  public NBModel buildModel(DataInfo dinfo, NBTask tsk, double laplace, double min_std_dev) {
    logStart();
    double[] pprior = tsk._rescnt.clone();
    double[][][] pcond = tsk._jntcnt.clone();
    String[][] domains = dinfo._adaptedFrame.domains();

    // A-priori probability of response y
    for(int i = 0; i < pprior.length; i++)
      pprior[i] = (pprior[i] + laplace)/(tsk._nobs + tsk._nres*laplace);
      // pprior[i] = pprior[i]/tsk._nobs;   // Note: R doesn't apply laplace smoothing to priors, even though this is textbook definition

    // Probability of categorical predictor x_j conditional on response y
    for(int col = 0; col < dinfo._cats; col++) {
      assert pcond[col].length == tsk._nres;
      for(int i = 0; i < pcond[col].length; i++) {
        for(int j = 0; j < pcond[col][i].length; j++)
          // Smoothed frequency: (count + laplace) / (class count + #levels * laplace)
          pcond[col][i][j] = (pcond[col][i][j] + laplace)/(tsk._rescnt[i] + domains[col].length*laplace);
      }
    }

    // Mean and standard deviation of numeric predictor x_j for every level of response y
    for(int col = 0; col < dinfo._nums; col++) {
      for(int i = 0; i < pcond[0].length; i++) {
        int cidx = dinfo._cats + col;
        double num = tsk._rescnt[i];
        double pmean = pcond[cidx][i][0]/num;
        pcond[cidx][i][0] = pmean;
        // double pvar = pcond[cidx][i][1]/num - pmean*pmean;
        // Unbiased (n-1) sample variance, computed from the sum and sum of squares.
        double pvar = pcond[cidx][i][1]/(num - 1) - pmean*pmean*num/(num - 1);
        pcond[cidx][i][1] = Math.sqrt(pvar);
      }
    }

    Key dataKey = input("source") == null ? null : Key.make(input("source"));
    return new NBModel(destination_key, dataKey, dinfo, tsk, pprior, pcond, laplace, min_std_dev);
  }

  // Note: NA handling differs from R for efficiency purposes
  // R's method: For each predictor x_j, skip counting that row for p(x_j|y) calculation if x_j = NA. If response y = NA, skip counting row entirely in all calculations
  // H2O's method: Just skip all rows where any x_j = NA or y = NA. Should be more memory-efficient, but results incomparable with R.
  public static class NBTask extends MRTask2<NBTask> {
    final Job _job;
    final protected DataInfo _dinfo;
    final int _nres;          // Number of levels for the response y

    public int _nobs;         // Number of rows counted in calculation
    public double[] _rescnt;  // Count of each level in the response
    public double[][][] _jntcnt;  // For each categorical predictor, joint count of response and predictor levels
                                  // For each numeric predictor, sum of entries for every response level

    /**
     * Sizes the accumulators from the frame's column domains. Numeric columns
     * (null domain) get 2 slots per response level: [0] = sum(x), [1] = sum(x^2).
     */
    public NBTask(Job job, DataInfo dinfo) {
      _job = job;
      _dinfo = dinfo;
      _nobs = 0;

      String[][] domains = dinfo._adaptedFrame.domains();
      int ncol = dinfo._adaptedFrame.numCols();
      assert ncol-1 == dinfo._nums + dinfo._cats;   // ncol-1 because we drop response col

      _nres = domains[ncol-1].length;
      _rescnt = new double[_nres];
      _jntcnt = new double[ncol-1][][];
      for(int i = 0; i < _jntcnt.length; i++) {
        int ncnt = domains[i] == null ? 2 : domains[i].length;
        _jntcnt[i] = new double[_nres][ncnt];
      }
    }

    /**
     * Accumulates per-chunk counts. Rows with any NA (predictor or response) are
     * skipped entirely — see the NA-handling note on the class.
     */
    @Override public void map(Chunk[] chks) {
      int res_idx = chks.length - 1;
      Chunk res = chks[res_idx];

      OUTER:
      for(int row = 0; row < chks[0]._len; row++) {
        // Skip row if any entries in it are NA
        for(int col = 0; col < chks.length; col++) {
          if(chks[col].isNA0(row)) continue OUTER;
        }

        // Record joint counts of categorical predictors and response
        int rlevel = (int)res.at0(row);
        for(int col = 0; col < _dinfo._cats; col++) {
          int plevel = (int)chks[col].at0(row);
          _jntcnt[col][rlevel][plevel]++;
        }

        // Record sum for each pair of numerical predictors and response
        for(int col = 0; col < _dinfo._nums; col++) {
          int cidx = _dinfo._cats + col;
          double x = chks[cidx].at0(row);
          _jntcnt[cidx][rlevel][0] += x;
          _jntcnt[cidx][rlevel][1] += x*x;
        }
        _rescnt[rlevel]++;
        _nobs++;
      }
    }

    /** Merges counts from another task instance (standard MapReduce reduce step). */
    @Override public void reduce(NBTask nt) {
      _nobs += nt._nobs;
      Utils.add(_rescnt, nt._rescnt);
      for(int col = 0; col < _jntcnt.length; col++)
        _jntcnt[col] = Utils.add(_jntcnt[col], nt._jntcnt[col]);
    }
  }
}
johnugeorge/h2o
src/main/java/hex/nb/NaiveBayes.java
214,528
package annotations;

/**
 * Demo class whose type, field, constructor and method each carry a
 * {@code @DoeDeGroeten} annotation, so they can be read back via reflection.
 */
@DoeDeGroeten("Hello, class!")
public class Hallo {

    /** Annotated public field (retrievable via {@code Class.getField}). */
    @DoeDeGroeten("Hello, field!")
    public String groetToestand;

    /** Annotated no-arg constructor. */
    @DoeDeGroeten("Hello, constructor!")
    public Hallo() {
    }

    /** Annotated method; the body is intentionally empty — only the annotation matters. */
    @DoeDeGroeten("Hello, method!")
    public void groet() {
    }
}
gvdhaege/KdG
Java Programming 2/module 03 - Reflection/demo/Voorbeelden_03_Reflection_Annotations/06_doeDeGroeten/src/annotations/Hallo.java
214,534
package annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Annotation carrying a greeting message. May be placed on types, fields,
 * constructors and methods; retained at runtime so it can be read via reflection.
 */
@Target({ElementType.FIELD, ElementType.METHOD, ElementType.TYPE, ElementType.CONSTRUCTOR})
@Retention(RetentionPolicy.RUNTIME)
public @interface DoeDeGroeten {
    /**
     * The greeting text attached to the annotated element.
     * (Annotation members are implicitly public, so the redundant modifier was dropped.)
     */
    String value();
}
gvdhaege/KdG
Java Programming 2/module 03 - Reflection/demo/Voorbeelden_03_Reflection_Annotations/06_doeDeGroeten/src/annotations/DoeDeGroeten.java
214,537
package annotations;

import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;

/**
 * Demonstrates reading runtime annotations via reflection: prints the
 * {@code @DoeDeGroeten} annotation attached to the class, constructor,
 * method and field of {@link Hallo}, in that order.
 */
public class ReflectieDemo {

    public static void main(String[] args) throws Exception {
        Class<Hallo> halloClass = Hallo.class;

        // Annotation on the class itself.
        System.out.println(halloClass.getAnnotation(DoeDeGroeten.class));

        // Annotation on the no-arg constructor.
        Constructor<Hallo> noArgCtor = halloClass.getConstructor();
        System.out.println(noArgCtor.getAnnotation(DoeDeGroeten.class));

        // Annotation on the groet() method.
        Method groetMethod = halloClass.getMethod("groet");
        System.out.println(groetMethod.getAnnotation(DoeDeGroeten.class));

        // Annotation on the public groetToestand field.
        Field groetField = halloClass.getField("groetToestand");
        System.out.println(groetField.getAnnotation(DoeDeGroeten.class));
    }
}
gvdhaege/KdG
Java Programming 2/module 03 - Reflection/demo/Voorbeelden_03_Reflection_Annotations/06_doeDeGroeten/src/annotations/ReflectieDemo.java
214,541
package info.rsdev.playlists.services;

import info.rsdev.playlists.domain.Song;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

/**
 * Tests for {@link SongComparator}: songs whose artist and/or title differ only
 * in insignificant ways must compare as equal (compare returns 0).
 */
public class SongComparatorTest {

    @Test
    void equalWhenArtistsAreSimilarEnough() {
        // Identical title; artist differs by a single substituted character.
        var first = new Song("The Scr!pt", "Arms Open");
        var second = new Song("The Script", "Arms Open");
        assertEquals(0, SongComparator.INSTANCE.compare(first, second));
    }

    @Test
    void equalWhenArtistsAndTitleAreSimilarEnough() {
        // Both fields differ slightly: a "The" prefix and a trailing "!".
        var first = new Song("New Kids", "Groeten uit Brabant");
        var second = new Song("The New Kids", "Groeten uit Brabant!");
        assertEquals(0, SongComparator.INSTANCE.compare(first, second));
    }
}
dschoorl/playlists
src/test/java/info/rsdev/playlists/services/SongComparatorTest.java
214,543
/*******************************************************************************
 * Copyright 2015 DANS - Data Archiving and Networked Services
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/
package nl.knaw.dans.common.lang.mail;

import static org.junit.Assert.assertEquals;

import java.io.FileInputStream;
import java.io.InputStream;

import org.junit.Test;

/**
 * Unit tests for {@link MailComposer}: raw message pass-through, placeholder
 * resolution against a bean ("Class.getter" syntax), error handling for unknown
 * objects/methods, and full template composition.
 */
public class MailComposerTest {

    @Test
    public void getRawString() throws Exception {
        MailComposer composer = new MailComposer();
        // NOTE(review): the stream is never closed; harmless in a short-lived test,
        // but try-with-resources would be cleaner if the project targets Java 7+.
        InputStream inStream = new FileInputStream("src/test/resources/test-files/mail/test-mail_01.txt");
        String raw = composer.composeMessage(inStream, false);
        assertEquals("Hallo,\n\nDit is een mailbericht.\n\ngroeten,\nde afzender", raw);
    }

    @Test
    public void getValue() throws Exception {
        TestObject to = new TestObject();
        MailComposer composer = new MailComposer(to);

        // Placeholders are resolved as "<SimpleClassName>.<getterName>".
        String placeHolder = "TestObject.getThis";
        assertEquals("something", composer.getValue(placeHolder));

        placeHolder = "TestObject.getThat";
        assertEquals("another thing", composer.getValue(placeHolder));

        placeHolder = "TestObject.getInt";
        assertEquals("6", composer.getValue(placeHolder));

        placeHolder = "TestObject.getBoolean";
        assertEquals("true", composer.getValue(placeHolder));

        placeHolder = "TestObject.getInteger";
        assertEquals("42", composer.getValue(placeHolder));

        // A null getter result renders as the empty string.
        placeHolder = "TestObject.getNull";
        assertEquals("", composer.getValue(placeHolder));
    }

    /** An unknown object name in the placeholder must raise MailComposerException. */
    @Test(expected = MailComposerException.class)
    public void getValueWithWrongObject() throws Exception {
        TestObject to = new TestObject();
        MailComposer composer = new MailComposer(to);
        String placeHolder = "FooBar.getThis";
        composer.getValue(placeHolder);
    }

    /** An unknown getter name in the placeholder must raise MailComposerException. */
    @Test(expected = MailComposerException.class)
    public void getValueWithWrongMethod() throws Exception {
        TestObject to = new TestObject();
        MailComposer composer = new MailComposer(to);
        String placeHolder = "TestObject.getFooBar";
        composer.getValue(placeHolder);
    }

    @Test
    public void compose() throws Exception {
        TestObject to = new TestObject();
        MailComposer composer = new MailComposer(to);
        InputStream inStream = new FileInputStream("src/test/resources/test-files/mail/test-mail_02.txt");
        String message = composer.compose(inStream);
        assertEquals("something ought to be done.\n\nAnd there is another thing: 42 > 6.\n\nAnd that's true!\n\ngreetings", message);
    }

    /** Simple bean exposing one getter per supported value type for placeholder resolution. */
    static class TestObject {
        public String getThis() {
            return "something";
        }

        public String getThat() {
            return "another thing";
        }

        public int getInt() {
            return 6;
        }

        public boolean getBoolean() {
            return true;
        }

        public Integer getInteger() {
            // valueOf instead of the deprecated Integer(int) constructor (deprecated since Java 9).
            return Integer.valueOf(42);
        }

        public Object getNull() {
            return null;
        }
    }
}
DANS-KNAW/dccd-legacy-libs
lang/src/test/java/nl/knaw/dans/common/lang/mail/MailComposerTest.java