data
dict |
---|
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "1jrTVLo1tpC",
"fno": "09082802"
},
"previous": null
},
"article": {
"__typename": "ArticleType",
"abstract": "Presents the introductory editorial for this issue of the publication.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Presents the introductory editorial for this issue of the publication."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Klaus Mueller",
"givenName": "Klaus",
"surname": "Mueller"
}
],
"doi": "10.1109/TVCG.2020.2973745",
"fno": "09082801",
"hasPdf": true,
"id": "1jrU0RsEpnG",
"idPrefix": "tg",
"isOpenAccess": true,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [],
"normalizedAbstract": "Presents the introductory editorial for this issue of the publication.",
"normalizedTitle": "Editor's Note",
"notes": null,
"notesType": null,
"pages": "2135-2141",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"title": "Editor's Note",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"introductory",
"DATA"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/an/2022/03/09875139/1GlbXTIEwaQ",
"doi": null,
"id": "mags/an/2022/03/09875139",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/an",
"title": "IEEE Annals of the History of Computing"
},
"title": "From the Editor's Desk"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/an/2022/04/09972860/1ISVNzFCZu8",
"doi": null,
"id": "mags/an/2022/04/09972860",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/an",
"title": "IEEE Annals of the History of Computing"
},
"title": "From the Editor's Desk"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/td/2020/02/08956009/1gtJY06WATe",
"doi": null,
"id": "trans/td/2020/02/08956009",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems"
},
"title": "Editor's Note"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/an/2020/01/09031986/1i6VhktGnkc",
"doi": null,
"id": "mags/an/2020/01/09031986",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/an",
"title": "IEEE Annals of the History of Computing"
},
"title": "From the Editor's Desk"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/an/2020/02/09103673/1keqEV28ioE",
"doi": null,
"id": "mags/an/2020/02/09103673",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/an",
"title": "IEEE Annals of the History of Computing"
},
"title": "From the Editor's Desk"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/td/2021/04/09257115/1oFCKncAhqM",
"doi": null,
"id": "trans/td/2021/04/09257115",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems"
},
"title": "Editor's Note"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/an/2020/04/09263260/1oReM0ot75m",
"doi": null,
"id": "mags/an/2020/04/09263260",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/an",
"title": "IEEE Annals of the History of Computing"
},
"title": "From the Editor's Desk"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/td/2021/10/09408530/1sVEVpV9zNK",
"doi": null,
"id": "trans/td/2021/10/09408530",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems"
},
"title": "Editor's Note"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/an/2021/03/09546090/1x6zEFuXbH2",
"doi": null,
"id": "mags/an/2021/03/09546090",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/an",
"title": "IEEE Annals of the History of Computing"
},
"title": "From the Editor's Desk"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2021/12/09586410/1y11sTji3vO",
"doi": null,
"id": "trans/tg/2021/12/09586410",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Editor's Note"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "1haUx0fpghW",
"fno": "08978585"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1jrU0RsEpnG",
"fno": "09082801"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Paluno - The Ruhr Institute for Software Technology, University of Duisburg-Essen, Duisburg, Germany",
"fullName": "Fabian Beck",
"givenName": "Fabian",
"surname": "Beck"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, Korea",
"fullName": "Jinwook Seo",
"givenName": "Jinwook",
"surname": "Seo"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN, USA",
"fullName": "Chaoli Wang",
"givenName": "Chaoli",
"surname": "Wang"
}
],
"doi": "10.1109/TVCG.2020.2974638",
"fno": "09082802",
"hasPdf": true,
"id": "1jrTVLo1tpC",
"idPrefix": "tg",
"isOpenAccess": true,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Visualization",
"Computer Graphics"
],
"normalizedAbstract": "The five papers in this special section were from the 2020 IEEE Pacific Visualization Symposium (IEEE PacificVis), which was scheduled to be hosted by Tianjin University and held in Tianjin, China, from April 14 to 17, 2020.",
"normalizedTitle": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020",
"notes": null,
"notesType": null,
"pages": "2142-2143",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"EEE",
"VISUALIZATION"
],
[
"Visualization",
"VISUALIZATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"doi": null,
"id": "trans/tg/2013/06/ttg2013060898",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf",
"doi": null,
"id": "trans/tg/2015/08/07138667",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2014/08/06847259/13rRUxD9gXJ",
"doi": null,
"id": "trans/tg/2014/08/06847259",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2018/06/08352605/13rRUxlgxOp",
"doi": null,
"id": "trans/tg/2018/06/08352605",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2018"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2019/06/08703194/19Er7j5Ad7a",
"doi": null,
"id": "trans/tg/2019/06/08703194",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2019"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tb/2022/01/09702708/1AH375DQaGY",
"doi": null,
"id": "trans/tb/2022/01/09702708",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics"
},
"title": "Guest Editors’ Introduction to the Special Section on Bioinformatics Research and Applications"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ",
"doi": null,
"id": "trans/tg/2022/06/09766260",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tb/2022/03/09788108/1DU9k5pRa4o",
"doi": null,
"id": "trans/tb/2022/03/09788108",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics"
},
"title": "Editorial"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tp/2020/07/09108341/1koL3gQqTHa",
"doi": null,
"id": "trans/tp/2020/07/09108341",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence"
},
"title": "Guest Editors' Introduction to the Special Section on Computational Photography"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs",
"doi": null,
"id": "trans/tg/2021/06/09430173",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "1iaeBQ4H756",
"fno": "09035636"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1jrTVLo1tpC",
"fno": "09082802"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Leipzig University",
"fullName": "Martin Reckziegel",
"givenName": "Martin",
"surname": "Reckziegel"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "German Aerospace Center DLR",
"fullName": "Linda Pfeiffer",
"givenName": "Linda",
"surname": "Pfeiffer"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Leipzig University",
"fullName": "Christian Heine",
"givenName": "Christian",
"surname": "Heine"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "University of Southern Denmark",
"fullName": "Stefan Jänicke",
"givenName": "Stefan",
"surname": "Jänicke"
}
],
"doi": "10.1109/TVCG.2020.2970509",
"fno": "08978585",
"hasPdf": true,
"id": "1haUx0fpghW",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Cartography",
"Data Visualisation",
"Distance Measurement",
"Point Cloud Visualizations",
"Information Visualization Applications",
"Labeled Point Cloud Representations",
"Complex Representations",
"Point Clusters",
"Label Size",
"Label Centers",
"Dot Label Relations",
"Visualization",
"Three Dimensional Displays",
"Labeling",
"Task Analysis",
"Predictive Models",
"Urban Areas",
"Lenses",
"Human Judgment Model",
"Document Visualization",
"Label Placement"
],
"normalizedAbstract": "When point clouds are labeled in information visualization applications, sophisticated guidelines as in cartography do not yet exist. Existing naive strategies may mislead as to which points belong to which label. To inform improved strategies, we studied factors influencing this phenomenon. We derived a class of labeled point cloud representations from existing applications and we defined different models predicting how humans interpret such complex representations, focusing on their geometric properties. We conducted an empirical study, in which participants had to relate dots to labels in order to evaluate how well our models predict. Our results indicate that presence of point clusters, label size, and angle to the label have an effect on participants' judgment as well as that the distance measure types considered perform differently discouraging the use of label centers as reference points.",
"normalizedTitle": "Modeling How Humans Judge Dot-Label Relations in Point Cloud Visualizations",
"notes": null,
"notesType": null,
"pages": "2144-2155",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Modeling How Humans Judge Dot-Label Relations in Point Cloud Visualizations",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"cartography",
"APPLICATION"
],
[
"point cloud",
"DATA"
],
[
"information visualization applications",
"VISUALIZATION"
],
[
"naive strategies",
"METHOD"
],
[
"geometric properties",
"DATA"
],
[
"point cloud representations",
"VISUALIZATION"
],
[
"empirical study",
"EVALUATION"
],
[
"point clusters",
"DATA"
],
[
"label centers",
"DATA"
],
[
"label size",
"DATA"
],
[
"reference points",
"DATA"
],
[
"distance measure types",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icdmw/2017/3800a850/12OmNvFHfGd",
"doi": null,
"id": "proceedings/icdmw/2017/3800/0/3800a850",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icdmw/2017/3800/0",
"title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)"
},
"title": "Combining Active Learning and Semi-Supervised Learning by Using Selective Label Spreading"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2008/06/ttg2008061237/13rRUwbaqUM",
"doi": null,
"id": "trans/tg/2008/06/ttg2008061237",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Particle-based labeling: Fast point-feature labeling without obscuring other visual features"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2017/01/07539393/13rRUwjGoLK",
"doi": null,
"id": "trans/tg/2017/01/07539393",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "An Evaluation of Visual Search Support in Maps"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/wacv/2019/197500b743/18j8NGRjKve",
"doi": null,
"id": "proceedings/wacv/2019/1975/0/197500b743",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)"
},
"title": "IDD: A Dataset for Exploring Problems of Autonomous Navigation in Unconstrained Environments"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ase/2021/033700a943/1AjTfGOSCwU",
"doi": null,
"id": "proceedings/ase/2021/0337/0/033700a943",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ase/2021/0337/0",
"title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)"
},
"title": "Unsupervised Labeling and Extraction of Phrase-based Concepts in Vulnerability Descriptions"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2023/01/09904455/1H1gjlaBqVO",
"doi": null,
"id": "trans/tg/2023/01/09904455",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Multiple Forecast Visualizations (MFVs): Trade-offs in Trust and Performance in Multiple COVID-19 Forecast Visualizations"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2023/01/09917516/1HrexIf2zZe",
"doi": null,
"id": "trans/tg/2023/01/09917516",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Geo-Storylines: Integrating Maps into Storyline Visualizations"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/ai/5555/01/10113715/1MNbV9nYrXq",
"doi": null,
"id": "trans/ai/5555/01/10113715",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/ai",
"title": "IEEE Transactions on Artificial Intelligence"
},
"title": "Noisy Label Detection and Counterfactual Correction"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2020/01/08809750/1cHEu5CRoFq",
"doi": null,
"id": "trans/tg/2020/01/08809750",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Pattern-Driven Navigation in 2D Multiscale Visualizations with Scalable Insets"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tp/2022/12/09573413/1xH5E3Yjgek",
"doi": null,
"id": "trans/tp/2022/12/09573413",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence"
},
"title": "Adaptive Graph Guided Disambiguation for Partial Label Learning"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "1h2AIkwYg4E",
"fno": "08977320"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1haUx0fpghW",
"fno": "08978585"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA",
"fullName": "Liang Zhou",
"givenName": "Liang",
"surname": "Zhou"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany",
"fullName": "Marc Rivinius",
"givenName": "Marc",
"surname": "Rivinius"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA",
"fullName": "Chris R. Johnson",
"givenName": "Chris R.",
"surname": "Johnson"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Stuttgart, Germany",
"fullName": "Daniel Weiskopf",
"givenName": "Daniel",
"surname": "Weiskopf"
}
],
"doi": "10.1109/TVCG.2020.2970522",
"fno": "09035636",
"hasPdf": true,
"id": "1iaeBQ4H756",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Data Visualisation",
"Image Colour Analysis",
"Color Mapping",
"Simulated Glares",
"Photographic HDR Visualization",
"Photographic High Dynamic Range Scalar Visualization",
"2 D Visualization",
"Tone Mapping Operators",
"Data Visualization",
"Image Color Analysis",
"Pipelines",
"Dynamic Range",
"Visualization",
"Two Dimensional Displays",
"Monitoring",
"Tone Mapping",
"Glare",
"High Dynamic Range Visualization",
"2 D Diagrams"
],
"normalizedAbstract": "We propose a photographic method to show scalar values of high dynamic range (HDR) by color mapping for 2D visualization. We combine (1) tone-mapping operators that transform the data to the display range of the monitor while preserving perceptually important features, based on a systematic evaluation, and (2) simulated glares that highlight high-value regions. Simulated glares are effective for highlighting small areas (of a few pixels) that may not be visible with conventional visualizations; through a controlled perception study, we confirm that glare is preattentive. The usefulness of our overall photographic HDR visualization is validated through the feedback of expert users.",
"normalizedTitle": "Photographic High-Dynamic-Range Scalar Visualization",
"notes": null,
"notesType": null,
"pages": "2156-2167",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Photographic High-Dynamic-Range Scalar Visualization",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"color mapping",
"VISUALIZATION"
],
[
"2D visualization",
"VISUALIZATION"
],
[
"scalar values",
"DATA"
],
[
"photographic method",
"METHOD"
],
[
"high dynamic range",
"DATA"
],
[
"high-value regions",
"DATA"
],
[
"simulated glares",
"VISUALIZATION"
],
[
"systematic evaluation",
"EVALUATION"
],
[
"tone-mapping operators",
"METHOD"
],
[
"gla",
"EVALUATION"
],
[
"conventional visualizations",
"VISUALIZATION"
],
[
"Simulated glares",
"VISUALIZATION"
],
[
"controlled perception study",
"EVALUATION"
],
[
"expert users",
"EVALUATION"
],
[
"photographic HDR visualization",
"VISUALIZATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/aipr/2014/07041912/12OmNA14Aip",
"doi": null,
"id": "proceedings/aipr/2014/5921/0/07041912",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/aipr/2014/5921/0",
"title": "2014 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)"
},
"title": "High dynamic range (HDR) video processing for the exploitation of high bit-depth sensors in human-monitored surveillance"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/csie/2009/3507f583/12OmNCctfaE",
"doi": null,
"id": "proceedings/csie/2009/3507/6/3507f583",
"parentPublication": {
"__typename": "ParentPublication",
"id": null,
"title": null
},
"title": "Color Vision Based High Dynamic Range Images Rendering"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iccp/2009/05559003/12OmNCuDzub",
"doi": null,
"id": "proceedings/iccp/2009/4534/0/05559003",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)"
},
"title": "Artifact-free High Dynamic Range imaging"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/dcc/2016/07786173/12OmNxcMSkC",
"doi": null,
"id": "proceedings/dcc/2016/1853/0/07786173",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/dcc/2016/1853/0",
"title": "2016 Data Compression Conference (DCC)"
},
"title": "High Dynamic Range Video Coding with Backward Compatibility"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icis/2016/07550796/12OmNxw5Bpw",
"doi": null,
"id": "proceedings/icis/2016/0806/0/07550796",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icis/2016/0806/0",
"title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)"
},
"title": "High dynamic range image composition using a linear interpolation approach"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iv/2004/21770269/12OmNyo1nMX",
"doi": null,
"id": "proceedings/iv/2004/2177/0/21770269",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004."
},
"title": "Two-Channel Technique for High Dynamic Range Image Visualization"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvprw/2022/873900a546/1G56FK3UGPe",
"doi": null,
"id": "proceedings/cvprw/2022/8739/0/873900a546",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)"
},
"title": "Multi-Bracket High Dynamic Range Imaging with Event Cameras"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvprw/2022/873900b031/1G56nGzWShG",
"doi": null,
"id": "proceedings/cvprw/2022/8739/0/873900b031",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)"
},
"title": "Gamma-enhanced Spatial Attention Network for Efficient High Dynamic Range Imaging"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvpr/2021/450900h706/1yeJuGu5Xvq",
"doi": null,
"id": "proceedings/cvpr/2021/4509/0/450900h706",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"title": "Neural Auto-Exposure for High-Dynamic Range Object Detection"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvpr/2021/450900g293/1yeK6nSzK1y",
"doi": null,
"id": "proceedings/cvpr/2021/4509/0/450900g293",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"title": "End-to-end High Dynamic Range Camera Pipeline Optimization"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "1h2AIHeB46A",
"fno": "08977505"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1iaeBQ4H756",
"fno": "09035636"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Ayshwarya Saktheeswaran",
"givenName": "Ayshwarya",
"surname": "Saktheeswaran"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Arjun Srinivasan",
"givenName": "Arjun",
"surname": "Srinivasan"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "John Stasko",
"givenName": "John",
"surname": "Stasko"
}
],
"doi": "10.1109/TVCG.2020.2970512",
"fno": "08977320",
"hasPdf": true,
"id": "1h2AIkwYg4E",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Data Visualisation",
"Graphical User Interfaces",
"Mobile Handsets",
"Visual Network Exploration",
"Interactive Displays",
"Multimodal Input",
"Network Visualization Tool",
"Multimodal Interface",
"Interaction Patterns Participants",
"Multimodal Network Visualization Systems",
"Network Visualization Operations",
"Visualization",
"Encoding",
"Tools",
"Data Visualization",
"Speech Recognition",
"Natural Languages",
"Task Analysis",
"Multimodal Interaction",
"Network Visualizations",
"Natural Language Interfaces"
],
"normalizedAbstract": "Interaction plays a vital role during visual network exploration as users need to engage with both elements in the view (e.g., nodes, links) and interface controls (e.g., sliders, dropdown menus). Particularly as the size and complexity of a network grow, interactive displays supporting multimodal input (e.g., touch, speech, pen, gaze) exhibit the potential to facilitate fluid interaction during visual network exploration and analysis. While multimodal interaction with network visualization seems like a promising idea, many open questions remain. For instance, do users actually prefer multimodal input over unimodal input, and if so, why? Does it enable them to interact more naturally, or does having multiple modes of input confuse users? To answer such questions, we conducted a qualitative user study in the context of a network visualization tool, comparing speech- and touch-based unimodal interfaces to a multimodal interface combining the two. Our results confirm that participants strongly prefer multimodal input over unimodal input attributing their preference to: 1) the freedom of expression, 2) the complementary nature of speech and touch, and 3) integrated interactions afforded by the combination of the two modalities. We also describe the interaction patterns participants employed to perform common network visualization operations and highlight themes for future multimodal network visualization systems to consider.",
"normalizedTitle": "Touch? Speech? or Touch and Speech? Investigating Multimodal Interaction for Visual Network Exploration and Analysis",
"notes": null,
"notesType": null,
"pages": "2168-2179",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Touch? Speech? or Touch and Speech? Investigating Multimodal Interaction for Visual Network Exploration and Analysis",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"dropdown menu",
"VISUALIZATION"
],
[
"visual network exploration",
"APPLICATION"
],
[
"visual network exploration and analysis",
"APPLICATION"
],
[
"multimodal input",
"DATA"
],
[
"interactive displays",
"VISUALIZATION"
],
[
"fluid interaction",
"APPLICATION"
],
[
"network visualization",
"VISUALIZATION"
],
[
"multimodal interaction",
"METHOD"
],
[
"multimodal interface",
"METHOD"
],
[
"unimodal input",
"METHOD"
],
[
"speech- and touch-based unimodal interfaces",
"METHOD"
],
[
"multimodal input",
"METHOD"
],
[
"qualitative user study",
"EVALUATION"
],
[
"network visualization tool",
"VISUALIZATION"
],
[
"multimodal input",
"METHOD"
],
[
"unimodal input",
"METHOD"
],
[
"integrated interactions",
"EVALUATION"
],
[
"network visualization operation",
"VISUALIZATION"
],
[
"multimodal network visualization systems",
"VISUALIZATION"
],
[
"interaction",
"VISUALIZATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cscs/2017/07968566/12OmNARRYpY",
"doi": null,
"id": "proceedings/cscs/2017/1839/0/07968566",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cscs/2017/1839/0",
"title": "2017 21st International Conference on Control Systems and Computer Science (CSCS)"
},
"title": "Multimodal Interface for Ambient Assisted Living"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/haptics/2003/18900151/12OmNzDehah",
"doi": null,
"id": "proceedings/haptics/2003/1890/0/18900151",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/haptics/2003/1890/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on"
},
"title": "Relative Performance Using Haptic and/or Touch-Produced Auditory Cues in a Remote Absolute Texture Identification Task"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2018/01/08019860/13rRUx0gefo",
"doi": null,
"id": "trans/tg/2018/01/08019860",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Orko: Facilitating Multimodal Interaction for Visual Exploration and Analysis of Networks"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvprw/2022/873900e578/1G561ezEc9O",
"doi": null,
"id": "proceedings/cvprw/2022/8739/0/873900e578",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)"
},
"title": "Improving Multimodal Speech Recognition by Data Augmentation and Speech Representations"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/5555/01/09894081/1GIqtQDhf8I",
"doi": null,
"id": "trans/tg/5555/01/09894081",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Putting Vision and Touch Into Conflict: Results from a Multimodal Mixed Reality Setup"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cmbs/2022/677000a199/1GhW8bBO4iQ",
"doi": null,
"id": "proceedings/cmbs/2022/6770/0/677000a199",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cmbs/2022/6770/0",
"title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)"
},
"title": "Leveraging Clinical BERT in Multimodal Mortality Prediction Models for COVID-19"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2021/08/09023002/1hTHRTEQgRG",
"doi": null,
"id": "trans/tg/2021/08/09023002",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Interweaving Multimodal Interaction With Flexible Unit Visualizations for Data Exploration"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/wacv/2020/09093414/1jPbxi0Vk40",
"doi": null,
"id": "proceedings/wacv/2020/6553/0/09093414",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)"
},
"title": "Exploring Hate Speech Detection in Multimodal Publications"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvpr/2020/716800n3286/1m3ojQrj4iY",
"doi": null,
"id": "proceedings/cvpr/2020/7168/0/716800n3286",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"title": "MMTM: Multimodal Transfer Module for CNN Fusion"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iv/2020/913400a714/1rSR8lx5snS",
"doi": null,
"id": "proceedings/iv/2020/9134/0/913400a714",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)"
},
"title": "MIVA: Multimodal Interactions for Facilitating Visual Analysis with Multiple Coordinated Views"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "1h2AJ4jdnFK",
"fno": "08977377"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1h2AIkwYg4E",
"fno": "08977320"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "University of British Columbia",
"fullName": "Zipeng Liu",
"givenName": "Zipeng",
"surname": "Liu"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Ochanomizu University",
"fullName": "Takayuki Itoh",
"givenName": "Takayuki",
"surname": "Itoh"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "University of British Columbia",
"fullName": "Jessica Q. Dawson",
"givenName": "Jessica Q.",
"surname": "Dawson"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "University of British Columbia",
"fullName": "Tamara Munzner",
"givenName": "Tamara",
"surname": "Munzner"
}
],
"doi": "10.1109/TVCG.2020.2970523",
"fno": "08977505",
"hasPdf": true,
"id": "1h2AIHeB46A",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Data Visualisation",
"Graph Theory",
"Telecommunication Network Topology",
"Node Link Layouts",
"Count Based Metrics",
"Node Edge Crossings",
"Integer Counts",
"Fine Grained Level",
"Current Metrics Focus",
"Single Level Topological Structure",
"Multilevel Structure",
"Clutter Metrics",
"Geometric Overlaps",
"Edge Edge Pairs",
"Variable Size Nodes",
"Leaf Nodes",
"Sprawl Metric",
"Sprawlter Metrics",
"Graph Layouts",
"Sprawlter Graph Readability Metric",
"Graph Drawing Readability Metrics",
"Area Aware Clutter Metric",
"Salient Metanodes",
"Measurement",
"Layout",
"Clutter",
"Readability Metrics",
"Compounds",
"Visualization",
"Periodic Structures",
"Graph Drawing",
"Graph Drawing Metrics",
"Readability Metrics",
"Aesthetic Criteria"
],
"normalizedAbstract": "Graph drawing readability metrics are routinely used to assess and create node-link layouts of network data. Existing readability metrics fall short in three ways. The many count-based metrics such as edge-edge or node-edge crossings simply provide integer counts, missing the opportunity to quantify the amount of overlap between items, which may vary in size, at a more fine-grained level. Current metrics focus solely on single-level topological structure, ignoring the possibility of multi-level structure such as large and thus highly salient metanodes. Most current metrics focus on the measurement of clutter in the form of crossings and overlaps, and do not take into account the trade-off between the clutter and the information sparsity of the drawing, which we refer to as sprawl. We propose an area-aware approach to clutter metrics that tracks the extent of geometric overlaps between node-node, node-edge, and edge-edge pairs in detail. It handles variable-size nodes and explicitly treats metanodes and leaf nodes uniformly. We call the combination of a sprawl metric and an area-aware clutter metric a sprawlter metric. We present an instantiation of the sprawlter metrics featuring a formal and thorough discussion of the crucial component, the penalty mapping function. We implement and validate our proposed metrics with extensive computational analysis of graph layouts, considering four layout algorithms and 56 layouts encompassing both real-world data and synthetic examples illustrating specific configurations of interest.",
"normalizedTitle": "The Sprawlter Graph Readability Metric: Combining Sprawl and Area-Aware Clutter",
"notes": null,
"notesType": null,
"pages": "2180-2191",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "The Sprawlter Graph Readability Metric: Combining Sprawl and Area-Aware Clutter",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"node-link layouts",
"VISUALIZATION"
],
[
"network data",
"DATA"
],
[
"readability metrics",
"EVALUATION"
],
[
"readability metrics",
"EVALUATION"
],
[
"count-based metrics",
"METHOD"
],
[
"integer counts",
"DATA"
],
[
"-ed",
"METHOD"
],
[
"edge-edge",
"METHOD"
],
[
"multi-level structure",
"DATA"
],
[
"salient metanodes",
"DATA"
],
[
"level topological structure",
"DATA"
],
[
"information spars",
"DATA"
],
[
"clu",
"DATA"
],
[
"spraw",
"DATA"
],
[
"node-node",
"DATA"
],
[
"clutter metrics",
"APPLICATION"
],
[
"area-aware approach",
"METHOD"
],
[
"edge-edge pair",
"DATA"
],
[
"node-edge",
"DATA"
],
[
"geometric overlaps",
"DATA"
],
[
"leaf nodes",
"DATA"
],
[
"metanodes",
"DATA"
],
[
"variable-size nodes",
"DATA"
],
[
"area-aware clutter metric",
"METHOD"
],
[
"sprawl metric",
"METHOD"
],
[
"sprawlter metric",
"METHOD"
],
[
"sprawlter metrics",
"METHOD"
],
[
"penalty mapping function",
"METHOD"
],
[
"layout algorithms",
"METHOD"
],
[
"real-world data",
"DATA"
],
[
"synthetic examples",
"DATA"
],
[
"graph layouts",
"VISUALIZATION"
],
[
"computational analysis",
"EVALUATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/pacificvis/2015/07156354/12OmNCaLEnG",
"doi": null,
"id": "proceedings/pacificvis/2015/6879/0/07156354",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)"
},
"title": "Attribute-driven edge bundling for general graphs with applications in trail analysis"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/csit/2016/07549476/12OmNCfjev8",
"doi": null,
"id": "proceedings/csit/2016/8914/0/07549476",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/csit/2016/8914/0",
"title": "2016 7th International Conference on Computer Science and Information Technology (CSIT)"
},
"title": "Application domain and programming language readability yardsticks"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/pacificvis/2011/05742389/12OmNxj233Y",
"doi": null,
"id": "proceedings/pacificvis/2011/935/0/05742389",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)"
},
"title": "Multilevel agglomerative edge bundling for visualizing large graphs"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iv/2012/4771a093/12OmNzBOhHa",
"doi": null,
"id": "proceedings/iv/2012/4771/0/4771a093",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation"
},
"title": "Clutter Reduction in Multi-dimensional Visualization of Incomplete Data Using Sugiyama Algorithm"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/ts/2010/04/tts2010040546/13rRUygT7gV",
"doi": null,
"id": "trans/ts/2010/04/tts2010040546",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering"
},
"title": "Learning a Metric for Code Readability"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2016/01/07192724/13rRUyuegpa",
"doi": null,
"id": "trans/tg/2016/01/07192724",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "AmbiguityVis: Visualization of Ambiguity in Graph Layouts"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpc/2022/929800a214/1EpKH3lfMRO",
"doi": null,
"id": "proceedings/icpc/2022/9298/0/929800a214",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpc/2022/9298/0",
"title": "2022 IEEE/ACM 30th International Conference on Program Comprehension (ICPC)"
},
"title": "An Empirical Investigation on the Trade-off between Smart Contract Readability and Gas Consumption"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icnisc/2022/535100a155/1KYtoZqU3de",
"doi": null,
"id": "proceedings/icnisc/2022/5351/0/535100a155",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icnisc/2022/5351/0",
"title": "2022 8th Annual International Conference on Network and Information Systems for Computers (ICNISC)"
},
"title": "HED-CNN based Ionospheric Clutter Extraction for HF Range-Doppler Spectrum"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/cg/2019/04/08739137/1aXM6mNkouI",
"doi": null,
"id": "mags/cg/2019/04/08739137",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications"
},
"title": "Evaluating the Readability of Force Directed Graph Layouts: A Deep Learning Approach"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45XDIXXS",
"fno": "08567954"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1h2AIHeB46A",
"fno": "08977505"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, University of California, Davis",
"fullName": "Yucong Ye",
"givenName": "Yucong",
"surname": "Ye"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, University of California, Davis",
"fullName": "Franz Sauer",
"givenName": "Franz",
"surname": "Sauer"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, University of California, Davis",
"fullName": "Kwan-Liu Ma",
"givenName": "Kwan-Liu",
"surname": "Ma"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Combustion Research FacilitySandia National Laboratories",
"fullName": "Konduri Aditya",
"givenName": "Konduri",
"surname": "Aditya"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Combustion Research FacilitySandia National Laboratories",
"fullName": "Jacqueline Chen",
"givenName": "Jacqueline",
"surname": "Chen"
}
],
"doi": "10.1109/TVCG.2020.2970525",
"fno": "08977377",
"hasPdf": true,
"id": "1h2AJ4jdnFK",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Data Visualisation",
"User Centred Design",
"User Centered Design Principles",
"Information Visualization",
"User Centered Design Study",
"Domain Experts",
"Visualization Solution",
"Data Visualization",
"User Centered Design",
"Visualization",
"Usability",
"Task Analysis",
"Collaboration",
"Combustion",
"Italic Xmlns Ali Http Www Niso Org Schemas Ali 1 0 Xmlns Mml Http Www W 3 Org 1998 Math Math ML Xmlns Xlink Http Www W 3 Org 1999 Xlink Xmlns Xsi Http Www W 3 Org 2001 XML Schema Instance In Situ Italic Data Visualization",
"Usability Studies",
"Design Studies",
"Qualitative Evaluation",
"User Interfaces"
],
"normalizedAbstract": "The development of usable visualization solutions is essential for ensuring both their adoption and effectiveness. User-centered design principles, which involve users throughout the entire development process, have been shown to be effective in numerous information visualization endeavors. We describe how we applied these principles in scientific visualization over a two year collaboration to develop a hybrid in situ/post hoc solution tailored towards combustion researcher needs. Furthermore, we examine the importance of user-centered design and lessons learned over the design process in an effort to aid others seeking to develop effective scientific visualization solutions.",
"normalizedTitle": "A User-Centered Design Study in Scientific Visualization Targeting Domain Experts",
"notes": null,
"notesType": null,
"pages": "2192-2203",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "A User-Centered Design Study in Scientific Visualization Targeting Domain Experts",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"usable visualization solutions",
"VISUALIZATION"
],
[
"information visualization ende",
"APPLICATION"
],
[
"User-centered design principles",
"METHOD"
],
[
"scientific visualization",
"VISUALIZATION"
],
[
"combustion researcher",
"APPLICATION"
],
[
"in situ/post hoc solution",
"METHOD"
],
[
"scientific visualization solutions",
"VISUALIZATION"
],
[
"user-centered design",
"METHOD"
],
[
"design process",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vr/1999/00930096/12OmNA2cYEt",
"doi": null,
"id": "proceedings/vr/1999/0093/0/00930096",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vr/1999/0093/0",
"title": "Proceedings of Virtual Reality"
},
"title": "User-Centered Design and Evaluation of a Real-Time Battlefield Visualization Virtual Environment"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vizsec/2015/07312771/12OmNAWH9Ev",
"doi": null,
"id": "proceedings/vizsec/2015/7599/0/07312771",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vizsec/2015/7599/0",
"title": "2015 IEEE Symposium on Visualization for Cyber Security (VizSec)"
},
"title": "Unlocking user-centered design methods for building cyber security visualizations"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/srii/2012/4770a697/12OmNCvLY08",
"doi": null,
"id": "proceedings/srii/2012/4770/0/4770a697",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/srii/2012/4770/0",
"title": "Annual SRII Global Conference"
},
"title": "User Centered Design of Innovative E-Service Solutions - A Scientific Approach to User Fascination"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vizsec/2005/01532062/12OmNxR5UPi",
"doi": null,
"id": "proceedings/vizsec/2005/9477/0/01532062",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vizsec/2005/9477/0",
"title": "IEEE Workshop on Visualization for Computer Security 2005 (VizSEC 05)"
},
"title": "A user-centered look at glyph-based security visualization"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iv/2011/06004027/12OmNyQph8m",
"doi": null,
"id": "proceedings/iv/2011/0868/0/06004027",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation"
},
"title": "Developing and Applying a User-Centered Model for the Design and Implementation of Information Visualization Tools"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/e-science/2014/06972276/12OmNzzP5Hq",
"doi": null,
"id": "proceedings/e-science/2014/4288/1/06972276",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/e-science/2014/4288/1",
"title": "2014 IEEE 10th International Conference on e-Science (e-Science)"
},
"title": "Experiences with User-Centered Design for the Tigres Workflow API"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2018/01/08017610/13rRUwhHcQX",
"doi": null,
"id": "trans/tg/2018/01/08017610",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Activity-Centered Domain Characterization for Problem-Driven Scientific Visualization"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/so/2009/01/mso2009010096/13rRUwvT9eM",
"doi": null,
"id": "mags/so/2009/01/mso2009010096",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/so",
"title": "IEEE Software"
},
"title": "Usability and User-Centered Design in Scientific Software Development"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ismar/2022/532500a336/1JrR3eLmZX2",
"doi": null,
"id": "proceedings/ismar/2022/5325/0/532500a336",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)"
},
"title": "User-Centered Design and Evaluation of ARTTS: an Augmented Reality Triage Tool Suite for Mass Casualty Incidents"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/works/2022/519100a019/1KckqxKZTUY",
"doi": null,
"id": "proceedings/works/2022/5191/0/519100a019",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/works/2022/5191/0",
"title": "2022 IEEE/ACM Workshop on Workflows in Support of Large-Scale Science (WORKS)"
},
"title": "A Domain-Specific Composition Environment for Provenance Query of Scientific Workflows"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45WrVg2d",
"fno": "08546802"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "1h2AJ4jdnFK",
"fno": "08977377"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Centre for Secure, Intelligent and Usable Systems, School of Computing, Engineering and Mathematics, University of Brighton, Brighton, United Kingdom",
"fullName": "Ran Song",
"givenName": "Ran",
"surname": "Song"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, Edge Hill University, Ormskirk, United Kingdom",
"fullName": "Yonghuai Liu",
"givenName": "Yonghuai",
"surname": "Liu"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, United Kingdom",
"fullName": "Paul L. Rosin",
"givenName": "Paul L.",
"surname": "Rosin"
}
],
"doi": "10.1109/TVCG.2018.2885750",
"fno": "08567954",
"hasPdf": true,
"id": "17D45XDIXXS",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Feature Extraction",
"Image Classification",
"Image Representation",
"Learning Artificial Intelligence",
"Markov Processes",
"Neural Nets",
"Object Detection",
"Object Recognition",
"Multiple Objects",
"Mesh Distinction",
"View Based Nature",
"Distinctive Regions",
"View Based Distinction Maps",
"3 D Object Understanding Tasks",
"Markov Random Field",
"Classification Network",
"3 D Surface Mesh",
"Human Perception",
"Three Dimensional Displays",
"Task Analysis",
"Shape",
"Two Dimensional Displays",
"Feature Extraction",
"Training",
"Markov Random Fields",
"3 D Mesh",
"Distinction",
"Neural Network",
"Markov Random Field"
],
"normalizedAbstract": "An importance measure of 3D objects inspired by human perception has a range of applications since people want computers to behave like humans in many tasks. This paper revisits a well-defined measure, distinction of 3D surface mesh, which indicates how important a region of a mesh is with respect to classification. We develop a method to compute it based on a classification network and a Markov Random Field (MRF). The classification network learns view-based distinction by handling multiple views of a 3D object. Using a classification network has an advantage of avoiding the training data problem which has become a major obstacle of applying deep learning to 3D object understanding tasks. The MRF estimates the parameters of a linear model for combining the view-based distinction maps. The experiments using several publicly accessible datasets show that the distinctive regions detected by our method are not just significantly different from those detected by methods based on handcrafted features, but more consistent with human perception. We also compare it with other perceptual measures and quantitatively evaluate its performance in the context of two applications. Furthermore, due to the view-based nature of our method, we are able to easily extend mesh distinction to 3D scenes containing multiple objects.",
"normalizedTitle": "Distinction of 3D Objects and Scenes via Classification Network and Markov Random Field",
"notes": null,
"notesType": null,
"pages": "2204-2218",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Distinction of 3D Objects and Scenes via Classification Network and Markov Random Field",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"human per",
"METHOD"
],
[
"importance measure",
"METHOD"
],
[
"3D objects",
"DATA"
],
[
"3D surface mesh",
"DATA"
],
[
"mesh",
"DATA"
],
[
"Markov Random Field (MRF)",
"METHOD"
],
[
"classification network",
"METHOD"
],
[
"3D object",
"DATA"
],
[
"classification network",
"METHOD"
],
[
"view-based distinction",
"METHOD"
],
[
"training data problem",
"METHOD"
],
[
"3D object understanding tasks",
"APPLICATION"
],
[
"classification network",
"METHOD"
],
[
"deep learning",
"METHOD"
],
[
"view-based distinction maps",
"VISUALIZATION"
],
[
"linear model",
"METHOD"
],
[
"publicly accessible datasets",
"DATA"
],
[
"distinctive regions",
"DATA"
],
[
"human perception",
"EVALUATION"
],
[
"perceptual measures",
"EVALUATION"
],
[
"mesh distinction",
"METHOD"
],
[
"view-based nature",
"METHOD"
],
[
"3D scenes",
"DATA"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpr/2014/5209a930/12OmNBUAvZ9",
"doi": null,
"id": "proceedings/icpr/2014/5209/0/5209a930",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)"
},
"title": "Fusion of Image Segmentations under Markov, Random Fields"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/camp/1997/79870220/12OmNCdBDXt",
"doi": null,
"id": "proceedings/camp/1997/7987/0/79870220",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/camp/1997/7987/0",
"title": "Computer Architectures for Machine Perception, International Workshop on"
},
"title": "Circuital Markov random fields for analog edge detection"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/fskd/2009/3735e442/12OmNxZkhti",
"doi": null,
"id": "proceedings/fskd/2009/3735/5/3735e442",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/fskd/2009/3735/5",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on"
},
"title": "SAR Image Segmentation Based on Markov Random Field Model and Multiscale Technology"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icdar/2007/04378735/12OmNxbEtLz",
"doi": null,
"id": "proceedings/icdar/2007/2822/1/04378735",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icdar/2007/2822/1",
"title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)"
},
"title": "Text/Non-text Ink Stroke Classification in Japanese Handwriting Based on Markov Random Fields"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpr/2002/169540201/12OmNyfdOX0",
"doi": null,
"id": "proceedings/icpr/2002/1695/4/169540201",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpr/2002/1695/4",
"title": "Pattern Recognition, International Conference on"
},
"title": "Face Detection and Synthesis Using Markov Random Field Models"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icassp/2004/01326913/12OmNzUgdgz",
"doi": null,
"id": "proceedings/icassp/2004/8484/4/01326913",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icassp/2004/8484/4",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing"
},
"title": "On iterative source-channel image decoding with Markov random field source models"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icip/1994/00413557/12OmNzcxZhC",
"doi": null,
"id": "proceedings/icip/1994/6952/2/00413557",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing"
},
"title": "Segmentation of range and intensity images using multiscale Markov random field representations"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/cc/2019/03/07872428/13rRUwh80Jx",
"doi": null,
"id": "trans/cc/2019/03/07872428",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/cc",
"title": "IEEE Transactions on Cloud Computing"
},
"title": "A Markov Random Field Based Approach for Analyzing Supercomputer System Logs"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tp/2006/11/i1830/13rRUygT7tT",
"doi": null,
"id": "trans/tp/2006/11/i1830",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence"
},
"title": "Dense Photometric Stereo: A Markov Random Field Approach"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/3dv/2018/842500a662/17D45WgziON",
"doi": null,
"id": "proceedings/3dv/2018/8425/0/842500a662",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)"
},
"title": "A Data-Driven Prior on Facet Orientation for Semantic Mesh Labeling"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45We0UEn",
"fno": "08573859"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45XDIXXS",
"fno": "08567954"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany",
"fullName": "Andrea Schnorr",
"givenName": "Andrea",
"surname": "Schnorr"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany",
"fullName": "Dirk N. Helmrich",
"givenName": "Dirk",
"surname": "N. Helmrich"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Institute for Combustion Technology, RWTH Aachen University, Aachen, Germany",
"fullName": "Dominik Denker",
"givenName": "Dominik",
"surname": "Denker"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany",
"fullName": "Torsten W. Kuhlen",
"givenName": "Torsten W.",
"surname": "Kuhlen"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "JARA – High-Performance Computing and the Visual Computing Institute, RWTH Aachen University, Aachen, Germany",
"fullName": "Bernd Hentschel",
"givenName": "Bernd",
"surname": "Hentschel"
}
],
"doi": "10.1109/TVCG.2018.2883630",
"fno": "08546802",
"hasPdf": true,
"id": "17D45WrVg2d",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Computational Complexity",
"Data Visualisation",
"Feature Extraction",
"Graph Theory",
"Optimisation",
"Two Step Optimization",
"Time Varying Data",
"Feature Definition",
"Data Domain",
"Sparse Space Filling Features",
"Graph Optimization Problems",
"Maximum Weight Matching",
"Maximum Cardinality Matching",
"Weighted Bi Partite Graph",
"Feature Tracking",
"Feature Extraction",
"Target Tracking",
"Optimization",
"Data Visualization",
"Data Models",
"Analytical Models",
"Heuristic Algorithms",
"Global Optimization",
"Simulation Output Analysis",
"Flow Visualization"
],
"normalizedAbstract": "Tracking the temporal evolution of features in time-varying data is a key method in visualization. For typical feature definitions, such as vortices, objects are sparsely distributed over the data domain. In this paper, we present a novel approach for tracking both sparse and space-filling features. While the former comprise only a small fraction of the domain, the latter form a set of objects whose union covers the domain entirely while the individual objects are mutually disjunct. Our approach determines the assignment of features between two successive time-steps by solving two graph optimization problems. It first resolves one-to-one assignments of features by computing a maximum-weight, maximum-cardinality matching on a weighted bi-partite graph. Second, our algorithm detects events by creating a graph of potentially conflicting event explanations and finding a weighted, independent set in it. We demonstrate our method's effectiveness on synthetic and simulation data sets, the former of which enables quantitative evaluation because of the availability of ground-truth information. Here, our method performs on par or better than a well-established reference algorithm. In addition, manual visual inspection by our collaborators confirm the results' plausibility for simulation data.",
"normalizedTitle": "Feature Tracking by Two-Step Optimization",
"notes": null,
"notesType": null,
"pages": "2219-2233",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Feature Tracking by Two-Step Optimization",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"time-varying data",
"DATA"
],
[
"temporal evolution",
"DATA"
],
[
"vortices",
"DATA"
],
[
"data domain",
"DATA"
],
[
"typical feature definitions",
"APPLICATION"
],
[
"space-filling features",
"DATA"
],
[
"graph optimization problems",
"APPLICATION"
],
[
"to-one assignments of features",
"APPLICATION"
],
[
"bi-partite graph",
"VISUALIZATION"
],
[
"maximum-cardinality matching",
"EVALUATION"
],
[
"maximum-weight",
"EVALUATION"
],
[
"event explanations",
"DATA"
],
[
", independent set",
"DATA"
],
[
"graph",
"DATA"
],
[
"weight",
"DATA"
],
[
"synthetic and simulation data sets",
"DATA"
],
[
"ground-truth information",
"DATA"
],
[
"quantitative evaluation",
"EVALUATION"
],
[
"reference algorithm",
"METHOD"
],
[
"manual visual inspection",
"EVALUATION"
],
[
"simulation data",
"DATA"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/visapp/2014/07295096/12OmNvsm6zh",
"doi": null,
"id": "proceedings/visapp/2014/8133/3/07295096",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)"
},
"title": "Feature matching using CO-inertia analysis for people tracking"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpr/2010/4109d607/12OmNyv7m5x",
"doi": null,
"id": "proceedings/icpr/2010/4109/0/4109d607",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on"
},
"title": "Optimization of Target Objects for Natural Feature Tracking"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iccvw/2015/5720a751/12OmNzVoBNd",
"doi": null,
"id": "proceedings/iccvw/2015/9711/0/5720a751",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)"
},
"title": "Attributed Graphs for Tracking Multiple Objects in Structured Sports Videos"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpr/2014/5209d678/12OmNzwHvrO",
"doi": null,
"id": "proceedings/icpr/2014/5209/0/5209d678",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)"
},
"title": "Unsupervised Tracking from Clustered Graph Patterns"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vspets/2005/01570894/12OmNzwpUhP",
"doi": null,
"id": "proceedings/vspets/2005/9424/0/01570894",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vspets/2005/9424/0",
"title": "Proceedings. 2nd Joint IEEE International Workshop on Visual Surveillance and Performance Evaluation of Tracking and Surveillance (VS-PETS)"
},
"title": "Object tracking with dynamic feature graph"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icvrv/2017/263600a007/1ap5AZ64kLK",
"doi": null,
"id": "proceedings/icvrv/2017/2636/0/263600a007",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)"
},
"title": "Learning Deep Appearance Feature for Multi-target Tracking"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icme/2019/955200b774/1cdOHHPL6V2",
"doi": null,
"id": "proceedings/icme/2019/9552/0/955200b774",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)"
},
"title": "Robust Deep Tracking with Two-step Augmentation Discriminative Correlation Filters"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/cs/2021/02/09311214/1pYWIN9JCTe",
"doi": null,
"id": "mags/cs/2021/02/09311214",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/cs",
"title": "Computing in Science & Engineering"
},
"title": "A Confidence-Guided Technique for Tracking Time-Varying Features"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpr/2021/09413110/1tmjzhcSj28",
"doi": null,
"id": "proceedings/icpr/2021/8808/0/09413110",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)"
},
"title": "Robust Visual Object Tracking with Two-Stream Residual Convolutional Networks"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ieit/2021/256300a244/1wHKqvHr7mo",
"doi": null,
"id": "proceedings/ieit/2021/2563/0/256300a244",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)"
},
"title": "Object tracking algorithm based on fusion of SiamFC and Feature Pyramid Network"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45Wda7ec",
"fno": "08565948"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45WrVg2d",
"fno": "08546802"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China",
"fullName": "Xiao Zhai",
"givenName": "Xiao",
"surname": "Zhai"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China",
"fullName": "Fei Hou",
"givenName": "Fei",
"surname": "Hou"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, Stony Brook University, New York, NY, USA",
"fullName": "Hong Qin",
"givenName": "Hong",
"surname": "Qin"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China",
"fullName": "Aimin Hao",
"givenName": "Aimin",
"surname": "Hao"
}
],
"doi": "10.1109/TVCG.2018.2886322",
"fno": "08573859",
"hasPdf": true,
"id": "17D45We0UEn",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Computational Fluid Dynamics",
"Computer Graphics",
"Flow Simulation",
"Graphics Processing Units",
"Sampling Methods",
"Adaptive Staggered Power Particles",
"GPU",
"Staggered Discretization",
"Adaptive Particle Sampling",
"Power Particle Based Fluid Simulation",
"Facet Based Power Diagrams Construction Algorithm",
"Visual Quality",
"Visualization",
"Adaptation Models",
"Computational Modeling",
"Graphics Processing Units",
"Libraries",
"Liquids",
"Physically Based Modeling",
"Fluid Simulation",
"Power Diagrams",
"GPU Parallelization",
"Adaptive Sampling"
],
"normalizedAbstract": "This paper extends the recently proposed power-particle-based fluid simulation method with staggered discretization, GPU implementation, and adaptive sampling, largely enhancing the efficiency and usability of the method. In contrast to the original formulation which uses co-located pressures and velocities, in this paper, a staggered scheme is adapted to the Power Particles to benefit visual details and computing efficiency. Meanwhile, we propose a novel facet-based power diagrams construction algorithm suitable for parallelization and explore its GPU implementation, achieving an order of magnitude boost in performance over the existing code library. In addition, to utilize the potential of Power Particles to control individual cell volume, we apply adaptive particle sampling to improve the detail level with varying resolution. The proposed method can be entirely carried out on GPUs, and our extensive experiments validate our method both in terms of efficiency and visual quality.",
"normalizedTitle": "Fluid Simulation with Adaptive Staggered Power Particles on GPUs",
"notes": null,
"notesType": null,
"pages": "2234-2246",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Fluid Simulation with Adaptive Staggered Power Particles on GPUs",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"adaptive sampling",
"METHOD"
],
[
"power-particle-based fluid simulation method",
"METHOD"
],
[
"GPU implementation",
"METHOD"
],
[
"staggered discretization",
"METHOD"
],
[
"computing efficiency",
"EVALUATION"
],
[
"co-located pressures",
"DATA"
],
[
"staggered scheme",
"METHOD"
],
[
"visual details",
"VISUALIZATION"
],
[
"order of magnitude boost",
"EVALUATION"
],
[
"GPU",
"METHOD"
],
[
"facet-based power diagrams construction algorithm",
"METHOD"
],
[
"parallelization",
"APPLICATION"
],
[
"level",
"METHOD"
],
[
"adaptive particle sampling",
"METHOD"
],
[
"individual cell volume",
"DATA"
],
[
"extensive experiments",
"EVALUATION"
],
[
"efficiency",
"EVALUATION"
],
[
"visual quality",
"EVALUATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cw/2015/9403a375/12OmNA14Ach",
"doi": null,
"id": "proceedings/cw/2015/9403/0/9403a375",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)"
},
"title": "A Particle-Based Real-Time CG Rendering of Carbonated Water with Automatic Release of Bubbles"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/visual/1992/00235226/12OmNz61dc1",
"doi": null,
"id": "proceedings/visual/1992/2897/0/00235226",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/visual/1992/2897/0",
"title": "Proceedings Visualization '92"
},
"title": "Rendering surface-particles"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/e-science/2016/07870923/12OmNzUxObB",
"doi": null,
"id": "proceedings/e-science/2016/4273/0/07870923",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/e-science/2016/4273/0",
"title": "2016 IEEE 12th International Conference on e-Science (e-Science)"
},
"title": "A fast algorithm for neutrally-buoyant Lagrangian particles in numerical ocean modeling"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/sc/2014/5500a054/12OmNzb7Zu7",
"doi": null,
"id": "proceedings/sc/2014/5500/0/5500a054",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/sc/2014/5500/0",
"title": "SC14: International Conference for High Performance Computing, Networking, Storage and Analysis"
},
"title": "24.77 Pflops on a Gravitational Tree-Code to Simulate the Milky Way Galaxy with 18600 GPUs"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/candar/2017/2087a132/12OmNzdoMHd",
"doi": null,
"id": "proceedings/candar/2017/2087/0/2087a132",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/candar/2017/2087/0",
"title": "2017 Fifth International Symposium on Computing and Networking (CANDAR)"
},
"title": "SPH-based Fluid Simulation on GPU Using Verlet List and Subdivided Cell-Linked List"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icppw/2015/7589a081/12OmNzxPTGh",
"doi": null,
"id": "proceedings/icppw/2015/7589/0/7589a081",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icppw/2015/7589/0",
"title": "2015 44th International Conference on Parallel Processing Workshops (ICPPW)"
},
"title": "A Special Sorting Method for Neighbor Search Procedure in Smoothed Particle Hydrodynamics on GPUs"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2016/08/07243356/13rRUx0xPIN",
"doi": null,
"id": "trans/tg/2016/08/07243356",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Fast Coherent Particle Advection through Time-Varying Unstructured Flow Datasets"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/magazine/cs/2005/03/c3039/13rRUxbTMt3",
"doi": null,
"id": "mags/cs/2005/03/c3039",
"parentPublication": {
"__typename": "ParentPublication",
"id": "mags/cs",
"title": "Computing in Science & Engineering"
},
"title": "A Seamless Approach to Multiscale Complex Fluid Simulation"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icdiime/2022/900900a048/1Iz56eSpj3y",
"doi": null,
"id": "proceedings/icdiime/2022/9009/0/900900a048",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icdiime/2022/9009/0",
"title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)"
},
"title": "Solid-Fluid Interaction Simulation System Based on SPH Unified Particle Framework"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/candar/2020/822100a195/1sA9a0wFBIc",
"doi": null,
"id": "proceedings/candar/2020/8221/0/822100a195",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/candar/2020/8221/0",
"title": "2020 Eighth International Symposium on Computing and Networking (CANDAR)"
},
"title": "Exploiting temporal parallelism in particle-based incompressive fluid simulation on FPGA"
}
],
"webExtras": [
{
"__typename": "WebExtraType",
"extension": "mp4",
"id": "1jx1mmLVkpW",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08573859s1-tvcg_video.mp4",
"name": "ttg202006-08573859s1-tvcg_video.mp4",
"size": "47.7 MB"
}
]
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45XreC6e",
"fno": "08576679"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45We0UEn",
"fno": "08573859"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada",
"fullName": "Simon Drouin",
"givenName": "Simon",
"surname": "Drouin"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada",
"fullName": "Daniel A. Di Giovanni",
"givenName": "Daniel A. Di",
"surname": "Giovanni"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science and Software Engineering, Concordia University, Montreal, Canada",
"fullName": "Marta Kersten-Oertel",
"givenName": "Marta",
"surname": "Kersten-Oertel"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Biomedical Engineering, McConnell Brain Imaging Center, Montreal Neurological Institute, McGill University, Montreal, Canada",
"fullName": "D. Louis Collins",
"givenName": "D. Louis",
"surname": "Collins"
}
],
"doi": "10.1109/TVCG.2018.2884940",
"fno": "08565948",
"hasPdf": true,
"id": "17D45Wda7ec",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Biomedical MRI",
"Blood Vessels",
"Data Visualisation",
"Interactive Systems",
"Medical Image Processing",
"Rendering Computer Graphics",
"Surgery",
"Navigation System",
"Interactive Rendering Techniques",
"Noninteractive Counterpart",
"Psychophysics Experiment",
"20 Medical Imaging Experts",
"Angiographic Data",
"Appreciation",
"Local Vascular Structure",
"Dynamic Chroma Depth",
"Interaction Driven Enhancement",
"Depth Perception",
"Angiographic Volumes",
"User Interaction",
"3 D Medical Images",
"Specialized Environments",
"Intraoperative Exploration",
"Tracked Surgical Pointer",
"Sterility Rules",
"Surgery",
"Rendering Computer Graphics",
"Three Dimensional Displays",
"Tracking",
"Biomedical Imaging",
"Tools",
"Navigation",
"Image Guided Surgery",
"Volume Visualization",
"Interaction Techniques",
"Depth Cues",
"Evaluation",
"Angiography"
],
"normalizedAbstract": "User interaction has the potential to greatly facilitate the exploration and understanding of 3D medical images for diagnosis and treatment. However, in certain specialized environments such as in an operating room (OR), technical and physical constraints such as the need to enforce strict sterility rules, make interaction challenging. In this paper, we propose to facilitate the intraoperative exploration of angiographic volumes by leveraging the motion of a tracked surgical pointer, a tool that is already manipulated by the surgeon when using a navigation system in the OR. We designed and implemented three interactive rendering techniques based on this principle. The benefit of each of these techniques is compared to its non-interactive counterpart in a psychophysics experiment where 20 medical imaging experts were asked to perform a reaching/targeting task while visualizing a 3D volume of angiographic data. The study showed a significant improvement of the appreciation of local vascular structure when using dynamic techniques, while not having a negative impact on the appreciation of the global structure and only a marginal impact on the execution speed. A qualitative evaluation of the different techniques showed a preference for dynamic chroma-depth in accordance with the objective metrics but a discrepancy between objective and subjective measures for dynamic aerial perspective and shading.",
"normalizedTitle": "Interaction Driven Enhancement of Depth Perception in Angiographic Volumes",
"notes": null,
"notesType": null,
"pages": "2247-2257",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Interaction Driven Enhancement of Depth Perception in Angiographic Volumes",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"User interaction",
"METHOD"
],
[
"diagnosis and treatment",
"APPLICATION"
],
[
"3D medical images",
"DATA"
],
[
"specialized environment",
"APPLICATION"
],
[
"strict sterility rules",
"APPLICATION"
],
[
"operating room",
"APPLICATION"
],
[
"physical con",
"METHOD"
],
[
"angiographic volumes",
"DATA"
],
[
"surgical pointer",
"METHOD"
],
[
"navigation system",
"METHOD"
],
[
"intraoperative exploration",
"APPLICATION"
],
[
"interactive rendering techniques",
"METHOD"
],
[
"psychophysics experiment",
"EVALUATION"
],
[
"angiographic data",
"DATA"
],
[
"3D volume",
"DATA"
],
[
"reaching/targeting task",
"APPLICATION"
],
[
"medical imaging experts",
"APPLICATION"
],
[
"local vascular structure",
"DATA"
],
[
"global structure",
"DATA"
],
[
"marginal impact",
"EVALUATION"
],
[
"dynamic techniques",
"METHOD"
],
[
"impact",
"EVALUATION"
],
[
"execution speed",
"EVALUATION"
],
[
"dynamic chroma-depth",
"EVALUATION"
],
[
"subjective measures",
"EVALUATION"
],
[
"shading.",
"EVALUATION"
],
[
"objective metrics",
"EVALUATION"
],
[
"qualitative evaluation",
"EVALUATION"
],
[
"dynamic aerial perspective",
"VISUALIZATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cis/2009/3931a160/12OmNAGNCaX",
"doi": null,
"id": "proceedings/cis/2009/3931/1/3931a160",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cis/2009/3931/1",
"title": "2009 International Conference on Computational Intelligence and Security"
},
"title": "Extracting the Coronary Artery in Angiographic Image Based on à Trous Wavelet of Rotary Gaussian with Adaptive Space Coefficient"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cbms/1996/00507125/12OmNrHB1Vm",
"doi": null,
"id": "proceedings/cbms/1996/7441/0/00507125",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cbms/1996/7441/0",
"title": "Proceedings Ninth IEEE Symposium on Computer-Based Medical Systems"
},
"title": "The Fourier adaptive smoothness constraint for computing optical flow on sequences of angiographic images"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cbms/1996/00507121/12OmNwFid1h",
"doi": null,
"id": "proceedings/cbms/1996/7441/0/00507121",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cbms/1996/7441/0",
"title": "Proceedings Ninth IEEE Symposium on Computer-Based Medical Systems"
},
"title": "A method for automatically detecting the systole and diastole phases in sequences of angiographic images"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vbc/1990/00109321/12OmNxy4N2N",
"doi": null,
"id": "proceedings/vbc/1990/2039/0/00109321",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vbc/1990/2039/0",
"title": "[1990] Proceedings of the First Conference on Visualization in Biomedical Computing"
},
"title": "Coronary vasculature visualization from limited angiographic views"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cic/1989/00130584/12OmNy2ah21",
"doi": null,
"id": "proceedings/cic/1989/2114/0/00130584",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cic/1989/2114/0",
"title": "Proceedings Computers in Cardiology"
},
"title": "Computation of functional angiographic images with the Hartley transform"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/bmei/2008/3118a341/12OmNzvhvKm",
"doi": null,
"id": "proceedings/bmei/2008/3118/1/3118a341",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/bmei/2008/3118/1",
"title": "2008 International Conference on Biomedical Engineering and Informatics (BMEI 2008)"
},
"title": "Perception-aware Depth Cueing for Illustrative Vascular Visualization"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2006/05/v1117/13rRUygT7y1",
"doi": null,
"id": "trans/tg/2006/05/v1117",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Enhancing Depth Perception in Translucent Volumes"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/bibm/2018/08621540/17D45X0yjUO",
"doi": null,
"id": "proceedings/bibm/2018/5488/0/08621540",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/bibm/2018/5488/0",
"title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)"
},
"title": "Inter/Intra-Constraints Optimization for Fast Vessel Enhancement in X-ray Angiographic Image Sequence"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a111/1LxfpGyhNcY",
"doi": null,
"id": "proceedings/iccgiv/2022/9250/0/925000a111",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)"
},
"title": "Coronary Artery Segmentation from X-ray Angiographic Images using Width-aware U-Net"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a190/1cdOB3HCeTm",
"doi": null,
"id": "proceedings/acit-csii-bcd/2017/3302/0/3302a190",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/acit-csii-bcd/2017/3302/0",
"title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)"
},
"title": "Depth Recognition in 3D Translucent Stereoscopic Imaging of Medical Volumes by Means of a Glasses-Free 3D Display"
}
],
"webExtras": [
{
"__typename": "WebExtraType",
"extension": "mp4",
"id": "1js2Esj96ak",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08565948s1-interactive_rendering_video.mp4",
"name": "ttg202006-08565948s1-interactive_rendering_video.mp4",
"size": "28.1 MB"
}
]
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45WB0qbp",
"fno": "08554159"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45Wda7ec",
"fno": "08565948"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway",
"fullName": "Dar'ya Guarnera",
"givenName": "Dar'ya",
"surname": "Guarnera"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway",
"fullName": "Giuseppe Claudio Guarnera",
"givenName": "Giuseppe Claudio",
"surname": "Guarnera"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany",
"fullName": "Matteo Toscani",
"givenName": "Matteo",
"surname": "Toscani"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Pismo Software Ltd., Oxford, United Kingdom",
"fullName": "Mashhuda Glencross",
"givenName": "Mashhuda",
"surname": "Glencross"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Computer Science, Loughborough University, Loughborough, United Kingdom",
"fullName": "Baihua Li",
"givenName": "Baihua",
"surname": "Li"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway",
"fullName": "Jon Yngve Hardeberg",
"givenName": "Jon Yngve",
"surname": "Hardeberg"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany",
"fullName": "Karl R. Gegenfurtner",
"givenName": "Karl R.",
"surname": "Gegenfurtner"
}
],
"doi": "10.1109/TVCG.2018.2886877",
"fno": "08576679",
"hasPdf": true,
"id": "17D45XreC6e",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Image Processing",
"Rendering Computer Graphics",
"Solid Modelling",
"Visually Faithful Remapping",
"Remapped Renderings",
"Software Packages",
"Cross Renderer Analytical BRDF Parameter Remapping",
"BRDF Remapping Technique",
"BRDF Difference Probe",
"Digital 3 D Prototyping",
"Rendering Computer Graphics",
"Computational Modeling",
"Lighting",
"Measurement",
"Probes",
"Visualization",
"Optimization",
"BRDF",
"SVBRDF",
"Perceptual Validation",
"Virtual Materials",
"Surface Perception",
"Parameter Remapping"
],
"normalizedAbstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.",
"normalizedTitle": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping",
"notes": null,
"notesType": null,
"pages": "2258-2272",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"Material appearance",
"VISUALIZATION"
],
[
"BRDF implementation",
"METHOD"
],
[
"render",
"METHOD"
],
[
"rendered objects",
"DATA"
],
[
"digital 3D prototyping and design",
"APPLICATION"
],
[
"material parameters",
"DATA"
],
[
"reference image",
"DATA"
],
[
"BRDF models",
"METHOD"
],
[
"source material model",
"DATA"
],
[
"BRDF remapping technique",
"METHOD"
],
[
"mapping (BRDF Difference Probe)",
"METHOD"
],
[
"four user studies",
"EVALUATION"
],
[
"quantitative analysis",
"EVALUATION"
],
[
"visually faithful remapping",
"EVALUATION"
],
[
"remapping framework",
"METHOD"
],
[
"psychometric scaling experiments",
"EVALUATION"
],
[
"analytical BRDF",
"DATA"
],
[
"source model",
"METHOD"
],
[
"remapped renderings",
"VISUALIZATION"
],
[
"phenomenological model",
"METHOD"
],
[
"physically-based one",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/wmsvm/2010/05558360/12OmNARiM3T",
"doi": null,
"id": "proceedings/wmsvm/2010/7077/0/05558360",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/wmsvm/2010/7077/0",
"title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)"
},
"title": "Modeling and Editing Isotropic BRDF"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/icpr/2014/5209c047/12OmNqNXEsZ",
"doi": null,
"id": "proceedings/icpr/2014/5209/0/5209c047",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)"
},
"title": "Effective Acquisition of Dense Anisotropic BRDF"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/nswctc/2010/4011a332/12OmNyvGynS",
"doi": null,
"id": "proceedings/nswctc/2010/4011/1/4011a332",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/nswctc/2010/4011/1",
"title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on"
},
"title": "The Analysis of Global Illumination Rendering Based on BRDF"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iccv/2015/8391d559/12OmNzVoBvI",
"doi": null,
"id": "proceedings/iccv/2015/8391/0/8391d559",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)"
},
"title": "A Gaussian Process Latent Variable Model for BRDF Inference"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2012/11/ttg2012111824/13rRUwjGoFZ",
"doi": null,
"id": "trans/tg/2012/11/ttg2012111824",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Rational BRDF"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2023/04/09678000/1A4SuYWCI7K",
"doi": null,
"id": "trans/tg/2023/04/09678000",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvpr/2020/716800f416/1m3nYbnokEM",
"doi": null,
"id": "proceedings/cvpr/2020/7168/0/716800f416",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"title": "Neural Voxel Renderer: Learning an Accurate and Controllable Rendering Tool"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2022/04/09203787/1nkyY8W8j1m",
"doi": null,
"id": "trans/tg/2022/04/09203787",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Learning-Based Inverse Bi-Scale Material Fitting From Tabular BRDFs"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2022/12/09444888/1u51y8PQCMU",
"doi": null,
"id": "trans/tg/2022/12/09444888",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Estimating Homogeneous Data-Driven BRDF Parameters From a Reflectance Map Under Known Natural Lighting"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tp/2022/12/09623493/1yJT7tLzbi0",
"doi": null,
"id": "trans/tp/2022/12/09623493",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence"
},
"title": "Invertible Neural BRDF for Object Inverse Rendering"
}
],
"webExtras": [
{
"__typename": "WebExtraType",
"extension": "pdf",
"id": "1js2FLKT2a4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08576679s1-supplemental_material.pdf",
"name": "ttg202006-08576679s1-supplemental_material.pdf",
"size": "54.2 MB"
}
]
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45WIXbPb",
"fno": "08554186"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45XreC6e",
"fno": "08576679"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Texas A&M University, College Station, TX, USA",
"fullName": "Kasra Moghadam",
"givenName": "Kasra",
"surname": "Moghadam"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Texas A&M University, College Station, TX, USA",
"fullName": "Colin Banigan",
"givenName": "Colin",
"surname": "Banigan"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "University of Florida, Gainesville, FL, USA",
"fullName": "Eric D. Ragan",
"givenName": "Eric D.",
"surname": "Ragan"
}
],
"doi": "10.1109/TVCG.2018.2884468",
"fno": "08554159",
"hasPdf": true,
"id": "17D45WB0qbp",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Computer Animation",
"Virtual Reality",
"Automated Viewpoint Changes",
"Head Tracked Virtual Reality",
"Immersive Virtual Reality",
"Scene Transitions",
"Rotational Changes",
"Translational Changes",
"Viewpoint Transitions",
"Rotational Viewpoint Changes",
"Animated Technique",
"Rotational Transitions",
"Scene Changes",
"Instant Teleportations",
"Virtual Scene",
"Spatial Awareness",
"Visual Adjustments",
"Pulsed Interpolation",
"Animated Interpolation",
"Teleportation",
"Transition Techniques",
"Interactive User Control",
"Teleportation",
"Three Dimensional Displays",
"Legged Locomotion",
"Tracking",
"Space Exploration",
"Motion Pictures",
"Virtual Reality",
"Animation",
"Virtual Reality",
"View Transitions",
"Scene Transitions",
"Travel",
"Immersive Cinema",
"3 D Movies",
"Teleportation",
"Navigation",
"Sickness",
"Spatial Orientation",
"Spatial Awareness"
],
"normalizedAbstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.",
"normalizedTitle": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness",
"notes": null,
"notesType": null,
"pages": "2273-2287",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"viewing and travel techniques",
"METHOD"
],
[
"immersive virtual reality",
"APPLICATION"
],
[
"3D environments",
"DATA"
],
[
"head-tracked virtual reality",
"APPLICATION"
],
[
"interactive user control",
"METHOD"
],
[
"automated viewpoint changes",
"METHOD"
],
[
"viewpoint changes",
"EVALUATION"
],
[
"animated interpolation",
"METHOD"
],
[
"pulsed interpolation",
"METHOD"
],
[
"visual adjustments",
"VISUALIZATION"
],
[
"techniques (teleportation",
"METHOD"
],
[
"spatial awareness",
"EVALUATION"
],
[
"dynamic objects",
"DATA"
],
[
"virtual scene",
"APPLICATION"
],
[
"scene changes",
"DATA"
],
[
"instant teleportations",
"APPLICATION"
],
[
"rotational transitions",
"METHOD"
],
[
"translational movements",
"METHOD"
],
[
"rotational viewpoint changes",
"APPLICATION"
],
[
"object",
"DATA"
],
[
"animated technique",
"METHOD"
],
[
"spatial awareness",
"EVALUATION"
],
[
"animated interpolations",
"METHOD"
],
[
"viewpoint transition",
"DATA"
],
[
"translational",
"METHOD"
],
[
"rotational changes",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/robot/1991/00131936/12OmNAS9zt7",
"doi": null,
"id": "proceedings/robot/1991/2163/0/00131936",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation"
},
"title": "Biped gait transitions"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vr/2017/07892316/12OmNCzb9vr",
"doi": null,
"id": "proceedings/vr/2017/6647/0/07892316",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)"
},
"title": "An exploration of input conditions for virtual teleportation"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iciev/2016/07760005/12OmNwtEEP6",
"doi": null,
"id": "proceedings/iciev/2016/1269/0/07760005",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)"
},
"title": "Random forests based recognition of human activities and postural transitions on smartphone"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6",
"doi": null,
"id": "proceedings/vr/2017/6647/0/07892386",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)"
},
"title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2018/09/08031015/13rRUB7a117",
"doi": null,
"id": "trans/tg/2018/09/08031015",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "A Vector Field Design Approach to Animated Transitions"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2020/05/08998297/1hrXhk9mu9W",
"doi": null,
"id": "trans/tg/2020/05/08998297",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vrw/2020/09090560/1jIxzjmEoeY",
"doi": null,
"id": "proceedings/vrw/2020/6532/0/09090560",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)"
},
"title": "Either Give Me a Reason to Stand or an Opportunity to Sit in VR"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG",
"doi": null,
"id": "proceedings/ismar/2020/8508/0/850800a608",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)"
},
"title": "Walking and Teleportation in Wide-area Virtual Reality Experiences"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o",
"doi": null,
"id": "trans/tg/2022/09/09332290",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere",
"doi": null,
"id": "proceedings/vrw/2021/4057/0/405700a480",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)"
},
"title": "Analysis of Positional Tracking Space Usage when using Teleportation"
}
],
"webExtras": [
{
"__typename": "WebExtraType",
"extension": "mp4",
"id": "1js2Mf0SAgw",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08554159s1-transitions-examples.mp4",
"name": "ttg202006-08554159s1-transitions-examples.mp4",
"size": "5.8 MB"
}
]
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45VsBU70",
"fno": "08543848"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45WB0qbp",
"fno": "08554159"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, University of Southern California, Los Angeles, CA, USA",
"fullName": "Hikaru Ibayashi",
"givenName": "Hikaru",
"surname": "Ibayashi"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Visual Computing Group, Institute of Science and Technology Austria, Klosterneuburg, Austria",
"fullName": "Chris Wojtan",
"givenName": "Chris",
"surname": "Wojtan"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Technische Universität München, München, Germany",
"fullName": "Nils Thuerey",
"givenName": "Nils",
"surname": "Thuerey"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "Department of Computer Science, University of Tokyo, Hongo, Bunkyo-ku, Tokyo, Japan",
"fullName": "Takeo Igarashi",
"givenName": "Takeo",
"surname": "Igarashi"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "National Institute of Informatics, Chiyoda-ku, Tokyo, Japan",
"fullName": "Ryoichi Ando",
"givenName": "Ryoichi",
"surname": "Ando"
}
],
"doi": "10.1109/TVCG.2018.2883628",
"fno": "08554186",
"hasPdf": true,
"id": "17D45WIXbPb",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Cache Storage",
"Deformation",
"Flow Simulation",
"Mesh Generation",
"Deforming Grids",
"Adaptive Liquid Simulation",
"Dynamically Warping Grids",
"Controllable Spatial Adaptivity",
"Fixed Memory Access Pattern",
"Regular Grids",
"Inconsistent Memory Access",
"Adaptive Fluid Simulations",
"Unstructured Grids",
"Adaptation Models",
"Strain",
"Liquids",
"Computational Modeling",
"Streaming Media",
"Computer Graphics",
"Animation",
"Computer Graphics",
"Physics Based Animation",
"Fluid Simulation",
"Liquid",
"Adaptivity",
"Curvilinear Grids"
],
"normalizedAbstract": "We introduce dynamically warping grids for adaptive liquid simulation. Our primary contributions are a strategy for dynamically deforming regular grids over the course of a simulation and a method for efficiently utilizing these deforming grids for liquid simulation. Prior work has shown that unstructured grids are very effective for adaptive fluid simulations. However, unstructured grids often lead to complicated implementations and a poor cache hit rate due to inconsistent memory access. Regular grids, on the other hand, provide a fast, fixed memory access pattern and straightforward implementation. Our method combines the advantages of both: we leverage the simplicity of regular grids while still achieving practical and controllable spatial adaptivity. We demonstrate that our method enables adaptive simulations that are fast, flexible, and robust to null-space issues. At the same time, our method is simple to implement and takes advantage of existing highly-tuned algorithms.",
"normalizedTitle": "Simulating Liquids on Dynamically Warping Grids",
"notes": null,
"notesType": null,
"pages": "2288-2302",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Simulating Liquids on Dynamically Warping Grids",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"dynamically warping grids",
"METHOD"
],
[
"adaptive liquid simulation",
"APPLICATION"
],
[
"regular grids",
"DATA"
],
[
"deforming grids",
"DATA"
],
[
"liquid simulation",
"APPLICATION"
],
[
"adaptive fluid simulations",
"APPLICATION"
],
[
"unstructured grids",
"DATA"
],
[
"poor cache hit rate",
"EVALUATION"
],
[
"inconsistent memory access",
"METHOD"
],
[
"unstructured grids",
"DATA"
],
[
"fixed memory access pattern",
"METHOD"
],
[
"Regular grids",
"DATA"
],
[
"regular grids",
"DATA"
],
[
"spatial adaptivity",
"METHOD"
],
[
"null-space issues",
"APPLICATION"
],
[
"adaptive simulations",
"APPLICATION"
],
[
"highly-tuned algorithms",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ldav/2017/08231854/12OmNAKcNJN",
"doi": null,
"id": "proceedings/ldav/2017/0617/0/08231854",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ldav/2017/0617/0",
"title": "2017 IEEE 7th Symposium on Large Data Analysis and Visualization (LDAV)"
},
"title": "Parallel multi-layer ghost cell generation for distributed unstructured grids"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870248/12OmNC2OSMK",
"doi": null,
"id": "proceedings/ieee-vis/1995/7187/0/71870248",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE"
},
"title": "Vector Plots for Irregular Grids"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/visual/1994/00346313/12OmNyYDDGc",
"doi": null,
"id": "proceedings/visual/1994/6627/0/00346313",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/visual/1994/6627/0",
"title": "Proceedings Visualization '94"
},
"title": "Visualizing flow over curvilinear grid surfaces using line integral convolution"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/sbgames/2011/4648a148/12OmNzb7Zrb",
"doi": null,
"id": "proceedings/sbgames/2011/4648/0/4648a148",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/sbgames/2011/4648/0",
"title": "2011 Brazilian Symposium on Games and Digital Entertainment"
},
"title": "Fluid Animation on Arbitrarily-Shaped Structured Grids"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj",
"doi": null,
"id": "trans/tg/2014/10/06747389",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3801/1H0LsB06x7q",
"doi": null,
"id": "proceedings/cvpr/2022/6946/0/694600n3801",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"title": "Image Based Reconstruction of Liquids from 2D Surface Detections"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "17D45VUZMU0",
"fno": "08580399"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45WIXbPb",
"fno": "08554186"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"fullName": "Yazhen Yuan",
"givenName": "Yazhen",
"surname": "Yuan"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"fullName": "Rui Wang",
"givenName": "Rui",
"surname": "Wang"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "State key laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"fullName": "Hujun Bao",
"givenName": "Hujun",
"surname": "Bao"
}
],
"doi": "10.1109/TVCG.2018.2883314",
"fno": "08543848",
"hasPdf": true,
"id": "17D45VsBU70",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Computational Geometry",
"Pipeline Processing",
"Rendering Computer Graphics",
"Sorting",
"Stereo Image Processing",
"Shading Cost",
"Tile Pair Based Adaptive Multirate Stereo Shading",
"Memory Bandwidth",
"Standard Sort Middle Shading",
"Tile Based Screen Space Cache",
"Stereo Views",
"Tile Pairs",
"Rasterization Algorithm",
"Automatic Shading Reuse",
"Adaptive Shading Rates",
"Stereo Shading Architecture",
"Rendering Computer Graphics",
"Geometry",
"Pipelines",
"Bandwidth",
"Computer Architecture",
"Signal Resolution",
"Stereo Rendering",
"Tile Pair Based Rendering",
"Multi Rate Shading"
],
"normalizedAbstract": "This work proposes a new stereo shading architecture that enables adaptive shading rates and automatic shading reuse among triangles and between two views. The proposed pipeline presents several novel features. First, the present sort-middle/bin shading is extended to tile pair-based shading to rasterize and shade pixels at two views simultaneously. A new rasterization algorithm utilizing epipolar geometry is then proposed to schedule tile pairs and perform rasterization at stereo views efficiently. Second, this work presents an adaptive multi-rate shading framework to compute shading on pixels at different rates. A novel tile-based screen space cache and a new cache reuse shader are proposed to perform such multi-rate shading across triangles and views. The results show that the newly proposed method outperforms the standard sort-middle shading and the state-of-the-art multi-rate shading by achieving considerably lower shading cost and memory bandwidth.",
"normalizedTitle": "Tile Pair-Based Adaptive Multi-Rate Stereo Shading",
"notes": null,
"notesType": null,
"pages": "2303-2314",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Tile Pair-Based Adaptive Multi-Rate Stereo Shading",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"automatic shading reuse",
"APPLICATION"
],
[
"adaptive shading rates",
"APPLICATION"
],
[
"stereo shading architecture",
"METHOD"
],
[
"tile pair-based shading",
"METHOD"
],
[
"sort-middle/bin shading",
"METHOD"
],
[
"rasterization algorithm",
"METHOD"
],
[
"epipolar geometry",
"METHOD"
],
[
"rasterization",
"APPLICATION"
],
[
"views",
"APPLICATION"
],
[
"tile pairs",
"METHOD"
],
[
"pixel",
"METHOD"
],
[
"multi-rate shading framework",
"METHOD"
],
[
"views",
"DATA"
],
[
"multi-rate shading",
"APPLICATION"
],
[
"triangle",
"DATA"
],
[
"cache reuse shader",
"METHOD"
],
[
"tile-based screen space cache",
"METHOD"
],
[
"memory bandwidth",
"EVALUATION"
],
[
"lower shading cost",
"EVALUATION"
],
[
"multi-rate shading",
"METHOD"
],
[
"sort-middle shading",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/hcs/2015/07477462/12OmNAS9zPX",
"doi": null,
"id": "proceedings/hcs/2015/8885/0/07477462",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/hcs/2015/8885/0",
"title": "2015 IEEE Hot Chips 27 Symposium (HCS)"
},
"title": "The ARM® Mali-T880 Mobile GPU"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cgi/1996/75180038/12OmNwBT1ig",
"doi": null,
"id": "proceedings/cgi/1996/7518/0/75180038",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cgi/1996/7518/0",
"title": "Computer Graphics International Conference"
},
"title": "Improved Specular Highlights With Adaptive Shading"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cmpcon/1992/00186697/12OmNxGSm2u",
"doi": null,
"id": "proceedings/cmpcon/1992/2655/0/00186697",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cmpcon/1992/2655/0",
"title": "COMPCON Spring 1992"
},
"title": "Scalable graphics enhancements for PA-RISC workstations"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/iccv/2015/8391c030/12OmNyLiuzk",
"doi": null,
"id": "proceedings/iccv/2015/8391/0/8391c030",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)"
},
"title": "Registering Images to Untextured Geometry Using Average Shading Gradients"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/searis/2014/07152799/12OmNzA6GLj",
"doi": null,
"id": "proceedings/searis/2014/9955/0/07152799",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/searis/2014/9955/0",
"title": "2014 IEEE 7th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)"
},
"title": "guacamole - An extensible scene graph and rendering framework based on deferred shading"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2016/01/07194844/13rRUB7a1fT",
"doi": null,
"id": "trans/tg/2016/01/07194844",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Anisotropic Ambient Volume Shading"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tp/2017/03/07452621/13rRUxYIN5A",
"doi": null,
"id": "trans/tp/2017/03/07452621",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence"
},
"title": "Shape Estimation from Shading, Defocus, and Correspondence Using Light-Field Angular Coherence"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2016/10/07501796/13rRUyv53Fw",
"doi": null,
"id": "trans/tg/2016/10/07501796",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Aggregate G-Buffer Anti-Aliasing -Extended Version-"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/hpca/2019/144400a623/18M7PSwaQkE",
"doi": null,
"id": "proceedings/hpca/2019/1444/0/144400a623",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/hpca/2019/1444/0",
"title": "2019 IEEE International Symposium on High Performance Computer Architecture (HPCA)"
},
"title": "Rendering Elimination: Early Discard of Redundant Tiles in the Graphics Pipeline"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/cvpr/2022/694600g177/1H0NScvhUC4",
"doi": null,
"id": "proceedings/cvpr/2022/6946/0/694600g177",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"
},
"title": "Multi-View Mesh Reconstruction with Neural Deferred Shading"
}
],
"webExtras": [
{
"__typename": "WebExtraType",
"extension": "mp4",
"id": "1js2HXZuFJS",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08543848s1-supplemental_video.mp4",
"name": "ttg202006-08543848s1-supplemental_video.mp4",
"size": "96.1 MB"
},
{
"__typename": "WebExtraType",
"extension": "pdf",
"id": "1js2MuR0GjK",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08543848s1-supplemental_document.pdf",
"name": "ttg202006-08543848s1-supplemental_document.pdf",
"size": "52.1 MB"
}
]
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": null,
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "17D45VsBU70",
"fno": "08543848"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": "University of Nevada, Reno, NV, USA",
"fullName": "Majed Al Zayer",
"givenName": "Majed",
"surname": "Al Zayer"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "University of Nevada, Reno, NV, USA",
"fullName": "Paul MacNeilage",
"givenName": "Paul",
"surname": "MacNeilage"
},
{
"__typename": "ArticleAuthorType",
"affiliation": "University of Nevada, Reno, NV, USA",
"fullName": "Eelke Folmer",
"givenName": "Eelke",
"surname": "Folmer"
}
],
"doi": "10.1109/TVCG.2018.2887379",
"fno": "08580399",
"hasPdf": true,
"id": "17D45VUZMU0",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "06",
"keywords": [
"Virtual Reality",
"Navigation",
"Virtual Locomotion Techniques",
"VLT",
"VR Sickness",
"VR Locomotion",
"Navigation",
"Legged Locomotion",
"Task Analysis",
"Monitoring",
"Visualization",
"Three Dimensional Displays",
"Space Exploration",
"Virtual Reality",
"Virtual Locomotion",
"Virtual Navigation",
"Survey",
"Taxonomy"
],
"normalizedAbstract": "Virtual reality (VR) has enjoyed significant popularity in recent years. Where navigation has been a fundamental appeal of 3D applications for decades, facilitating this in VR has been quite a challenge. Over the past decades, various virtual locomotion techniques (VLTs) have been developed that aim to offer natural, usable and efficient ways of navigating VR without inducing VR sickness. Several studies of these techniques have been conducted in order to evaluate their performance in various study conditions and virtual contexts. Taxonomies have also been proposed to either place similar techniques in meaningful categories or decompose them to their underlying design components. In this survey, we aim to aggregate and understand the current state of the art of VR locomotion research and discuss the design implications of VLTs in terms of strengths, weaknesses and applicability.",
"normalizedTitle": "Virtual Locomotion: A Survey",
"notes": null,
"notesType": null,
"pages": "2315-2334",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"title": "Virtual Locomotion: A Survey",
"year": "2020"
},
"articleVideos": [],
"entities": [
[
"Virtual reality (VR)",
"APPLICATION"
],
[
"3D applications",
"APPLICATION"
],
[
"VR",
"APPLICATION"
],
[
"virtual locomotion techniques (VLTs)",
"METHOD"
],
[
"VR sickness",
"EVALUATION"
],
[
"VR",
"APPLICATION"
],
[
"virtual contexts",
"EVALUATION"
],
[
"Tax",
"METHOD"
],
[
"design component",
"METHOD"
],
[
"VR locomotion research",
"APPLICATION"
],
[
"VLTs",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNzmclo6",
"idPrefix": "tg",
"issueNum": "06",
"label": "June",
"pubType": "journal",
"title": "June",
"volume": "26",
"year": "2020"
},
"recommendedArticles": [
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU",
"doi": null,
"id": "proceedings/vr/2017/6647/0/07892348",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)"
},
"title": "Steering locomotion by vestibular perturbation in room-scale VR"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/3dui/2012/06184180/12OmNxdDFLw",
"doi": null,
"id": "proceedings/3dui/2012/1204/0/06184180",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/3dui/2012/1204/0",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)"
},
"title": "From virtual to actual mobility: Assessing the benefits of active locomotion through an immersive virtual environment using a motorized wheelchair"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"doi": null,
"id": "proceedings/vr/2018/3365/0/08446130",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)"
},
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2018/07/07946183/13rRUEgs2C2",
"doi": null,
"id": "trans/tg/2018/07/07946183",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Walking with Virtual People: Evaluation of Locomotion Interfaces in Dynamic Environments"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vs-games/2018/08493432/14tNJmUlJD4",
"doi": null,
"id": "proceedings/vs-games/2018/7123/0/08493432",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)"
},
"title": "LUTE: A Locomotion Usability Test Environment for Virtual Reality"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/2022/05/09714054/1B0XZAXWaIg",
"doi": null,
"id": "trans/tg/2022/05/09714054",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Remote research on locomotion interfaces for virtual reality: Replication of a lab-based study on teleporting interfaces"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"doi": null,
"id": "trans/tg/5555/01/09744001",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/vrw/2022/840200a696/1CJeXaYYtd6",
"doi": null,
"id": "proceedings/vrw/2022/8402/0/840200a696",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)"
},
"title": "Seamless-walk: Novel Natural Virtual Reality Locomotion Method with a High-Resolution Tactile Sensor"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/journal/tg/5555/01/09761724/1CKMkLCKOSk",
"doi": null,
"id": "trans/tg/5555/01/09761724",
"parentPublication": {
"__typename": "ParentPublication",
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics"
},
"title": "Effects of Transfer Functions and Body Parts on Body-centric Locomotion in Virtual Reality"
},
{
"__typename": "RecommendedArticleType",
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"doi": null,
"id": "proceedings/ismar/2020/8508/0/850800a452",
"parentPublication": {
"__typename": "ParentPublication",
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)"
},
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality"
}
],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUNvgyW9",
"fno": "v0021"
},
"previous": null
},
"article": {
"__typename": "ArticleType",
"abstract": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Kiriakos N. Kutulakos",
"givenName": "Kiriakos N.",
"surname": "Kutulakos"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "James R. Vallino",
"givenName": "James R.",
"surname": "Vallino"
}
],
"doi": "10.1109/2945.675647",
"fno": "v0001",
"hasPdf": true,
"id": "13rRUwgQpDb",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "01",
"keywords": [
"Augmented Reality",
"Real Time Computer Vision",
"Calibration",
"Registration",
"Affine Representations",
"Feature Tracking",
"3 D Interaction Techniques"
],
"normalizedAbstract": "Abstract—Camera calibration and the acquisition of Euclidean 3D measurements have so far been considered necessary requirements for overlaying three-dimensional graphical objects with live video. In this article, we describe a new approach to video-based augmented reality that avoids both requirements: It does not use any metric information about the calibration parameters of the camera or the 3D locations and dimensions of the environment's objects. The only requirement is the ability to track across frames at least four fiducial points that are specified by the user during system initialization and whose world coordinates are unknown.Our approach is based on the following observation: Given a set of four or more noncoplanar 3D points, the projection of all points in the set can be computed as a linear combination of the projections of just four of the points. We exploit this observation by 1) tracking regions and color fiducial points at frame rate, and 2) representing virtual objects in a non-Euclidean, affine frame of reference that allows their projection to be computed as a linear combination of the projection of the fiducial points. Experimental results on two augmented reality systems, one monitor-based and one head-mounted, demonstrate that the approach is readily implementable, imposes minimal computational and hardware requirements, and generates real-time and accurate video overlays even when the camera parameters vary dynamically.",
"normalizedTitle": "Calibration-Free Augmented Reality",
"notes": null,
"notesType": null,
"pages": "1-20",
"pubDate": "1998-01-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"title": "Calibration-Free Augmented Reality",
"year": "1998"
},
"articleVideos": [],
"entities": [
[
"Euclidean 3D measurements",
"DATA"
],
[
"live video",
"DATA"
],
[
"Camera calibration",
"APPLICATION"
],
[
"three-dimensional graphical objects",
"DATA"
],
[
"",
"DATA"
],
[
"calibration parameters",
"DATA"
],
[
"video-based augmented reality",
"APPLICATION"
],
[
"object",
"DATA"
],
[
"metric information",
"DATA"
],
[
"3D locations and dimensions",
"DATA"
],
[
"linear combination",
"METHOD"
],
[
"noncoplanar 3D points",
"DATA"
],
[
"system initialization",
"APPLICATION"
],
[
"points",
"METHOD"
],
[
"fiducial points",
"DATA"
],
[
"world",
"DATA"
],
[
"linear combination",
"METHOD"
],
[
"virtual objects",
"DATA"
],
[
"projection",
"DATA"
],
[
"frame of reference",
"METHOD"
],
[
"fiducial points",
"DATA"
],
[
"camera parameters",
"DATA"
],
[
"head-mount",
"METHOD"
],
[
"accurate video overlays",
"VISUALIZATION"
],
[
"minimal computational and hardware requirements",
"EVALUATION"
],
[
"augmented reality systems",
"APPLICATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNAXPyfa",
"idPrefix": "tg",
"issueNum": "01",
"label": "January-March",
"pubType": "journal",
"title": "January-March",
"volume": "4",
"year": "1998"
},
"recommendedArticles": [],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUwwaKsU",
"fno": "v0037"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUwgQpDb",
"fno": "v0001"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "James T. Klosowski",
"givenName": "James T.",
"surname": "Klosowski"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Martin Held",
"givenName": "Martin",
"surname": "Held"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Joseph S.B. Mitchell",
"givenName": "Joseph S.B.",
"surname": "Mitchell"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Henry Sowizral",
"givenName": "Henry",
"surname": "Sowizral"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Karel Zikan",
"givenName": "Karel",
"surname": "Zikan"
}
],
"doi": "10.1109/2945.675649",
"fno": "v0021",
"hasPdf": true,
"id": "13rRUNvgyW9",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "01",
"keywords": [
"Collision Detection",
"Intersection Searching",
"Bounding Volume Hierarchies",
"Discrete Orientation Polytopes",
"Bounding Boxes",
"Virtual Reality",
"Virtual Environments"
],
"normalizedAbstract": "Abstract—Collision detection is of paramount importance for many applications in computer graphics and visualization. Typically, the input to a collision detection algorithm is a large number of geometric objects comprising an environment, together with a set of objects moving within the environment. In addition to determining accurately the contacts that occur between pairs of objects, one needs also to do so at real-time rates. Applications such as haptic force-feedback can require over 1,000 collision queries per second.In this paper, we develop and analyze a method, based on bounding-volume hierarchies, for efficient collision detection for objects moving within highly complex environments. Our choice of bounding volume is to use a \"discrete orientation polytope\" (\"k-dop\"), a convex polytope whose facets are determined by halfspaces whose outward normals come from a small fixed set of k orientations. We compare a variety of methods for constructing hierarchies (\"BV-trees\") of bounding k-dops. Further, we propose algorithms for maintaining an effective BV-tree of k-dops for moving objects, as they rotate, and for performing fast collision detection using BV-trees of the moving objects and of the environment.Our algorithms have been implemented and tested. We provide experimental evidence showing that our approach yields substantially faster collision detection than previous methods.",
"normalizedTitle": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs",
"notes": null,
"notesType": null,
"pages": "21-36",
"pubDate": "1998-01-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"title": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs",
"year": "1998"
},
"articleVideos": [],
"entities": [
[
"Collision detection",
"METHOD"
],
[
"computer graphics",
"APPLICATION"
],
[
"collision detection algorithm",
"METHOD"
],
[
"geometric objects",
"DATA"
],
[
"real-time rates",
"METHOD"
],
[
"bounding-volume hierarchies",
"METHOD"
],
[
"efficient collision detection",
"APPLICATION"
],
[
"collision queries",
"APPLICATION"
],
[
"haptic force-feedback",
"APPLICATION"
],
[
"e\" (\"k-dop",
"METHOD"
],
[
"bounding volume",
"APPLICATION"
],
[
"k orientations",
"DATA"
],
[
"convex polytope",
"DATA"
],
[
"half",
"DATA"
],
[
"es (\"BV-trees",
"METHOD"
],
[
"dop",
"METHOD"
],
[
"fast collision detection",
"APPLICATION"
],
[
"BV-tree",
"METHOD"
],
[
"BV-trees",
"METHOD"
],
[
"experimental evidence",
"EVALUATION"
],
[
"faster collision detection",
"EVALUATION"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNAXPyfa",
"idPrefix": "tg",
"issueNum": "01",
"label": "January-March",
"pubType": "journal",
"title": "January-March",
"volume": "4",
"year": "1998"
},
"recommendedArticles": [],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUxly95q",
"fno": "v0055"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUNvgyW9",
"fno": "v0021"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Peter L. Williams",
"givenName": "Peter L.",
"surname": "Williams"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Nelson L. Max",
"givenName": "Nelson L.",
"surname": "Max"
},
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Clifford M. Stein",
"givenName": "Clifford M.",
"surname": "Stein"
}
],
"doi": "10.1109/2945.675650",
"fno": "v0037",
"hasPdf": true,
"id": "13rRUwwaKsU",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "01",
"keywords": [
"Volume Rendering",
"Unstructured Meshes",
"High Accuracy",
"Finite Element Method",
"Isosurfaces",
"Splatting",
"Cell Projection",
"Visibility Ordering",
"Depth Sorting"
],
"normalizedAbstract": "Abstract—This paper describes a volume rendering system for unstructured data, especially finite element data, that creates images with very high accuracy. The system will currently handle meshes whose cells are either linear or quadratic tetrahedra. Compromises or approximations are not introduced for the sake of efficiency. Whenever possible, exact mathematical solutions for the radiance integrals involved and for interpolation are used. The system will also handle meshes with mixed cell types: tetrahedra, bricks, prisms, wedges, and pyramids, but not with high accuracy. Accurate semitransparent shaded isosurfaces may be embedded in the volume rendering. For very small cells, subpixel accumulation by splatting is used to avoid sampling error. A revision to an existing accurate visibility ordering algorithm is described, which includes a correction and a method for dramatically increasing its efficiency. Finally, hardware assisted projection and compositing are extended from tetrahedra to arbitrary convex polyhedra.",
"normalizedTitle": "A High Accuracy Volume Renderer for Unstructured Data",
"notes": null,
"notesType": null,
"pages": "37-54",
"pubDate": "1998-01-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"title": "A High Accuracy Volume Renderer for Unstructured Data",
"year": "1998"
},
"articleVideos": [],
"entities": [
[
"unstructured data",
"DATA"
],
[
"volume rendering system",
"METHOD"
],
[
"finite element data",
"DATA"
],
[
"images",
"DATA"
],
[
"quadratic tetrahedra",
"DATA"
],
[
"meshes",
"DATA"
],
[
"radiance integrals",
"DATA"
],
[
"mathematical solutions",
"METHOD"
],
[
"me",
"DATA"
],
[
"types: tetrahedra",
"DATA"
],
[
"high accuracy",
"EVALUATION"
],
[
"bricks",
"DATA"
],
[
"pyramid",
"DATA"
],
[
"wedges",
"DATA"
],
[
"prisms",
"DATA"
],
[
"volume rendering",
"APPLICATION"
],
[
"semitransparent shaded isosurfaces",
"DATA"
],
[
"subpixel accumulation",
"METHOD"
],
[
"splatting",
"METHOD"
],
[
"sampling error",
"APPLICATION"
],
[
"accurate visibility ordering algorithm",
"METHOD"
],
[
"hardware assisted projection and compositing",
"METHOD"
],
[
"tetrahedra",
"METHOD"
],
[
"convex polyhedra",
"DATA"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNAXPyfa",
"idPrefix": "tg",
"issueNum": "01",
"label": "January-March",
"pubType": "journal",
"title": "January-March",
"volume": "4",
"year": "1998"
},
"recommendedArticles": [],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUB7a1fF",
"fno": "v0071"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUwwaKsU",
"fno": "v0037"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Fabrice Neyret",
"givenName": "Fabrice",
"surname": "Neyret"
}
],
"doi": "10.1109/2945.675652",
"fno": "v0055",
"hasPdf": true,
"id": "13rRUxly95q",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "01",
"keywords": [
"Volumetric Textures",
"Complex Geometry",
"Levels Of Detail"
],
"normalizedAbstract": "Abstract—Complex repetitive scenes containing forests, foliage, grass, hair, or fur, are challenging for common modeling and rendering tools. The amount of data, the tediousness of modeling and animation tasks, and the cost of realistic rendering have caused such kind of scene to see only limited use even in high-end productions. We describe here how the use of volumetric textures is well suited to such scenes. These primitives can greatly simplify modeling and animation tasks. More importantly, they can be very efficiently rendered using ray tracing with few aliasing artifacts. The main idea, initially introduced by Kajiya and Kay [9], is to represent a pattern of 3D geometry in a reference volume, that is tiled over an underlying surface much like a regular 2D texture. In our contribution, the mapping is independent of the mesh subdivision, the pattern can contain any kind of shape, and it is prefiltered at different scales as for MIP-mapping. Although the model encoding is volumetric, the rendering method differs greatly from traditional volume rendering: A volumetric texture only exists in the neighborhood of a surface, and the repeated instances (called texels) of the reference volume are spatially deformed. Furthermore, each voxel of the reference volume contains a key feature which controls the reflectance function that represents aggregate intravoxel geometry. This allows for ray-tracing of highly complex scenes with very few aliasing artifacts, using a single ray per pixel (for the part of the scene using the volumetric texture representation). The major technical considerations of our method lie in the ray-path determination and in the specification of the reflectance function.",
"normalizedTitle": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures",
"notes": null,
"notesType": null,
"pages": "55-70",
"pubDate": "1998-01-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"title": "Modeling, Animating, and Rendering Complex Scenes Using Volumetric Textures",
"year": "1998"
},
"articleVideos": [],
"entities": [
[
"fur",
"DATA"
],
[
"hair",
"DATA"
],
[
"repetitive scenes",
"DATA"
],
[
"modeling and rendering tools",
"METHOD"
],
[
"modeling and animation tasks",
"APPLICATION"
],
[
"realistic rendering",
"VISUALIZATION"
],
[
"high-end productions",
"APPLICATION"
],
[
"volumetric textures",
"DATA"
],
[
"modeling and animation tasks",
"APPLICATION"
],
[
"ray tracing",
"METHOD"
],
[
"reference volume",
"DATA"
],
[
"surface",
"DATA"
],
[
"3D geometry",
"DATA"
],
[
"2D texture",
"DATA"
],
[
"mesh subdivision",
"DATA"
],
[
"MIP-mapping",
"METHOD"
],
[
"te",
"DATA"
],
[
"surface",
"DATA"
],
[
"rendering method",
"METHOD"
],
[
"reference volume",
"DATA"
],
[
"traditional volume rendering",
"METHOD"
],
[
"volumetric texture",
"DATA"
],
[
"feature",
"DATA"
],
[
"voxel",
"DATA"
],
[
"e intravoxel geometry",
"METHOD"
],
[
"reference volume",
"DATA"
],
[
"reflectance function",
"METHOD"
],
[
"ing",
"EVALUATION"
],
[
"volumetric texture representation",
"VISUALIZATION"
],
[
"ray",
"DATA"
],
[
"ray-tracing",
"APPLICATION"
],
[
"ray-path determination",
"METHOD"
],
[
"reflectance function",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNAXPyfa",
"idPrefix": "tg",
"issueNum": "01",
"label": "January-March",
"pubType": "journal",
"title": "January-March",
"volume": "4",
"year": "1998"
},
"recommendedArticles": [],
"webExtras": []
} |
{
"adjacentArticles": {
"__typename": "AdjacentArticlesType",
"next": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUx0geuY",
"fno": "v0082"
},
"previous": {
"__typename": "AdjacentArticleType",
"articleId": "13rRUxly95q",
"fno": "v0055"
}
},
"article": {
"__typename": "ArticleType",
"abstract": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.",
"abstracts": [
{
"__typename": "ArticleAbstractType",
"abstractType": "Regular",
"content": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms."
}
],
"authors": [
{
"__typename": "ArticleAuthorType",
"affiliation": null,
"fullName": "Gershon Elber",
"givenName": "Gershon",
"surname": "Elber"
}
],
"doi": "10.1109/2945.675655",
"fno": "v0071",
"hasPdf": true,
"id": "13rRUB7a1fF",
"idPrefix": "tg",
"isOpenAccess": false,
"isbn": null,
"issn": "1077-2626",
"issueNum": "01",
"keywords": [
"Sketches",
"Illustrations",
"Line Drawings",
"Freeform Surfaces",
"NUR Bs",
"Implicit Forms",
"Surface Coverage",
"Printing"
],
"normalizedAbstract": "Abstract—A technique is presented for line art rendering of scenes composed of freeform surfaces. The line art that is created for parametric surfaces is practically intrinsic and is globally invariant to changes in the surface parameterization. This method is equally applicable for line art rendering of implicit forms, creating a unified line art rendering method for both parametric and implicit forms. This added flexibility exposes a new horizon of special, parameterization independent, line art effects. Moreover, the production of the line art illustrations can be combined with traditional rendering techniques such as transparency and texture mapping. Examples that demonstrate the capabilities of the proposed approach are presented for both the parametric and implicit forms.",
"normalizedTitle": "Line Art Illustrations of Parametric and Implicit Forms",
"notes": null,
"notesType": null,
"pages": "71-81",
"pubDate": "1998-01-01 00:00:00",
"pubType": "trans",
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"title": "Line Art Illustrations of Parametric and Implicit Forms",
"year": "1998"
},
"articleVideos": [],
"entities": [
[
"freeform surfaces",
"DATA"
],
[
"scene",
"DATA"
],
[
"line art rendering",
"APPLICATION"
],
[
"surface parameterization",
"METHOD"
],
[
"parametric surfaces",
"DATA"
],
[
"line art",
"VISUALIZATION"
],
[
"ified line art rendering method",
"METHOD"
],
[
"parametric",
"DATA"
],
[
"implicit forms",
"DATA"
],
[
"line art rendering",
"APPLICATION"
],
[
"line art effects",
"VISUALIZATION"
],
[
"traditional rendering techniques",
"METHOD"
],
[
"texture mapping",
"METHOD"
],
[
"transparency",
"METHOD"
],
[
"line art illustrations",
"VISUALIZATION"
],
[
"implicit forms",
"METHOD"
],
[
"parametric",
"METHOD"
]
],
"issue": {
"__typename": "PeriodicalIssue",
"downloadables": {
"__typename": "PeriodicalIssueDownloadablesType",
"hasCover": false
},
"id": "12OmNAXPyfa",
"idPrefix": "tg",
"issueNum": "01",
"label": "January-March",
"pubType": "journal",
"title": "January-March",
"volume": "4",
"year": "1998"
},
"recommendedArticles": [],
"webExtras": []
} |
End of preview. Expand
in Dataset Viewer.
- Downloads last month
- 39