data (dict) |
---|
{
"proceeding": {
"id": "12OmNx8wTfQ",
"title": "2014 12th IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"acronym": "euc",
"groupId": "1002596",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxxdZFo",
"doi": "10.1109/EUC.2014.49",
"title": "Mobile Augmented Reality System for Marine Navigation Assistance",
"normalizedTitle": "Mobile Augmented Reality System for Marine Navigation Assistance",
"abstract": "Augmented Reality devices are about to reach mainstream markets but applications have to meet user expectations in terms of usage and ergonomics. In this paper, we present a reallife outdoor AR application for marine navigation assistance that alleviates cognitive load issues (orientation between electronic navigational devices and bridge view) for vessels and recreational boats. First, we describe the current application and explain the requirements to draw relevant and meaningful objects. Secondly we present the software architecture of our pervasive system, which is compliant with different contexts and applications cases. Then, we detail our Marine Mobile Augmented Reality embedded System (MMARS). Finally, we present implementations on both Embedded system and smartphone.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality devices are about to reach mainstream markets but applications have to meet user expectations in terms of usage and ergonomics. In this paper, we present a reallife outdoor AR application for marine navigation assistance that alleviates cognitive load issues (orientation between electronic navigational devices and bridge view) for vessels and recreational boats. First, we describe the current application and explain the requirements to draw relevant and meaningful objects. Secondly we present the software architecture of our pervasive system, which is compliant with different contexts and applications cases. Then, we detail our Marine Mobile Augmented Reality embedded System (MMARS). Finally, we present implementations on both Embedded system and smartphone.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality devices are about to reach mainstream markets but applications have to meet user expectations in terms of usage and ergonomics. In this paper, we present a reallife outdoor AR application for marine navigation assistance that alleviates cognitive load issues (orientation between electronic navigational devices and bridge view) for vessels and recreational boats. First, we describe the current application and explain the requirements to draw relevant and meaningful objects. Secondly we present the software architecture of our pervasive system, which is compliant with different contexts and applications cases. Then, we detail our Marine Mobile Augmented Reality embedded System (MMARS). Finally, we present implementations on both Embedded system and smartphone.",
"fno": "5249a287",
"keywords": [
"Boats",
"Three Dimensional Displays",
"Global Positioning System",
"Glass",
"Cameras",
"Augmented Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Jean Christophe Morgere",
"givenName": "Jean Christophe",
"surname": "Morgere",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jean Philippe Diguet",
"givenName": "Jean Philippe",
"surname": "Diguet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Johann Laurent",
"givenName": "Johann",
"surname": "Laurent",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "euc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "287-292",
"year": "2014",
"issn": null,
"isbn": "978-0-7695-5249-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5249a281",
"articleId": "12OmNBrlPEI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5249a293",
"articleId": "12OmNBrlPzm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2012/4702/0/4702a728",
"title": "School of the Future: Using Augmented Reality for Contextual Information and Navigation in Academic Buildings",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a728/12OmNAkWvcI",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scc/2016/2628/0/2628a299",
"title": "XYZ Indoor Navigation through Augmented Reality: A Research in Progress",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2016/2628a299/12OmNBigFpq",
"parentPublication": {
"id": "proceedings/scc/2016/2628/0",
"title": "2016 IEEE International Conference on Services Computing (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671782",
"title": "Augmented Reality binoculars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671782/12OmNCfSqH6",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a061",
"title": "Augmented Reality Visualization for Sailboats (ARVS)",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a061/12OmNrnJ6Uq",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948454",
"title": "[Poster] Augmented reality binoculars on the move",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948454/12OmNwI8c8P",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948402",
"title": "AR-IVI — Implementation of In-Vehicle Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948402/12OmNySosKY",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/07054558",
"title": "Augmented Reality Binoculars",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/07054558/13rRUx0xPib",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icore/2022/3390/0/339000a020",
"title": "Fish-be-with-you: An Augmented Reality Mobile Application About Endangered Marine Species",
"doi": null,
"abstractUrl": "/proceedings-article/icore/2022/339000a020/1LSOOZeymgo",
"parentPublication": {
"id": "proceedings/icore/2022/3390/0",
"title": "2022 2nd International Conference in Information and Computing Research (iCORE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a443",
"title": "Size-invariant Detection of Marine Vessels From Visual Time Series",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a443/1uqGkoNcw7u",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a260",
"title": "Augmented Reality Interface for Sailing Navigation: a User Study for Wind Representation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a260/1yeQKPuFLTW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
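Each row above shares the same top-level shape: a `proceeding`, an `article`, optional `webExtras`, `adjacentArticles` pointers, a list of `recommendedArticles`, and `articleVideos`. As a reading aid, here is a minimal sketch of that schema as Python `TypedDict`s; the field names come straight from the rows, while the `Optional` markers are assumptions inferred from the nulls visible in this sample.

```python
from typing import List, Optional, TypedDict


class Author(TypedDict):
    affiliation: Optional[str]   # null for several authors in this sample
    fullName: str
    givenName: Optional[str]     # null when only fullName is recorded
    surname: str


class Article(TypedDict, total=False):
    id: str
    doi: str
    title: str
    abstract: str
    keywords: List[str]
    authors: List[Author]
    pubDate: str                 # ISO-like timestamp, e.g. "2014-08-01T00:00:00"
    pages: str                   # page range as a string, e.g. "287-292"
    year: str
    issn: Optional[str]
    isbn: str


class Record(TypedDict, total=False):
    proceeding: dict
    article: Article
    webExtras: List[dict]
    adjacentArticles: dict       # {"previous": {...}, "next": {...}}
    recommendedArticles: List[dict]
    articleVideos: List[dict]
```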
{
"proceeding": {
"id": "12OmNzzxuy8",
"title": "2013 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy5R3C7",
"doi": "10.1109/CW.2013.65",
"title": "Estimation of Environmental Lighting from Known Geometries for Mobile Augmented Reality",
"normalizedTitle": "Estimation of Environmental Lighting from Known Geometries for Mobile Augmented Reality",
"abstract": "Light source estimation and virtual lighting must be believable in terms of appearance and correctness in augmented reality scenes. As a result of illumination complexity in an outdoor scene, realistic lighting for augmented reality is still a challenging problem. In this paper, we propose a framework based on an estimation of environmental lighting from well-defined objects, specifically human faces. The method is tuned for outdoor use, and the algorithm is further enhanced to illuminate virtual objects exposed to direct sunlight. Our model can be integrated into existing mobile augmented reality frameworks to enhance visual perception.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Light source estimation and virtual lighting must be believable in terms of appearance and correctness in augmented reality scenes. As a result of illumination complexity in an outdoor scene, realistic lighting for augmented reality is still a challenging problem. In this paper, we propose a framework based on an estimation of environmental lighting from well-defined objects, specifically human faces. The method is tuned for outdoor use, and the algorithm is further enhanced to illuminate virtual objects exposed to direct sunlight. Our model can be integrated into existing mobile augmented reality frameworks to enhance visual perception.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Light source estimation and virtual lighting must be believable in terms of appearance and correctness in augmented reality scenes. As a result of illumination complexity in an outdoor scene, realistic lighting for augmented reality is still a challenging problem. In this paper, we propose a framework based on an estimation of environmental lighting from well-defined objects, specifically human faces. The method is tuned for outdoor use, and the algorithm is further enhanced to illuminate virtual objects exposed to direct sunlight. Our model can be integrated into existing mobile augmented reality frameworks to enhance visual perception.",
"fno": "2246a132",
"keywords": [
"Lighting",
"Light Sources",
"Augmented Reality",
"Estimation",
"Vectors",
"Cameras",
"Mobile Communication",
"Real Time Rendering",
"Mobile Augmented Reality",
"Outdoor Illumination"
],
"authors": [
{
"affiliation": "Fac. of Eng. & Natural Sci., Sabanci Univ., Istanbul, Turkey",
"fullName": "Emre Koc",
"givenName": "Emre",
"surname": "Koc",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fac. of Eng. & Natural Sci., Sabanci Univ., Istanbul, Turkey",
"fullName": "Selim Balcisoy",
"givenName": "Selim",
"surname": "Balcisoy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "132-139",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2246-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2246a124",
"articleId": "12OmNwdtwco",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2246a140",
"articleId": "12OmNyXMQaU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/smartcomp/2017/6517/0/07947047",
"title": "Spectral-Temporal LED Lighting Modules for Reproducing Daily and Seasonal Solar Circadian Rhythmicities",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2017/07947047/12OmNrYlmCk",
"parentPublication": {
"id": "proceedings/smartcomp/2017/6517/0",
"title": "2017 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671792",
"title": "Acceleration methods for radiance transfer in photorealistic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671792/12OmNwGIcB5",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726809",
"title": "A review on illumination techniques in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726809/12OmNwMFMfk",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a192",
"title": "[POSTER] Illumination Estimation Using Cast Shadows for Realistic Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a192/12OmNxX3uLh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728914",
"title": "Parallel lighting and reflectance estimation based on inverse rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728914/12OmNxwENCV",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802044",
"title": "Efficient and robust radiance transfer for probeless photorealistic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802044/12OmNz4SOCN",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492748",
"title": "An empirical user-based study of text drawing styles and outdoor background textures for augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492748/12OmNzZmZwi",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040573",
"title": "Online Tracking of Outdoor Lighting Variations for Augmented Reality with Moving Cameras",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040573/13rRUyY28Yr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a189",
"title": "Deep Consistent Illumination in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a189/1gyslmCJMjK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09123589",
"title": "An Improved Augmented-Reality Framework for Differential Rendering Beyond the Lambertian-World Assumption",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09123589/1kTxwwg0epW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
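The `keywords` arrays mix controlled index terms with author keywords, and casing varies across rows. A small sketch of building a case-insensitive keyword vocabulary over a list of such records; `keyword_counts` is a hypothetical helper, not part of any API shown here.

```python
from collections import Counter


def keyword_counts(records):
    """Count keywords across records, ignoring case and stray whitespace."""
    counts = Counter()
    for record in records:
        for kw in record.get("article", {}).get("keywords", []):
            counts[kw.strip().lower()] += 1
    return counts


# keyword_counts(rows).most_common(5) would surface shared terms such as
# "augmented reality", which recurs in several of the rows shown here.
```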
{
"proceeding": {
"id": "12OmNrIrPwU",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyUWR9U",
"doi": "10.1109/ICALT.2010.130",
"title": "Multi-Object Oriented Augmented Reality for Location-Based Adaptive Mobile Learning",
"normalizedTitle": "Multi-Object Oriented Augmented Reality for Location-Based Adaptive Mobile Learning",
"abstract": "This research aims to bring up a strategy called Multi-Object Oriented Augmented Reality, based on the Augmented Reality technique and the location of Mobile Learning Objects, which allows learner to see the suitable learning contents superimposed upon the specific learning objects and enhance the interactive in a mobile learning environment. The three characteristics of the proposed approach, Learning-Object Oriented, Guidance Ability and Highly Interactive will enhance Mobile Learning in a more adaptive and interesting way.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This research aims to bring up a strategy called Multi-Object Oriented Augmented Reality, based on the Augmented Reality technique and the location of Mobile Learning Objects, which allows learner to see the suitable learning contents superimposed upon the specific learning objects and enhance the interactive in a mobile learning environment. The three characteristics of the proposed approach, Learning-Object Oriented, Guidance Ability and Highly Interactive will enhance Mobile Learning in a more adaptive and interesting way.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This research aims to bring up a strategy called Multi-Object Oriented Augmented Reality, based on the Augmented Reality technique and the location of Mobile Learning Objects, which allows learner to see the suitable learning contents superimposed upon the specific learning objects and enhance the interactive in a mobile learning environment. The three characteristics of the proposed approach, Learning-Object Oriented, Guidance Ability and Highly Interactive will enhance Mobile Learning in a more adaptive and interesting way.",
"fno": "4055a450",
"keywords": [
"Multi Object Oriented",
"Augmented Reality",
"Location Awareness",
"Adaptive Moble Learning"
],
"authors": [
{
"affiliation": null,
"fullName": "William Chang",
"givenName": "William",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qing Tan",
"givenName": "Qing",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fang Wei Tao",
"givenName": "Fang Wei",
"surname": "Tao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-07-01T00:00:00",
"pubType": "proceedings",
"pages": "450-451",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4055-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4055a445",
"articleId": "12OmNwdL7f4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4055a452",
"articleId": "12OmNC8MsAf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cse/2010/4323/0/4323a020",
"title": "Augmented Reality System Design and Scenario Study for Location-Based Adaptive Mobile Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2010/4323a020/12OmNA0dMRS",
"parentPublication": {
"id": "proceedings/cse/2010/4323/0",
"title": "2010 13th IEEE International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2015/8002/0/8002a281",
"title": "Applying Augmented Reality Technology to Book Publication Business",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2015/8002a281/12OmNAfy7JF",
"parentPublication": {
"id": "proceedings/icebe/2015/8002/0",
"title": "2015 IEEE 12th International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kam/2009/3888/3/3888c091",
"title": "Ubiquitous Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/kam/2009/3888c091/12OmNCcKQOw",
"parentPublication": {
"id": "proceedings/kam/2009/3888/1",
"title": "Knowledge Acquisition and Modeling, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2015/7334/0/7334a132",
"title": "Augmented Reality Laboratory for High School Electrochemistry Course",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a132/12OmNqBbHAA",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759663",
"title": "Interactive Mobile Augmented Reality system using a vibro-tactile pad",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759663/12OmNwpoFGV",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2012/1536/0/06211167",
"title": "The Design and Implementation of Augmented Reality Learning Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2012/06211167/12OmNxVDuPN",
"parentPublication": {
"id": "proceedings/icis/2012/1536/0",
"title": "2012 IEEE/ACIS 11th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2014/4174/0/06913314",
"title": "A Mobile Augmented Reality Based Scaffolding Platform for Outdoor Fieldtrip Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913314/12OmNyNQSAi",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a357",
"title": "An Interactive 5E Learning Cycle-Based Augmented Reality System to Improve Students' Learning Achievement in a Microcosmic Chemistry Molecule Course",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a357/12OmNzmLxKx",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2010/01/mpc2010010005",
"title": "Gaming and Augmented Reality Come to Location-Based Services",
"doi": null,
"abstractUrl": "/magazine/pc/2010/01/mpc2010010005/13rRUy08MBx",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2019/4528/0/09069366",
"title": "Grey Island: Immersive tangible interaction through augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2019/09069366/1jdCYiSUwBG",
"parentPublication": {
"id": "proceedings/dsc/2019/4528/0",
"title": "2019 IEEE Fourth International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
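The `abstractUrl` values in `recommendedArticles` are site-relative paths. A sketch of resolving them to absolute links, assuming the same `https://www.computer.org/csdl` host that appears in a `webExtras` `location` field further down; the base URL is an assumption, while the join itself is standard library.

```python
from urllib.parse import urljoin

CSDL_BASE = "https://www.computer.org/csdl/"  # assumed host, see webExtras location below


def absolute_abstract_urls(record):
    """Yield absolute URLs for a record's recommended-article abstracts."""
    for rec in record.get("recommendedArticles", []):
        path = rec.get("abstractUrl")
        if path:
            yield urljoin(CSDL_BASE, path.lstrip("/"))
```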
{
"proceeding": {
"id": "12OmNxE2mWh",
"title": "2013 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQGShm",
"doi": "10.1109/VR.2013.6549424",
"title": "Rapid generation of personalized avatars",
"normalizedTitle": "Rapid generation of personalized avatars",
"abstract": "Summary form only given. The ICT Mixed Reality Lab will demonstrate a pipeline for rapidly generating personalized avatars from multiple depth and RGB scans of a user with a consumer level sensor such as a Microsoft Kinect. Based on a fusion of state-of-the-art techniques in graphics, surface reconstruction, and animation, our semi-automatic method can produce a fully rigged, skinned, and textured character model suitable for real-time virtual environments in less than 15 minutes. First, a 3D point cloud is collected from the sensor using a simultaneous localization and mapping (SLAM) approach to track the device's movements over time (see Figure 1.a). Next, surface reconstruction techniques are employed to generate a watertight 3D mesh from the raw 3D points (see Figure 1.b). The resulting model is then analyzed to determine the human joint locations, and if a skeleton can be successfully generated for the model, the mesh is then rigged and skinned using weights that are calculated automatically [2]. Finally, photos captured periodically during the scanning process using the sensor's RGB camera are used to texture the final model. The resulting avatar is suitable for real-time animation using a virtual environment or video game engine (see Figure 1.c). We will demonstrate our avatar generation pipeline at IEEE Virtual Reality 2013. Conference attendees may opt to be scanned, and their generated avatar will be provided to them either on a USB stick or through email. A video of this demo can be found at the MxR Lab website [1].",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given. The ICT Mixed Reality Lab will demonstrate a pipeline for rapidly generating personalized avatars from multiple depth and RGB scans of a user with a consumer level sensor such as a Microsoft Kinect. Based on a fusion of state-of-the-art techniques in graphics, surface reconstruction, and animation, our semi-automatic method can produce a fully rigged, skinned, and textured character model suitable for real-time virtual environments in less than 15 minutes. First, a 3D point cloud is collected from the sensor using a simultaneous localization and mapping (SLAM) approach to track the device's movements over time (see Figure 1.a). Next, surface reconstruction techniques are employed to generate a watertight 3D mesh from the raw 3D points (see Figure 1.b). The resulting model is then analyzed to determine the human joint locations, and if a skeleton can be successfully generated for the model, the mesh is then rigged and skinned using weights that are calculated automatically [2]. Finally, photos captured periodically during the scanning process using the sensor's RGB camera are used to texture the final model. The resulting avatar is suitable for real-time animation using a virtual environment or video game engine (see Figure 1.c). We will demonstrate our avatar generation pipeline at IEEE Virtual Reality 2013. Conference attendees may opt to be scanned, and their generated avatar will be provided to them either on a USB stick or through email. A video of this demo can be found at the MxR Lab website [1].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given. The ICT Mixed Reality Lab will demonstrate a pipeline for rapidly generating personalized avatars from multiple depth and RGB scans of a user with a consumer level sensor such as a Microsoft Kinect. Based on a fusion of state-of-the-art techniques in graphics, surface reconstruction, and animation, our semi-automatic method can produce a fully rigged, skinned, and textured character model suitable for real-time virtual environments in less than 15 minutes. First, a 3D point cloud is collected from the sensor using a simultaneous localization and mapping (SLAM) approach to track the device's movements over time (see Figure 1.a). Next, surface reconstruction techniques are employed to generate a watertight 3D mesh from the raw 3D points (see Figure 1.b). The resulting model is then analyzed to determine the human joint locations, and if a skeleton can be successfully generated for the model, the mesh is then rigged and skinned using weights that are calculated automatically [2]. Finally, photos captured periodically during the scanning process using the sensor's RGB camera are used to texture the final model. The resulting avatar is suitable for real-time animation using a virtual environment or video game engine (see Figure 1.c). We will demonstrate our avatar generation pipeline at IEEE Virtual Reality 2013. Conference attendees may opt to be scanned, and their generated avatar will be provided to them either on a USB stick or through email. A video of this demo can be found at the MxR Lab website [1].",
"fno": "06549424",
"keywords": [
"Three Dimensional Displays",
"Avatars",
"Electronic Mail",
"Solid Modeling",
"Surface Reconstruction",
"Virtual Environments",
"Avatars",
"Depth Sensors"
],
"authors": [
{
"affiliation": null,
"fullName": "Evan A. Suma",
"givenName": "Evan A.",
"surname": "Suma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "David M. Krum",
"givenName": "David M.",
"surname": "Krum",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thai Phan",
"givenName": null,
"surname": "Thai Phan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mark Bolas",
"givenName": "Mark",
"surname": "Bolas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-03-01T00:00:00",
"pubType": "proceedings",
"pages": "185-185",
"year": "2013",
"issn": "1087-8270",
"isbn": "978-1-4673-4795-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06549423",
"articleId": "12OmNyUFfKo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06549425",
"articleId": "12OmNqIzhfj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2013/6097/0/06550223",
"title": "Poster: Gesture-based control of avatars for social TV",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550223/12OmNAGepYr",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a325",
"title": "Instant Messenger with Personalized 3D Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a325/12OmNAkEU6d",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802113",
"title": "Automatic acquisition and animation of virtual avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08648222",
"title": "The Virtual Caliper: Rapid Creation of Metrically Accurate Avatars from 3D Measurements",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08648222/17QjJf0qqr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a764",
"title": "Automatic 3D Avatar Generation from a Single RBG Frontal Image",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a764/1CJexMJUGxa",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a810",
"title": "Motion Correction of Interactive CG Avatars Using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a810/1CJfavqESje",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5883",
"title": "High-Fidelity Human Avatars from a Single RGB Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5883/1H1hK72b9Je",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d721",
"title": "ANR: Articulated Neural Rendering for Virtual Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d721/1yeKjbSjlao",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
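Author objects are not uniform: in the row above, "Thai Phan" has `givenName: null`, with the full name carried only in `fullName` and `surname`. A defensive formatting sketch; `format_author` and `author_line` are hypothetical helpers.

```python
def format_author(author):
    """Render one author dict as 'Surname, Given', falling back to fullName."""
    given = author.get("givenName")
    surname = author.get("surname")
    if given and surname:
        return f"{surname}, {given}"
    return author.get("fullName") or surname or "Unknown"


def author_line(article):
    """Join all authors of an article record into one citation-style string."""
    return "; ".join(format_author(a) for a in article.get("authors", []))
```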
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgmRWwNUI",
"doi": "10.1109/VR55154.2023.00023",
"title": "Volumetric Avatar Reconstruction with Spatio-Temporally Offset RGBD Cameras",
"normalizedTitle": "Volumetric Avatar Reconstruction with Spatio-Temporally Offset RGBD Cameras",
"abstract": "RGBD cameras can capture users and their actions in the real world for reconstruction of photo-realistic volumetric avatars that allow rich interaction between spatially distributed telepresence parties in virtual environments. In this paper, we present and evaluate a system design that enables volumetric avatar reconstruction at increased frame rates. We demonstrate that we can overcome the limited capturing frame rate of commodity RGBD cameras such as the Azure Kinect by dividing a set of cameras into two spatio-temporally offset reconstruction groups and implementing a real-time reconstruction pipeline to fuse the temporally offset RGBD image streams. Comparisons of our proposed system against capture configurations possible with the same number of RGBD cameras indicate that it is beneficial to use a combination of spatially and temporally offset RGBD cameras, allowing increased reconstruction frame rates and scene coverage while producing temporally consistent volumetric avatars.",
"abstracts": [
{
"abstractType": "Regular",
"content": "RGBD cameras can capture users and their actions in the real world for reconstruction of photo-realistic volumetric avatars that allow rich interaction between spatially distributed telepresence parties in virtual environments. In this paper, we present and evaluate a system design that enables volumetric avatar reconstruction at increased frame rates. We demonstrate that we can overcome the limited capturing frame rate of commodity RGBD cameras such as the Azure Kinect by dividing a set of cameras into two spatio-temporally offset reconstruction groups and implementing a real-time reconstruction pipeline to fuse the temporally offset RGBD image streams. Comparisons of our proposed system against capture configurations possible with the same number of RGBD cameras indicate that it is beneficial to use a combination of spatially and temporally offset RGBD cameras, allowing increased reconstruction frame rates and scene coverage while producing temporally consistent volumetric avatars.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "RGBD cameras can capture users and their actions in the real world for reconstruction of photo-realistic volumetric avatars that allow rich interaction between spatially distributed telepresence parties in virtual environments. In this paper, we present and evaluate a system design that enables volumetric avatar reconstruction at increased frame rates. We demonstrate that we can overcome the limited capturing frame rate of commodity RGBD cameras such as the Azure Kinect by dividing a set of cameras into two spatio-temporally offset reconstruction groups and implementing a real-time reconstruction pipeline to fuse the temporally offset RGBD image streams. Comparisons of our proposed system against capture configurations possible with the same number of RGBD cameras indicate that it is beneficial to use a combination of spatially and temporally offset RGBD cameras, allowing increased reconstruction frame rates and scene coverage while producing temporally consistent volumetric avatars.",
"fno": "481500a072",
"keywords": [
"Three Dimensional Displays",
"Telepresence",
"Avatars",
"Virtual Environments",
"User Interfaces",
"Streaming Media",
"Cameras"
],
"authors": [
{
"affiliation": "Bauhaus-Universität Weimar,Virtual Reality and Visualization,Germany",
"fullName": "Gareth Rendle",
"givenName": "Gareth",
"surname": "Rendle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bauhaus-Universität Weimar,Virtual Reality and Visualization,Germany",
"fullName": "Adrian Kreskowski",
"givenName": "Adrian",
"surname": "Kreskowski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bauhaus-Universität Weimar,Virtual Reality and Visualization,Germany",
"fullName": "Bernd Froehlich",
"givenName": "Bernd",
"surname": "Froehlich",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "72-82",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgmIUuehq",
"name": "pvr202348150-010108476s1-mm_481500a072.zip",
"size": "54.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108476s1-mm_481500a072.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a063",
"articleId": "1MNgBwrqcb6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a083",
"articleId": "1MNgRmjl6Zq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2014/4761/0/06890122",
"title": "Registration of multiple RGBD cameras via local rigid transformations",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890122/12OmNBOll5M",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e660",
"title": "Temporally Coherent 4D Reconstruction of Complex Dynamic Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e660/12OmNCbU37a",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131731",
"title": "Volumetric calibration and registration of multiple RGBD-sensors into a joint coordinate system",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131731/12OmNyqRnac",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a514",
"title": "Hybrid Skeleton Driven Surface Registration for Temporally Consistent Volumetric Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a514/17D45XwUAGX",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a666",
"title": "If I Share with you my Perspective, Would you Share your Data with me?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a666/1CJcFhW6P6M",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600g145",
"title": "NeuralHOFusion: Neural Volumetric Rendering under Human-object Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600g145/1H1itCwY51e",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09925090",
"title": "RobustFusion: Robust Volumetric Performance Reconstruction Under Human-Object Interactions From Monocular RGBD Stream",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09925090/1HBHXf7iQZG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09257094",
"title": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1728",
"title": "Pixel-aligned Volumetric Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1728/1yeHX163Xnq",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f742",
"title": "Function4D: Real-time Human Volumetric Capture from Very Sparse Consumer RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f742/1yeJFKObhAY",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
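The row above is the only one in this sample with a non-empty `webExtras` list, and its `size` is a display string ("54.4 MB") rather than a number. A sketch that converts such strings to bytes for sorting or quota checks; the binary-unit table is an assumption covering formats of this kind.

```python
_UNITS = {"KB": 1024, "MB": 1024 ** 2, "GB": 1024 ** 3}  # assumed binary units


def size_in_bytes(size_str):
    """Parse a display size such as '54.4 MB' into an integer byte count."""
    value, unit = size_str.split()
    return int(float(value) * _UNITS[unit.upper()])


# size_in_bytes("54.4 MB") == 57042534
```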
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1d3MdShi",
"doi": "10.1109/VR.2019.8797819",
"title": "Localizing Teleoperator Gaze in 360° Hosted Telepresence",
"normalizedTitle": "Localizing Teleoperator Gaze in 360° Hosted Telepresence",
"abstract": "We evaluate the ability of locally present participants to localize an avatar head's gaze direction in 360° hosted telepresence. We performed a controlled user study to test two potential solutions to indicate a remote user's gaze. We analyze the influence of the user's distance to the avatar and display technique on localization accuracy. Our experimental results suggest that all these factors have a significant effect on the localization accuracy with varying effect sizes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We evaluate the ability of locally present participants to localize an avatar head's gaze direction in 360° hosted telepresence. We performed a controlled user study to test two potential solutions to indicate a remote user's gaze. We analyze the influence of the user's distance to the avatar and display technique on localization accuracy. Our experimental results suggest that all these factors have a significant effect on the localization accuracy with varying effect sizes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We evaluate the ability of locally present participants to localize an avatar head's gaze direction in 360° hosted telepresence. We performed a controlled user study to test two potential solutions to indicate a remote user's gaze. We analyze the influence of the user's distance to the avatar and display technique on localization accuracy. Our experimental results suggest that all these factors have a significant effect on the localization accuracy with varying effect sizes.",
"fno": "08797819",
"keywords": [
"Avatars",
"Teleoperator Gaze",
"Display Technique",
"Avatar Heads Gaze",
"Remote Users Gaze",
"Avatars",
"Telepresence",
"Teleoperators",
"Resists",
"Face"
],
"authors": [
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Jingxin Zhang",
"givenName": "Jingxin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Nikolaos Katzakis",
"givenName": "Nikolaos",
"surname": "Katzakis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Fariba Mostajeran",
"givenName": "Fariba",
"surname": "Mostajeran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Frank Steinicke",
"givenName": "Frank",
"surname": "Steinicke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1265-1266",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798000",
"articleId": "1cJ19xsq3CM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797878",
"articleId": "1cJ0I4GtxhC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504684",
"title": "MMSpace: Kinetically-augmented telepresence for small group-to-group conversations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504684/12OmNvlg8fs",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08314105",
"title": "Detection Thresholds for Rotation and Translation Gains in 360° Video-Based Telepresence Systems",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08314105/13rRUxASubD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714051",
"title": "Augmenting Immersive Telepresence Experience with a Virtual Body",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089599",
"title": "An Optical Design for Avatar-User Co-axial Viewpoint Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089599/1jIx8SwZIuQ",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382845",
"title": "The Influence of Avatar Representation on Interpersonal Communication in Virtual Social Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382845/1saZq7bIPUQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
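Many recommendations cluster around a few venues (ISMAR, VR, TVCG). A sketch that groups one record's `recommendedArticles` by their `parentPublication` title, using only the fields visible in the rows above; `group_by_venue` is a hypothetical name.

```python
from collections import defaultdict


def group_by_venue(record):
    """Map parent-publication title -> list of recommended article titles."""
    groups = defaultdict(list)
    for rec in record.get("recommendedArticles", []):
        venue = rec.get("parentPublication", {}).get("title", "Unknown")
        groups[venue].append(rec.get("title"))
    return dict(groups)
```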
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1dVsXQDS",
"doi": "10.1109/VR.2019.8797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"normalizedTitle": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"abstract": "This paper investigates the effect of avatar appearance on Social Presence and users' perception in an Augmented Reality (AR) telep-resence system. Despite the development of various commercial 3D telepresence systems, there has been little evaluation and discussions about the appearance of the collaborator's avatars. We conducted two user studies comparing the effect of avatar appearances with three levels of body part visibility (head & hands, upper body, and whole body) and two different character styles (realistic and cartoon-like) on Social Presence while performing two different remote collaboration tasks. We found that a realistic whole body avatar was perceived as being the best for remote collaboration, but an upper body or cartoon style could be considered as a substitute depending on the collaboration context. We discuss these results and suggest guidelines for designing future avatar-mediated AR remote collaboration systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper investigates the effect of avatar appearance on Social Presence and users' perception in an Augmented Reality (AR) telep-resence system. Despite the development of various commercial 3D telepresence systems, there has been little evaluation and discussions about the appearance of the collaborator's avatars. We conducted two user studies comparing the effect of avatar appearances with three levels of body part visibility (head & hands, upper body, and whole body) and two different character styles (realistic and cartoon-like) on Social Presence while performing two different remote collaboration tasks. We found that a realistic whole body avatar was perceived as being the best for remote collaboration, but an upper body or cartoon style could be considered as a substitute depending on the collaboration context. We discuss these results and suggest guidelines for designing future avatar-mediated AR remote collaboration systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper investigates the effect of avatar appearance on Social Presence and users' perception in an Augmented Reality (AR) telep-resence system. Despite the development of various commercial 3D telepresence systems, there has been little evaluation and discussions about the appearance of the collaborator's avatars. We conducted two user studies comparing the effect of avatar appearances with three levels of body part visibility (head & hands, upper body, and whole body) and two different character styles (realistic and cartoon-like) on Social Presence while performing two different remote collaboration tasks. We found that a realistic whole body avatar was perceived as being the best for remote collaboration, but an upper body or cartoon style could be considered as a substitute depending on the collaboration context. We discuss these results and suggest guidelines for designing future avatar-mediated AR remote collaboration systems.",
"fno": "08797719",
"keywords": [
"Audio Visual Systems",
"Augmented Reality",
"Avatars",
"Groupware",
"User Interfaces",
"Commercial 3 D Telepresence Systems",
"Body Part Visibility",
"Upper Body",
"Augmented Reality Remote Collaboration",
"User Perception",
"Avatar Appearance Effect",
"Social Presence",
"Augmented Reality Telep Resence System",
"Collaborator Avatars",
"Cartoon Style",
"Avatar Mediated AR Remote Collaboration Systems",
"Avatars",
"Collaboration",
"Three Dimensional Displays",
"Telepresence",
"Augmented Reality",
"Task Analysis",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 HCI Design And Evaluation Methods X 2014 User Studies"
],
"authors": [
{
"affiliation": "KAIST UVR Lab",
"fullName": "Boram Yoon",
"givenName": "Boram",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Hyung-il Kim",
"givenName": "Hyung-il",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Gun A. Lee",
"givenName": "Gun A.",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Woontack Woo",
"givenName": "Woontack",
"surname": "Woo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "547-556",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798152",
"articleId": "1cJ1djEUmv6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798019",
"articleId": "1cJ17trBZEQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/irc/2017/6724/0/07926508",
"title": "Gutsy-Avatar: Computational Assimilation for Advanced Communication and Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926508/12OmNBLdKLX",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a763",
"title": "Effects of Avatar Face Level of Detail Control on Social Presence in Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a763/1J7WdNbgEFy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a260",
"title": "The Effects of Avatar and Environment Design on Embodiment, Presence, Activation, and Task Load in a Virtual Reality Exercise Application",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a260/1JrRf0Dbcac",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798044",
"title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998353",
"title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998353/1hpPDKs9c7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523831",
"title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a435",
"title": "Multi-scale Mixed Reality Collaboration for Digital Twin",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a435/1yeQLyb4LpC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1djEUmv6",
"doi": "10.1109/VR.2019.8798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"normalizedTitle": "The Influence of Size in Augmented Reality Telepresence Avatars",
"abstract": "In this work, we explore how advances in augmented reality technologies are creating a new design space for long-distance telepresence communication through virtual avatars. Studies have shown that the relative size of a speaker has a significant impact on many aspects of human communication including perceived dominance and persuasiveness. Our system synchronizes the body pose of a remote user with a realistic, virtual human avatar visible to a local user wearing an augmented reality head-mounted display. We conducted a two-by-two (relative system size: equivalent vs. small; leader vs. follower), between participants study (N = 40) to investigate the effect of avatar size on the interactions between remote and local user. We found the equal-sized avatars to be significantly more influential than the small-sized avatars and that the small avatars commanded significantly less attention than the equal-sized avatars. Additionally, we found the assigned leadership role to significantly impact participant subjective satisfaction of the task outcome.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we explore how advances in augmented reality technologies are creating a new design space for long-distance telepresence communication through virtual avatars. Studies have shown that the relative size of a speaker has a significant impact on many aspects of human communication including perceived dominance and persuasiveness. Our system synchronizes the body pose of a remote user with a realistic, virtual human avatar visible to a local user wearing an augmented reality head-mounted display. We conducted a two-by-two (relative system size: equivalent vs. small; leader vs. follower), between participants study (N = 40) to investigate the effect of avatar size on the interactions between remote and local user. We found the equal-sized avatars to be significantly more influential than the small-sized avatars and that the small avatars commanded significantly less attention than the equal-sized avatars. Additionally, we found the assigned leadership role to significantly impact participant subjective satisfaction of the task outcome.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we explore how advances in augmented reality technologies are creating a new design space for long-distance telepresence communication through virtual avatars. Studies have shown that the relative size of a speaker has a significant impact on many aspects of human communication including perceived dominance and persuasiveness. Our system synchronizes the body pose of a remote user with a realistic, virtual human avatar visible to a local user wearing an augmented reality head-mounted display. We conducted a two-by-two (relative system size: equivalent vs. small; leader vs. follower), between participants study (N = 40) to investigate the effect of avatar size on the interactions between remote and local user. We found the equal-sized avatars to be significantly more influential than the small-sized avatars and that the small avatars commanded significantly less attention than the equal-sized avatars. Additionally, we found the assigned leadership role to significantly impact participant subjective satisfaction of the task outcome.",
"fno": "08798152",
"keywords": [
"Augmented Reality",
"Avatars",
"Helmet Mounted Displays",
"Augmented Reality Telepresence Avatars",
"Augmented Reality Technologies",
"Long Distance Telepresence Communication",
"Realistic Avatar",
"Virtual Human Avatar",
"Augmented Reality Head Mounted Display",
"Avatar Size",
"Equal Sized Avatars",
"Small Sized Avatars",
"Human Communication",
"Avatars",
"Telepresence",
"Robots",
"Augmented Reality",
"Collaboration",
"Leadership",
"Avatars Augmented Reality",
"Mixed Reality",
"Avatar Mediated Communication",
"Human Avatar Interaction",
"Avatar Telepresence Systems",
"Avatar Size",
"Scenario Based Design",
"Team Role",
"Human Centered Computing X 2014 HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality Human Centered Computing X 2014 HCI X 2014 Interaction Paradigms X 2014 Virtual Reality Human Centered Computing X 2014 HCI X 2014 Interaction Paradigms X 2014 Web Based Interaction Human Centered Computing X 2014 HCI X 2014 Interaction Paradigms X 2014 Collaborative Interaction Human Centered Computing X 2014 HCI X 2014 HCI Design And Evaluation Methods X 2014 User Studies Human Centered Computing X 2014 HCI X 2014 HCI Design And Evaluation Methods X 2014 Laboratory Experiments Human Centered Computing X 2014 Interaction Design X 2014 Interaction Design Process And Methods X 2014 Scenario Based Design"
],
"authors": [
{
"affiliation": "University of Colorado Boulder, Boulder, CO, USA",
"fullName": "Michael E. Walker",
"givenName": "Michael E.",
"surname": "Walker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Colorado Boulder, Boulder, CO, USA",
"fullName": "Daniel Szafir",
"givenName": "Daniel",
"surname": "Szafir",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google, Madison, WI, USA",
"fullName": "Irene Rae",
"givenName": "Irene",
"surname": "Rae",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "538-546",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797716",
"articleId": "1cJ1dFOKU3m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797719",
"articleId": "1cJ1dVsXQDS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2016/5670/0/5670a051",
"title": "Introducing Avatarification: An Experimental Examination of How Avatars Influence Student Motivation",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670a051/12OmNvDqsPL",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a881",
"title": "Investigating the Relation Between Gender Expression of Mixed Reality Avatars and Sexuality of Male Users",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a881/1J7WtSuCzdu",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a026",
"title": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a026/1gysn4uy67C",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09173828",
"title": "Placement Retargeting of Virtual Avatars to Dissimilar Indoor Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09173828/1mtsbpUceNG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a462",
"title": "Body Weight Perception of Females using Photorealistic Avatars in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a462/1pysu9tPcGc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523831",
"title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxd00PzX2",
"doi": "10.1109/VR46266.2020.00017",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"normalizedTitle": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"abstract": "Telepresence avatars enable users in different environments to interact with each other. In order to increase the effectiveness of these interactions, however, the movements of avatars must be adjusted accordingly to account for differences between user environments. For instance, if a user moves from one point to another in one environment, the avatar’s locomotion speed must be adjusted to move to the corresponding target point in another environment at the same time. Several locomotion styles can be used to achieve this speed change. This paper investigates how different avatar locomotion styles (speed, stride, and glide), body visibility levels (full body and head-to-knee), and views (front views and side views) influence human perceptions of the naturalness of motion, similarity to the user’s locomotion, and the degree of preserving the user’s intention. Our results indicate that 1) speed and stride styles are perceived as more natural than the glide style, while the glide style is more intention-preserving than the others, 2) a greater locomotion speed of the avatar is perceived as more natural, similar, and intention-preserving than slower motion, 3) the perception of naturalness has the greatest impact on people’s preferences for locomotion styles, and that 4) head-to-knee body visibility may enhance the perception of naturalness for the glide style.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Telepresence avatars enable users in different environments to interact with each other. In order to increase the effectiveness of these interactions, however, the movements of avatars must be adjusted accordingly to account for differences between user environments. For instance, if a user moves from one point to another in one environment, the avatar’s locomotion speed must be adjusted to move to the corresponding target point in another environment at the same time. Several locomotion styles can be used to achieve this speed change. This paper investigates how different avatar locomotion styles (speed, stride, and glide), body visibility levels (full body and head-to-knee), and views (front views and side views) influence human perceptions of the naturalness of motion, similarity to the user’s locomotion, and the degree of preserving the user’s intention. Our results indicate that 1) speed and stride styles are perceived as more natural than the glide style, while the glide style is more intention-preserving than the others, 2) a greater locomotion speed of the avatar is perceived as more natural, similar, and intention-preserving than slower motion, 3) the perception of naturalness has the greatest impact on people’s preferences for locomotion styles, and that 4) head-to-knee body visibility may enhance the perception of naturalness for the glide style.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Telepresence avatars enable users in different environments to interact with each other. In order to increase the effectiveness of these interactions, however, the movements of avatars must be adjusted accordingly to account for differences between user environments. For instance, if a user moves from one point to another in one environment, the avatar’s locomotion speed must be adjusted to move to the corresponding target point in another environment at the same time. Several locomotion styles can be used to achieve this speed change. This paper investigates how different avatar locomotion styles (speed, stride, and glide), body visibility levels (full body and head-to-knee), and views (front views and side views) influence human perceptions of the naturalness of motion, similarity to the user’s locomotion, and the degree of preserving the user’s intention. Our results indicate that 1) speed and stride styles are perceived as more natural than the glide style, while the glide style is more intention-preserving than the others, 2) a greater locomotion speed of the avatar is perceived as more natural, similar, and intention-preserving than slower motion, 3) the perception of naturalness has the greatest impact on people’s preferences for locomotion styles, and that 4) head-to-knee body visibility may enhance the perception of naturalness for the glide style.",
"fno": "09089654",
"keywords": [
"Avatars",
"Legged Locomotion",
"Telepresence",
"Torso",
"Three Dimensional Displays",
"Conferences",
"Telepresence",
"Motion Retargeting",
"Perception",
"Virtual Avatar"
],
"authors": [
{
"affiliation": "KAIST,Graduate School of Culture Technology",
"fullName": "Youjin Choi",
"givenName": "Youjin",
"surname": "Choi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST,Graduate School of Culture Technology",
"fullName": "Jeongmi Lee",
"givenName": "Jeongmi",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST,Graduate School of Culture Technology",
"fullName": "Sung-Hee Lee",
"givenName": "Sung-Hee",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-9",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089498",
"articleId": "1jIxfJtmet2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089588",
"articleId": "1jIxbTl2uRi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223406",
"title": "Self-characterstics and sound in immersive virtual reality — Estimating avatar weight from footstep sounds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223406/12OmNAlvHUH",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771353",
"title": "Expression of emotional states during locomotion based on canonical parameters",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771353/12OmNAqU4VX",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492762",
"title": "Comparing VE locomotion interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492762/12OmNx8fi8K",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446229",
"title": "Any “Body” There? Avatar Visibility Effects in a Virtual Reality Game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446229/13bd1fHrlRx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09744001",
"title": "Influence of user posture and virtual exercise on impression of locomotion during VR observation",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09744001/1C8BFV420lq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797819",
"title": "Localizing Teleoperator Gaze in 360° Hosted Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797819/1cJ1d3MdShi",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090453",
"title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a452",
"title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
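The locomotion-retargeting record above hinges on one small piece of arithmetic: if the user and the avatar must arrive at their corresponding targets at the same time, the avatar's speed is fixed by the ratio of the two path lengths. A minimal sketch of that calculation, with invented names and units (not the authors' code):

```python
# Illustrative sketch only (names invented, not from the paper): pick the
# avatar speed that makes the avatar reach its corresponding target at the
# same moment the user reaches theirs.

def retargeted_speed(user_distance_m: float,
                     user_speed_mps: float,
                     avatar_distance_m: float) -> float:
    """The user needs t = d_user / v_user seconds; the avatar must cover
    its own (possibly different) distance in that same t, so
    v_avatar = d_avatar / t = d_avatar * v_user / d_user."""
    if user_distance_m <= 0 or user_speed_mps <= 0:
        raise ValueError("user distance and speed must be positive")
    return avatar_distance_m * user_speed_mps / user_distance_m

# Example: the user walks 4 m at 1.2 m/s, but the remote room's matching
# path is only 2.5 m long, so the avatar moves at 0.75 m/s.
print(retargeted_speed(4.0, 1.2, 2.5))  # -> 0.75
```

The speed, stride, and glide styles compared in the abstract are three ways of realizing this same adjusted velocity: faster playback, longer steps, or translation without stepping.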
{
"proceeding": {
"id": "12OmNzcxZeM",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"acronym": "icpads",
"groupId": "1000534",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqzu6VX",
"doi": "10.1109/PADSW.2014.7097812",
"title": "Virtual keyboard for head mounted display-based wearable devices",
"normalizedTitle": "Virtual keyboard for head mounted display-based wearable devices",
"abstract": "Wearable devices eliminate the need of physically taking out a mobile device before operating on it and are emerging as the next wave of mobile systems. Head-mounted display (HMD) is a key building block of wearable devices, and offers users immediate access to relevant information in a glance. However, most existing user input mechanisms accompanying HMDs are designed for interactive information exploration rather than for extended text entry. This paper describes the design, implementation and evaluation of a text input system for HMDs called Air Typing, which requires only a standard camera and is shown to be comparable in effectiveness to single-hand text input on tablet computers in a lab setting. Air Typing features a novel two-level virtual keyword layout, which substantially improves the typing speed by cutting down unnecessary hand movements during typing and greatly simplifies the associated image processing task by doing away with fine-grained matching between fingertips and keys. The current Air Typing prototype incorporates an OpenCV-based virtual key press detection algorithm that runs on the featured two-level virtual keyboard. In our tests, an experienced user's typing speeds of one-hand text input and of two-hand text input under Air Typing are 13 and 15 words per minute (WPM), respectively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wearable devices eliminate the need of physically taking out a mobile device before operating on it and are emerging as the next wave of mobile systems. Head-mounted display (HMD) is a key building block of wearable devices, and offers users immediate access to relevant information in a glance. However, most existing user input mechanisms accompanying HMDs are designed for interactive information exploration rather than for extended text entry. This paper describes the design, implementation and evaluation of a text input system for HMDs called Air Typing, which requires only a standard camera and is shown to be comparable in effectiveness to single-hand text input on tablet computers in a lab setting. Air Typing features a novel two-level virtual keyword layout, which substantially improves the typing speed by cutting down unnecessary hand movements during typing and greatly simplifies the associated image processing task by doing away with fine-grained matching between fingertips and keys. The current Air Typing prototype incorporates an OpenCV-based virtual key press detection algorithm that runs on the featured two-level virtual keyboard. In our tests, an experienced user's typing speeds of one-hand text input and of two-hand text input under Air Typing are 13 and 15 words per minute (WPM), respectively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wearable devices eliminate the need of physically taking out a mobile device before operating on it and are emerging as the next wave of mobile systems. Head-mounted display (HMD) is a key building block of wearable devices, and offers users immediate access to relevant information in a glance. However, most existing user input mechanisms accompanying HMDs are designed for interactive information exploration rather than for extended text entry. This paper describes the design, implementation and evaluation of a text input system for HMDs called Air Typing, which requires only a standard camera and is shown to be comparable in effectiveness to single-hand text input on tablet computers in a lab setting. Air Typing features a novel two-level virtual keyword layout, which substantially improves the typing speed by cutting down unnecessary hand movements during typing and greatly simplifies the associated image processing task by doing away with fine-grained matching between fingertips and keys. The current Air Typing prototype incorporates an OpenCV-based virtual key press detection algorithm that runs on the featured two-level virtual keyboard. In our tests, an experienced user's typing speeds of one-hand text input and of two-hand text input under Air Typing are 13 and 15 words per minute (WPM), respectively.",
"fno": "07097812",
"keywords": [
"Cameras",
"Feature Extraction",
"Gesture Recognition",
"Helmet Mounted Displays",
"Image Matching",
"Keyboards",
"Mobile Handsets",
"Notebook Computers",
"Two Hand Text Input",
"One Hand Text Input",
"User Typing Speeds",
"Featured Two Level Virtual Keyboard",
"Open CV Based Virtual Key Press Detection Algorithm",
"Air Typing Prototype",
"Fine Grained Matching",
"Image Processing Task",
"Two Level Virtual Keyword Layout",
"Tablet Computers",
"Single Hand Text Input System",
"Extended Text Entry",
"Interactive Information Exploration",
"HMD",
"Mobile Systems",
"Mobile Device",
"Head Mounted Display Based Wearable Devices",
"Keyboards",
"Thumb",
"Presses",
"Skin",
"Layout",
"Engines",
"Head Mounted Display",
"Typing In Air",
"Virtual Keyboard",
"Keyboard Layout",
"Hand Tracking",
"Fingertip Detection And Tracking"
],
"authors": [
{
"affiliation": "Industrial Technology Research Institute, Hsinchu, Taiwan",
"fullName": "Ming-Wei Chang",
"givenName": "Ming-Wei",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Industrial Technology Research Institute, Hsinchu, Taiwan",
"fullName": "Tzi-cker Chiueh",
"givenName": "Tzi-cker",
"surname": "Chiueh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Industrial Technology Research Institute, Hsinchu, Taiwan",
"fullName": "Chia-Ming Chang",
"givenName": "Chia-Ming",
"surname": "Chang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpads",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "225-232",
"year": "2014",
"issn": "1521-9097",
"isbn": "978-1-4799-7615-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07097811",
"articleId": "12OmNAhfIxl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07097813",
"articleId": "12OmNA0dMS8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/aina/2018/2195/0/219501a342",
"title": "Space Saving Text Input Method for Head Mounted Display with Virtual 12-key Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2018/219501a342/12OmNyoiZ37",
"parentPublication": {
"id": "proceedings/aina/2018/2195/0",
"title": "2018 IEEE 32nd International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2018/10/08269805",
"title": "CamK: Camera-Based Keystroke Detection and Localization for Small Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tm/2018/10/08269805/13rRUxlgxU6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0/751800a792",
"title": "C-SAK: Chinese Scanning Ambiguous Keyboard for Parkinson's Disease Patients",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2018/751800a792/17D45XoXP46",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0",
"title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2018/7308/0/08644590",
"title": "Oinput: A Bone-Conductive QWERTY Keyboard Recognition for Wearable Device",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2018/08644590/17QjJd2W6Lq",
"parentPublication": {
"id": "proceedings/icpads/2018/7308/0",
"title": "2018 IEEE 24th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a519",
"title": "TapID: Rapid Touch Interaction in Virtual Reality using Wearable Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a519/1tuBtNYt0LC",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523889",
"title": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523889/1wpqwAIMiRy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
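The Air Typing record above rests on a two-level keyboard: the first selection picks a coarse letter group and the second picks a letter within it, so the camera pipeline only ever has to resolve a few wide regions rather than individual keys. A hypothetical sketch of that lookup, with an invented layout and frame width (not the authors' implementation):

```python
# Hypothetical sketch (not the authors' code) of the idea behind a two-level
# virtual keyboard: each selection only has to land in one of six wide
# regions, so fingertip-to-key matching stays coarse. The layout, region
# count, and frame width below are invented for illustration.

GROUPS = ["abcdef", "ghijkl", "mnopqr", "stuvwx", "yz.,?!", "0123-_"]
SCREEN_W = 640  # assumed camera frame width in pixels

def region_at(x: int) -> int:
    """Map a fingertip x-coordinate to one of six equal-width regions."""
    return min(x * len(GROUPS) // SCREEN_W, len(GROUPS) - 1)

def select(first_tap_x: int, second_tap_x: int) -> str:
    """Level 1 picks a six-letter group; level 2 picks the letter in it."""
    return GROUPS[region_at(first_tap_x)][region_at(second_tap_x)]

# Example: first tap lands in region 2 ("mnopqr"), second in region 5 -> 'r'.
print(select(300, 600))
```

Resolving six large regions per tap is far more forgiving of fingertip-detection noise than matching against the dozens of small keys on a full QWERTY layout, which is the simplification the abstract credits for both the speed and the lighter image processing.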
{
"proceeding": {
"id": "12OmNvlxJwR",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cbms",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx1IwaL",
"doi": "10.1109/CBMS.2017.134",
"title": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing",
"normalizedTitle": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing",
"abstract": "Gaze-based virtual keyboards provide an effective interface for text entry by eye movements. The efficiency and usability of these keyboards have traditionally been evaluated with conventional text entry performance measures such as words per minute, keystrokes per character, backspace usage, etc. However, in comparison to the traditional text entry approaches, gaze-based typing involves natural eye movements that are highly correlated with human brain cognition. Employing eye gaze as an input could lead to excessive mental demand, and in this work we argue the need to include cognitive load as an eye typing evaluation measure. We evaluate three variations of gaze-based virtual keyboards, which implement variable designs in terms of word suggestion positioning. The conventional text entry metrics indicate no significant difference in the performance of the different keyboard designs. However, STFT (Short-time Fourier Transform) based analysis of EEG signals indicate variances in the mental workload of participants while interacting with these designs. Moreover, the EEG analysis provides insights into the users cognition variation for different typing phases and intervals, which should be considered in order to improve eye typing usability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gaze-based virtual keyboards provide an effective interface for text entry by eye movements. The efficiency and usability of these keyboards have traditionally been evaluated with conventional text entry performance measures such as words per minute, keystrokes per character, backspace usage, etc. However, in comparison to the traditional text entry approaches, gaze-based typing involves natural eye movements that are highly correlated with human brain cognition. Employing eye gaze as an input could lead to excessive mental demand, and in this work we argue the need to include cognitive load as an eye typing evaluation measure. We evaluate three variations of gaze-based virtual keyboards, which implement variable designs in terms of word suggestion positioning. The conventional text entry metrics indicate no significant difference in the performance of the different keyboard designs. However, STFT (Short-time Fourier Transform) based analysis of EEG signals indicate variances in the mental workload of participants while interacting with these designs. Moreover, the EEG analysis provides insights into the users cognition variation for different typing phases and intervals, which should be considered in order to improve eye typing usability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gaze-based virtual keyboards provide an effective interface for text entry by eye movements. The efficiency and usability of these keyboards have traditionally been evaluated with conventional text entry performance measures such as words per minute, keystrokes per character, backspace usage, etc. However, in comparison to the traditional text entry approaches, gaze-based typing involves natural eye movements that are highly correlated with human brain cognition. Employing eye gaze as an input could lead to excessive mental demand, and in this work we argue the need to include cognitive load as an eye typing evaluation measure. We evaluate three variations of gaze-based virtual keyboards, which implement variable designs in terms of word suggestion positioning. The conventional text entry metrics indicate no significant difference in the performance of the different keyboard designs. However, STFT (Short-time Fourier Transform) based analysis of EEG signals indicate variances in the mental workload of participants while interacting with these designs. Moreover, the EEG analysis provides insights into the users cognition variation for different typing phases and intervals, which should be considered in order to improve eye typing usability.",
"fno": "1710a787",
"keywords": [
"Cognition",
"Electroencephalography",
"Eye",
"Fourier Transforms",
"Gaze Tracking",
"Human Computer Interaction",
"Keyboards",
"Medical Signal Processing",
"Neurophysiology",
"User Interfaces",
"Text Entry Performance Measures",
"Gaze Based Virtual Keyboards",
"Keyboard Usability",
"Short Time Fourier Transform",
"EEG Signal Analysis",
"Eye Typing Usability",
"Word Suggestion Positioning",
"Eye Typing Evaluation Measure",
"Eye Gaze",
"Human Brain Cognition",
"Natural Eye Movements",
"Cognitive Load",
"Keyboards",
"Electroencephalography",
"Visualization",
"Layout",
"Eye Typing",
"Gaze Input",
"EEG",
"Cognitive Load"
],
"authors": [
{
"affiliation": null,
"fullName": "Korok Sengupta",
"givenName": "Korok",
"surname": "Sengupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jun Sun",
"givenName": "Jun",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Raphael Menges",
"givenName": "Raphael",
"surname": "Menges",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chandan Kumar",
"givenName": "Chandan",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Steffen Staab",
"givenName": "Steffen",
"surname": "Staab",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cbms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-06-01T00:00:00",
"pubType": "proceedings",
"pages": "787-792",
"year": "2017",
"issn": "2372-9198",
"isbn": "978-1-5386-1710-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1710a781",
"articleId": "12OmNxaNGnu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1710a793",
"articleId": "12OmNAKcNL4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2017/0563/0/08273592",
"title": "Evaluating effectiveness of smartphone typing as an indicator of user emotion",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273592/12OmNz61d2O",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917593",
"title": "EyeAssist: A communication aid through gaze tracking for patients with neuro-motor disabilities",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917593/19wAJMxS29G",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a888",
"title": "Flick Typing: Toward A New XR Text Input System Based on 3D Gestures and Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a888/1CJe2bLhGbC",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/1/260701a622",
"title": "Investigating Differences in Gaze and Typing Behavior Across Age Groups and Writing Genres",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260701a622/1cYiyjMU0ne",
"parentPublication": {
"id": "proceedings/compsac/2019/2607/1",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a387",
"title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09430933",
"title": "Exploratory Analysis of Nose-gesture for Smartphone Aided Typing for Users with Clinical Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09430933/1tROMoFWkVy",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
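The gaze-typing record above evaluates keyboard designs through STFT-based analysis of EEG signals rather than through text entry metrics alone. A minimal sketch of that style of analysis on a synthetic signal, using SciPy; the sampling rate, window length, and theta-band workload proxy are assumptions, not the paper's pipeline:

```python
# Minimal sketch of STFT-based EEG workload analysis on a synthetic signal.
# Sampling rate, window length, and the theta-band proxy are assumptions.
import numpy as np
from scipy.signal import stft

fs = 256                          # assumed EEG sampling rate (Hz)
time = np.arange(0, 10, 1 / fs)  # 10 s synthetic single-channel "EEG"
eeg = np.sin(2 * np.pi * 6 * time) + 0.5 * np.random.randn(time.size)

# Short-Time Fourier Transform: a frequencies-by-windows spectrogram.
freqs, win_times, Zxx = stft(eeg, fs=fs, nperseg=fs)  # 1 s windows

# Mean power in the theta band (4-8 Hz), a common mental-workload proxy.
theta = (freqs >= 4) & (freqs <= 8)
theta_power = np.mean(np.abs(Zxx[theta, :]) ** 2, axis=0)
print(theta_power.shape)  # one value per analysis window
```

Tracking such a band-power series per typing phase is one way a study like this can surface workload differences that words-per-minute and keystroke counts miss.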
{
"proceeding": {
"id": "12OmNCcbEdf",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz61d2O",
"doi": "10.1109/ACII.2017.8273592",
"title": "Evaluating effectiveness of smartphone typing as an indicator of user emotion",
"normalizedTitle": "Evaluating effectiveness of smartphone typing as an indicator of user emotion",
"abstract": "In Affective Computing, different modalities, such as speech, facial expressions, physiological properties, smart-phone usage patterns, and their combinations, are applied to detect the affective states of a user. Keystroke analysis i.e. study of the typing behavior in desktop computer is found to be an effective modality for emotion detection because of its reliability, non-intrusiveness and low resource overhead. As smartphones proliferate, typing behavior on smartphone presents an equally powerful modality for emotion detection. It has the added advantage to run in-situ experiments with better coverage than the experiments using desktop computer keyboards. This work explores the efficacy of smartphone typing to detect multiple affective states. We use a qualitative and experimental approach to answer the question. We conduct an online survey among 120 participants to understand the typing habits in smartphones and collect feedback on multiple measurable parameters that affect their emotion while typing. The findings lead us to design and implement an Android based emotion detection system, TapSense, which can identify four different emotion states (happy, sad, stressed, relaxed) with an average accuracy (AUCROC) of 73% (maximum of 94%) based on typing features only. The analysis also reveals that among different features, typing speed is the most discriminative one.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In Affective Computing, different modalities, such as speech, facial expressions, physiological properties, smart-phone usage patterns, and their combinations, are applied to detect the affective states of a user. Keystroke analysis i.e. study of the typing behavior in desktop computer is found to be an effective modality for emotion detection because of its reliability, non-intrusiveness and low resource overhead. As smartphones proliferate, typing behavior on smartphone presents an equally powerful modality for emotion detection. It has the added advantage to run in-situ experiments with better coverage than the experiments using desktop computer keyboards. This work explores the efficacy of smartphone typing to detect multiple affective states. We use a qualitative and experimental approach to answer the question. We conduct an online survey among 120 participants to understand the typing habits in smartphones and collect feedback on multiple measurable parameters that affect their emotion while typing. The findings lead us to design and implement an Android based emotion detection system, TapSense, which can identify four different emotion states (happy, sad, stressed, relaxed) with an average accuracy (AUCROC) of 73% (maximum of 94%) based on typing features only. The analysis also reveals that among different features, typing speed is the most discriminative one.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In Affective Computing, different modalities, such as speech, facial expressions, physiological properties, smart-phone usage patterns, and their combinations, are applied to detect the affective states of a user. Keystroke analysis i.e. study of the typing behavior in desktop computer is found to be an effective modality for emotion detection because of its reliability, non-intrusiveness and low resource overhead. As smartphones proliferate, typing behavior on smartphone presents an equally powerful modality for emotion detection. It has the added advantage to run in-situ experiments with better coverage than the experiments using desktop computer keyboards. This work explores the efficacy of smartphone typing to detect multiple affective states. We use a qualitative and experimental approach to answer the question. We conduct an online survey among 120 participants to understand the typing habits in smartphones and collect feedback on multiple measurable parameters that affect their emotion while typing. The findings lead us to design and implement an Android based emotion detection system, TapSense, which can identify four different emotion states (happy, sad, stressed, relaxed) with an average accuracy (AUCROC) of 73% (maximum of 94%) based on typing features only. The analysis also reveals that among different features, typing speed is the most discriminative one.",
"fno": "08273592",
"keywords": [
"Android Operating System",
"Emotion Recognition",
"Face Recognition",
"Smart Phones",
"User Interfaces",
"Smartphone Typing",
"User Emotion",
"Affective Computing",
"Facial Expressions",
"Physiological Properties",
"Keystroke Analysis",
"Desktop Computer Keyboards",
"Android Based Emotion Detection System",
"Smartphone Usage Patterns",
"Feature Extraction",
"Keyboards",
"Probes",
"Servers",
"Affective Computing",
"Physiology",
"Reliability"
],
"authors": [
{
"affiliation": "Department of Computer Science and Engineering, Indian Institute of Technology Kharagpur, INDIA 721302",
"fullName": "Surjya Ghosh",
"givenName": "Surjya",
"surname": "Ghosh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Indian Institute of Technology Kharagpur, INDIA 721302",
"fullName": "Niloy Ganguly",
"givenName": "Niloy",
"surname": "Ganguly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Indian Institute of Technology Kharagpur, INDIA 721302",
"fullName": "Bivas Mitra",
"givenName": "Bivas",
"surname": "Mitra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Sciences, Georgia Southern University, USA",
"fullName": "Pradipta De",
"givenName": "Pradipta",
"surname": "De",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "146-151",
"year": "2017",
"issn": "2156-8111",
"isbn": "978-1-5386-0563-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08273591",
"articleId": "12OmNyk2ZWN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08273593",
"articleId": "12OmNrNh0KC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/compsacw/2014/3578/0/3578a258",
"title": "Physiological Mouse: Towards an Emotion-Aware Mouse",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2014/3578a258/12OmNAoDicJ",
"parentPublication": {
"id": "proceedings/compsacw/2014/3578/0",
"title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2009/3895/0/3895a699",
"title": "Joint Emotion-Topic Modeling for Social Affective Text Mining",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2009/3895a699/12OmNqBbHEo",
"parentPublication": {
"id": "proceedings/icdm/2009/3895/0",
"title": "2009 Ninth IEEE International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2010/4198/2/4198b225",
"title": "Real-Time Emotion Assessment Method Based on Physiological Signals",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2010/4198b225/12OmNwNOaMn",
"parentPublication": {
"id": "proceedings/iscid/2010/4198/2",
"title": "Computational Intelligence and Design, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2015/02/07029058",
"title": "Predicting Mood from Punctual Emotion Annotations on Videos",
"doi": null,
"abstractUrl": "/journal/ta/2015/02/07029058/13rRUB7a1e8",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2018/02/07736040",
"title": "ASCERTAIN: Emotion and Personality Recognition Using Commercial Sensors",
"doi": null,
"abstractUrl": "/journal/ta/2018/02/07736040/13rRUyY2938",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2022/5908/0/09953819",
"title": "ALOE: Active Learning based Opportunistic Experience Sampling for Smartphone Keyboard driven Emotion Self-report Collection",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2022/09953819/1IAJWCF565a",
"parentPublication": {
"id": "proceedings/acii/2022/5908/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2022/5490/0/10085996",
"title": "Interactive Machine Learning for Multimodal Affective Computing",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2022/10085996/1M666hWOLMQ",
"parentPublication": {
"id": "proceedings/aciiw/2022/5490/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a151",
"title": "Context-Aware Affective Graph Reasoning for Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a151/1cdOIiP0oMg",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925518",
"title": "Representation Learning for Emotion Recognition from Smartphone Keyboard Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925518/1fHGEFEjZOE",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09470976",
"title": "Multimodal Affective States Recognition Based on Multiscale CNNs and Biologically Inspired Decision Fusion Model",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09470976/1uSOwMJI2TS",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
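The TapSense record above describes classifying four emotion states from typing features alone and evaluating with AUCROC. Below is a minimal sketch of that kind of pipeline, assuming hypothetical per-session features (typing speed, inter-key delay, backspace rate, session length) and synthetic data; the random-forest classifier is an illustration, not the paper's model.

```python
# Sketch: classify emotion states from smartphone typing features,
# evaluated with per-class AUCROC as in the TapSense abstract.
# Feature names and the random data below are illustrative assumptions.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)

# Hypothetical per-session features: typing speed (chars/s),
# mean inter-key delay (s), backspace rate, session length (chars).
X = rng.normal(size=(400, 4))
# Four self-reported states: 0=happy, 1=sad, 2=stressed, 3=relaxed.
y = rng.integers(0, 4, size=400)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_tr, y_tr)

# One-vs-rest AUCROC per emotion, macro-averaged across the four states.
proba = clf.predict_proba(X_te)
print(f"macro AUCROC: {roc_auc_score(y_te, proba, multi_class='ovr', average='macro'):.2f}")

# On real labeled sessions, feature importances would play the role of the
# paper's finding that typing speed is the most discriminative signal.
print(dict(zip(["speed", "delay", "backspace", "length"],
               clf.feature_importances_.round(3))))
```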
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJf9WRhN84",
"doi": "10.1109/VRW55335.2022.00198",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"normalizedTitle": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"abstract": "Mid-air text entry on virtual keyboards suffers from the lack of tactile feedback, bringing challenges to both tap detection and input prediction. In this poster, we demonstrated the feasibility of efficient single-finger typing in mid-air through probabilistic touch modeling. We first collected users' typing data on different sizes of virtual keyboards. Based on analyzing the data, we derived an input prediction algorithm that incorporated probabilistic touch detection and elastic probabilistic decoding. In the evaluation study where the participants performed real text entry tasks with this technique, they reached a pick-up single-finger typing speed of 24.0 WPM with 2.8% word-level error rate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mid-air text entry on virtual keyboards suffers from the lack of tactile feedback, bringing challenges to both tap detection and input prediction. In this poster, we demonstrated the feasibility of efficient single-finger typing in mid-air through probabilistic touch modeling. We first collected users' typing data on different sizes of virtual keyboards. Based on analyzing the data, we derived an input prediction algorithm that incorporated probabilistic touch detection and elastic probabilistic decoding. In the evaluation study where the participants performed real text entry tasks with this technique, they reached a pick-up single-finger typing speed of 24.0 WPM with 2.8% word-level error rate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mid-air text entry on virtual keyboards suffers from the lack of tactile feedback, bringing challenges to both tap detection and input prediction. In this poster, we demonstrated the feasibility of efficient single-finger typing in mid-air through probabilistic touch modeling. We first collected users' typing data on different sizes of virtual keyboards. Based on analyzing the data, we derived an input prediction algorithm that incorporated probabilistic touch detection and elastic probabilistic decoding. In the evaluation study where the participants performed real text entry tasks with this technique, they reached a pick-up single-finger typing speed of 24.0 WPM with 2.8% word-level error rate.",
"fno": "840200a694",
"keywords": [
"Human Computer Interaction",
"Keyboards",
"Touch Sensitive Screens",
"Elastic Probabilistic Decoding",
"Text Entry Tasks",
"Pick Up Single Finger Typing Speed",
"2 8 Word Level Error Rate",
"Single Finger Mid Air Typing",
"Probabilistic Touch Modeling",
"Mid Air Text Entry",
"Virtual Keyboards Suffers",
"Tactile Feedback",
"Tap Detection",
"Efficient Single Finger Typing",
"Users",
"Input Prediction Algorithm",
"Probabilistic Touch Detection",
"Solid Modeling",
"Three Dimensional Displays",
"Conferences",
"Keyboards",
"Tactile Sensors",
"Virtual Reality",
"User Interfaces",
"Human Centered Computing X 2015 Human Computer Interaction HCI X 2015 Interaction Techniques X 2015 Text Input",
"Human Centered Computing X 2015 Human Computer Interaction HCI X 2015 Interaction Paradigms X 2015 Virtual Reality"
],
"authors": [
{
"affiliation": "Tsinghua University",
"fullName": "Xin Yi",
"givenName": "Xin",
"surname": "Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Chen Liang",
"givenName": "Chen",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Haozhan Chen",
"givenName": "Haozhan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California,Santa Barbara",
"fullName": "Jiuxu Song",
"givenName": "Jiuxu",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Chun Yu",
"givenName": "Chun",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Yuanchun Shil",
"givenName": "Yuanchun",
"surname": "Shil",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "694-695",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a692",
"articleId": "1CJd8TdT6hi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a696",
"articleId": "1CJeXaYYtd6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ictai/2016/4459/0/4459a165",
"title": "Modifying Keyboard Layout to Reduce Finger-Travel Distance",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2016/4459a165/12OmNvwC5up",
"parentPublication": {
"id": "proceedings/ictai/2016/4459/0",
"title": "2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2017/1710/0/1710a787",
"title": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a787/12OmNx1IwaL",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917636",
"title": "Preventing shoulder surfing using randomized augmented reality keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917636/19wAJpRnCE0",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794572",
"title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523889",
"title": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523889/1wpqwAIMiRy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a393",
"title": "Simulating Realistic Human Motion Trajectories of Mid-Air Gesture Typing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a393/1yeCVRK9bri",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a510",
"title": "MusiKeys: Investigating Auditory-Physical Feedback Replacement Technique for Mid-air Typing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a510/1yeQWHyOQes",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
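The abstract above pairs probabilistic touch detection with elastic probabilistic decoding. As a minimal sketch of the decoding half, the snippet below ranks candidate words by a Gaussian touch likelihood around assumed key centers plus a unigram word prior; the layout, lexicon, noise scale, and exact-length matching (the paper's decoding is elastic) are all illustrative assumptions, not the paper's model.

```python
# Sketch: Bayesian word decoding for mid-air taps, assuming an
# isotropic Gaussian likelihood around each key center and a
# unigram word prior. Layout, lexicon, and sigma are illustrative.
import math

# Hypothetical key centers on a unit-spaced QWERTY grid (x, y).
ROW0, ROW1, ROW2 = "qwertyuiop", "asdfghjkl", "zxcvbnm"
KEYS = {c: (x + 0.5 * r, float(r))
        for r, row in enumerate((ROW0, ROW1, ROW2))
        for x, c in enumerate(row)}

LEXICON = {"hat": 0.3, "hay": 0.2, "bat": 0.5}   # unigram prior (assumed)
SIGMA = 0.6                                       # touch noise scale (assumed)

def log_gauss(p, mu, sigma=SIGMA):
    """Log of an unnormalized 2D isotropic Gaussian."""
    return -((p[0] - mu[0]) ** 2 + (p[1] - mu[1]) ** 2) / (2 * sigma ** 2)

def decode(touches):
    """Return candidate words ranked by posterior log-probability."""
    scored = []
    for word, prior in LEXICON.items():
        if len(word) != len(touches):
            continue  # the paper matches elastically; exact length here
        ll = sum(log_gauss(t, KEYS[c]) for t, c in zip(touches, word))
        scored.append((ll + math.log(prior), word))
    return sorted(scored, reverse=True)

# Noisy taps near 'h', 'a', 't' on the assumed layout: despite the
# higher prior on "bat", the touch likelihood lets "hat" win.
print(decode([(6.4, 1.1), (0.6, 0.9), (4.3, 0.2)]))
```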
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJfr9wrq1i",
"doi": "10.1109/VRW55335.2022.00189",
"title": "AiRType: An Air-tapping Keyboard for Augmented Reality Environments",
"normalizedTitle": "AiRType: An Air-tapping Keyboard for Augmented Reality Environments",
"abstract": "We present AiRType for AR/VR HMDs that enables text entry through bare hands for more natural perception. The hand models in the virtual environment mirror hand movements of the user and user targets and selects the keys via hand models. AiRType fully leverages the additional dimension without restraining the interaction space by users' arm lengths. It can be attached to anywhere and can be scaled freely. We evaluated and compared AiRType with the baseline-the built-in keyboard of Magic Leap 1. AiRType shows 27% decrease in the error rate, 3.3% increase in character-per-second, and 9.4% increase in user satisfaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present AiRType for AR/VR HMDs that enables text entry through bare hands for more natural perception. The hand models in the virtual environment mirror hand movements of the user and user targets and selects the keys via hand models. AiRType fully leverages the additional dimension without restraining the interaction space by users' arm lengths. It can be attached to anywhere and can be scaled freely. We evaluated and compared AiRType with the baseline-the built-in keyboard of Magic Leap 1. AiRType shows 27% decrease in the error rate, 3.3% increase in character-per-second, and 9.4% increase in user satisfaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present AiRType for AR/VR HMDs that enables text entry through bare hands for more natural perception. The hand models in the virtual environment mirror hand movements of the user and user targets and selects the keys via hand models. AiRType fully leverages the additional dimension without restraining the interaction space by users' arm lengths. It can be attached to anywhere and can be scaled freely. We evaluated and compared AiRType with the baseline-the built-in keyboard of Magic Leap 1. AiRType shows 27% decrease in the error rate, 3.3% increase in character-per-second, and 9.4% increase in user satisfaction.",
"fno": "840200a676",
"keywords": [
"Augmented Reality",
"Keyboards",
"Text Analysis",
"User Interfaces",
"Virtual Reality",
"Ai R Type",
"Air Tapping Keyboard",
"Augmented Reality Environments",
"Text Entry",
"Bare Hands",
"Natural Perception",
"Hand Models",
"Virtual Environment",
"Hand Movements",
"User Targets",
"Users",
"User Satisfaction",
"Solid Modeling",
"Three Dimensional Displays",
"Conferences",
"Atmospheric Modeling",
"Keyboards",
"Virtual Environments",
"Switches",
"Human Centered Computing Text Input",
"Human Centered Computing",
"Empirical Studies In Interaction Design Human",
"Centered Computing",
"Ubiquitous And Mobile Devices"
],
"authors": [
{
"affiliation": "University of Central Florida",
"fullName": "Necip Fazıl Yıldıran",
"givenName": "Necip Fazıl",
"surname": "Yıldıran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Ülkü Meteriz-Yildiran",
"givenName": "Ülkü",
"surname": "Meteriz-Yildiran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "David Mohaisen",
"givenName": "David",
"surname": "Mohaisen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "676-677",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a674",
"articleId": "1CJdE39vY9G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a678",
"articleId": "1CJdEu0Syic",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892383",
"title": "Gesture-based augmented reality annotation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446138",
"title": "Guiding People in Complex Indoor Environments Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446138/13bd1fWcuDq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a765",
"title": "A Keylogging Inference Attack on Air-Tapping Keyboards in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a765/1CJcxOuPom4",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a447",
"title": "User Retention of Mobile Augmented Reality for Cultural Heritage Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a447/1J7Wl7wOJ68",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a702",
"title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/10042022",
"title": "Green Edge Intelligence Scheme for Mobile Keyboard Emoji Prediction",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/10042022/1KEtgm6tEpq",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2019/9148/0/08767420",
"title": "HIBEY: Hide the Keyboard in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm",
"parentPublication": {
"id": "proceedings/percom/2019/9148/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a387",
"title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382836",
"title": "Text Entry in Virtual Environments using Speech and a Midair Keyboard",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382836/1saZrgazKz6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523889",
"title": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523889/1wpqwAIMiRy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
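The AiRType record reports error-rate and characters-per-second improvements over a baseline keyboard. The sketch below computes those two standard text-entry metrics, assuming the common minimum-string-distance (Levenshtein) definition of error rate; the sample strings and timing are illustrative, and the paper may define its metrics differently.

```python
# Sketch: two standard text-entry metrics of the kind reported in the
# AiRType abstract: characters per second, and an error rate based on
# minimum string distance (MSD). Sample data is illustrative.

def msd(a: str, b: str) -> int:
    """Levenshtein distance between transcribed and target text."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

def error_rate(transcribed: str, target: str) -> float:
    """MSD normalized by the longer of the two strings."""
    return msd(transcribed, target) / max(len(transcribed), len(target))

def chars_per_second(transcribed: str, seconds: float) -> float:
    return len(transcribed) / seconds

target = "the quick brown fox"
typed = "the quick brwn fox"    # one deletion error (illustrative)
print(f"error rate: {error_rate(typed, target):.3f}")
print(f"CPS: {chars_per_second(typed, 12.0):.2f}")
```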
|
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrR2KZbVXq",
"doi": "10.1109/ISMAR55827.2022.00014",
"title": "Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality",
"normalizedTitle": "Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality",
"abstract": "For touch typists, using a physical keyboard ensures optimal text entry task performance in immersive virtual environments. However, successful typing depends on the user’s ability to accurately position their hands on the keyboard after performing other, non-keyboard tasks. Finding the correct hand position depends on sensory feedback, including visual information. We designed and conducted a user study where we investigated the impact of visual representations of the keyboard and users’ hands on the time required to place hands on the homing bars of a keyboard after performing other tasks. We found that this keyboard homing time decreased as the fidelity of visual representations of the keyboard and hands increased, with a video pass-through condition providing the best performance. We discuss additional impacts of visual representations of a user’s hands and the keyboard on typing performance and user experience in virtual reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For touch typists, using a physical keyboard ensures optimal text entry task performance in immersive virtual environments. However, successful typing depends on the user’s ability to accurately position their hands on the keyboard after performing other, non-keyboard tasks. Finding the correct hand position depends on sensory feedback, including visual information. We designed and conducted a user study where we investigated the impact of visual representations of the keyboard and users’ hands on the time required to place hands on the homing bars of a keyboard after performing other tasks. We found that this keyboard homing time decreased as the fidelity of visual representations of the keyboard and hands increased, with a video pass-through condition providing the best performance. We discuss additional impacts of visual representations of a user’s hands and the keyboard on typing performance and user experience in virtual reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For touch typists, using a physical keyboard ensures optimal text entry task performance in immersive virtual environments. However, successful typing depends on the user’s ability to accurately position their hands on the keyboard after performing other, non-keyboard tasks. Finding the correct hand position depends on sensory feedback, including visual information. We designed and conducted a user study where we investigated the impact of visual representations of the keyboard and users’ hands on the time required to place hands on the homing bars of a keyboard after performing other tasks. We found that this keyboard homing time decreased as the fidelity of visual representations of the keyboard and hands increased, with a video pass-through condition providing the best performance. We discuss additional impacts of visual representations of a user’s hands and the keyboard on typing performance and user experience in virtual reality.",
"fno": "532500a008",
"keywords": [
"Ergonomics",
"Keyboards",
"Mobile Handsets",
"User Interfaces",
"Virtual Reality",
"Correct Hand Position",
"Immersive Virtual Environments",
"Intermittent Typing",
"Keyboard Homing Time",
"Nonkeyboard Tasks",
"Optimal Text Entry Task Performance",
"Physical Keyboard",
"Successful Typing",
"Typing Performance",
"User Experience",
"Users",
"Virtual Reality",
"Visual Information",
"Visual Representations",
"Performance Evaluation",
"Visualization",
"Keyboards",
"Virtual Environments",
"User Experience",
"Task Analysis",
"Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Devices",
"Keyboards Human Centered Computing",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Virginia Tech,Center for Human-Computer Interaction & Department of Computer Science,Blacksburg,VA,USA",
"fullName": "Alexander Giovannelli",
"givenName": "Alexander",
"surname": "Giovannelli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Center for Human-Computer Interaction & Department of Computer Science,Blacksburg,VA,USA",
"fullName": "Lee Lisle",
"givenName": "Lee",
"surname": "Lisle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Center for Human-Computer Interaction & Department of Computer Science,Blacksburg,VA,USA",
"fullName": "Doug A. Bowman",
"givenName": "Doug A.",
"surname": "Bowman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "8-17",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1JrR2zd7Dpu",
"name": "pismar202253250-09994946s1-mm_532500a008.zip",
"size": "142 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09994946s1-mm_532500a008.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "532500a001",
"articleId": "1JrRdMd6OZi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a018",
"articleId": "1JrQSj43WV2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2017/1710/0/1710a787",
"title": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a787/12OmNx1IwaL",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699306",
"title": "HiKeyb: High-Efficiency Mixed Reality System for Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699306/19F1UXTzDos",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2017/4338/0/07917636",
"title": "Preventing shoulder surfing using randomized augmented reality keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2017/07917636/19wAJpRnCE0",
"parentPublication": {
"id": "proceedings/percom-workshops/2017/4338/0",
"title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874256",
"title": "Efficient Flower Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049695",
"title": "CrowbarLimbs: A Fatigue-Reducing Virtual Reality Text Entry Metaphor",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049695/1KYowtn3pok",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798238",
"title": "Text Typing in VR Using Smartphones Touchscreen and HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798238/1cJ0Qw94bi8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090425",
"title": "Using Augmented Reality to Assist Seated Office Workers’ Data Entry Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090425/1jIxnbCAMxy",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09430933",
"title": "Exploratory Analysis of Nose-gesture for Smartphone Aided Typing for Users with Clinical Conditions",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09430933/1tROMoFWkVy",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
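The study above measures keyboard homing time, the interval from finishing a non-keyboard task to having both hands placed on the keyboard's homing bars. Below is a minimal sketch of extracting that interval from a timestamped event log; the event names and values are hypothetical, not the study's logging format.

```python
# Sketch: computing keyboard homing time from a hypothetical event log,
# as in the study above: the time from the end of a non-keyboard task
# until both hands rest on the homing bars. All values are illustrative.
events = [
    (10.00, "task_end"),          # user finishes the non-keyboard task
    (11.35, "left_hand_homed"),   # left index finger on the 'f' homing bar
    (11.80, "right_hand_homed"),  # right index finger on the 'j' homing bar
    (25.10, "task_end"),
    (26.05, "right_hand_homed"),
    (26.40, "left_hand_homed"),
]

def homing_times(log):
    """Yield one homing time per trial: task_end -> both hands homed."""
    start, homed = None, set()
    for t, name in log:
        if name == "task_end":
            start, homed = t, set()
        elif name.endswith("_homed") and start is not None:
            homed.add(name)
            if {"left_hand_homed", "right_hand_homed"} <= homed:
                yield t - start
                start = None

print([round(ht, 2) for ht in homing_times(events)])  # [1.8, 1.3]
```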
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0Qw94bi8",
"doi": "10.1109/VR.2019.8798238",
"title": "Text Typing in VR Using Smartphones Touchscreen and HMD",
"normalizedTitle": "Text Typing in VR Using Smartphones Touchscreen and HMD",
"abstract": "In this work, we were interested in using smartphone touchscreen keyboard for text typing in virtual environments (VEs) with head-mounted display. We carried out an experiment comparing the smartphone to the ordinary devices: gamepad and HTC Vive Controllers. We represented the touchscreen keyboard in the VE with a virtual interface and the fingertips with tracked green circles. A confirm-on-release paradigm was employed for text typing. Results showed that the smartphone did not fully outperformed the other devices. However, unlike the other devices, smartphones users tended to correct progressively their error while typing thanks to their familiarity with the device.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we were interested in using smartphone touchscreen keyboard for text typing in virtual environments (VEs) with head-mounted display. We carried out an experiment comparing the smartphone to the ordinary devices: gamepad and HTC Vive Controllers. We represented the touchscreen keyboard in the VE with a virtual interface and the fingertips with tracked green circles. A confirm-on-release paradigm was employed for text typing. Results showed that the smartphone did not fully outperformed the other devices. However, unlike the other devices, smartphones users tended to correct progressively their error while typing thanks to their familiarity with the device.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we were interested in using smartphone touchscreen keyboard for text typing in virtual environments (VEs) with head-mounted display. We carried out an experiment comparing the smartphone to the ordinary devices: gamepad and HTC Vive Controllers. We represented the touchscreen keyboard in the VE with a virtual interface and the fingertips with tracked green circles. A confirm-on-release paradigm was employed for text typing. Results showed that the smartphone did not fully outperformed the other devices. However, unlike the other devices, smartphones users tended to correct progressively their error while typing thanks to their familiarity with the device.",
"fno": "08798238",
"keywords": [
"Helmet Mounted Displays",
"Keyboards",
"Smart Phones",
"Text Analysis",
"Touch Sensitive Screens",
"User Interfaces",
"Virtual Reality",
"Text Typing",
"Smartphone Touchscreen Keyboard",
"Virtual Environments",
"Head Mounted Display",
"HTC Vive Controllers",
"Virtual Interface",
"VR",
"HMD",
"Gamepad",
"Smart Phones",
"Keyboards",
"Error Analysis",
"Resists",
"Virtual Reality",
"Prototypes",
"Writing",
"Human Centered Computing X 2014 Virtual Reality X 2014 Touch Screens",
"Human Centered Computing X 2014 User Interface Design"
],
"authors": [
{
"affiliation": "University of Toronto, Canada",
"fullName": "Sabah Boustila",
"givenName": "Sabah",
"surname": "Boustila",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tohoku University, Japan",
"fullName": "Thomas Guégan",
"givenName": "Thomas",
"surname": "Guégan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto, Canada",
"fullName": "Kazuki Takashima",
"givenName": "Kazuki",
"surname": "Takashima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto, Canada",
"fullName": "Yoshifumi Kitamura",
"givenName": "Yoshifumi",
"surname": "Kitamura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "860-861",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798214",
"articleId": "1cJ0QSLRO6Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797980",
"articleId": "1cJ14PgErfi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2015/7457/0/07357217",
"title": "A syntax-directed keyboard extension for writing source code on touchscreen devices",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2015/07357217/12OmNB0Fxid",
"parentPublication": {
"id": "proceedings/vlhcc/2015/7457/0",
"title": "2015 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2016/8845/2/8845b608",
"title": "BrailleÉcran: A Braille Approach to Text Entry on Smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2016/8845b608/12OmNBeRtMR",
"parentPublication": {
"id": "proceedings/compsac/2016/8845/1",
"title": "2016 IEEE 40th Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipccc/2016/5252/0/07820656",
"title": "User authentication and identification on smartphones by incorporating capacitive touchscreen",
"doi": null,
"abstractUrl": "/proceedings-article/ipccc/2016/07820656/12OmNscxj5u",
"parentPublication": {
"id": "proceedings/ipccc/2016/5252/0",
"title": "2016 IEEE 35th International Performance Computing and Communications Conference (IPCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2018/4353/0/435301a144",
"title": "EyeTell: Video-Assisted Touchscreen Keystroke Inference from Eye Movements",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2018/435301a144/12OmNzC5SIa",
"parentPublication": {
"id": "proceedings/sp/2018/4353/0",
"title": "2018 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a008",
"title": "Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a008/1JrR2KZbVXq",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsid/2019/0409/0/040900a539",
"title": "Continuous Transparent Mobile Device Touchscreen Soft Keyboard Biometric Authentication",
"doi": null,
"abstractUrl": "/proceedings-article/vlsid/2019/040900a539/1a3wT0UUlG0",
"parentPublication": {
"id": "proceedings/vlsid/2019/0409/0",
"title": "2019 32nd International Conference on VLSI Design and 2019 18th International Conference on Embedded Systems (VLSID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2020/9325/0/09232570",
"title": "Touchless Typing Using Head Movement-based Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2020/09232570/1o56Avh0Bhu",
"parentPublication": {
"id": "proceedings/bigmm/2020/9325/0",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsecs-icocsim/2021/1407/0/140700a394",
"title": "Tree-based Ensemble Learning for Stress Detection by Typing Behavior on Smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/icsecs-icocsim/2021/140700a394/1wYlzlFR1D2",
"parentPublication": {
"id": "proceedings/icsecs-icocsim/2021/1407/0",
"title": "2021 International Conference on Software Engineering & Computer Systems and 4th International Conference on Computational Science and Information Management (ICSECS-ICOCSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
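The abstract above employs a confirm-on-release paradigm: touching previews a key, and the character is committed only when the finger lifts, so the user can slide to a neighboring key before releasing. Below is a minimal sketch of that state machine over a hypothetical touch-event stream; the hit-testing and single-row layout are illustrative assumptions, not the paper's implementation.

```python
# Sketch: a confirm-on-release text-entry state machine, as used in the
# paper above: touching highlights a key, sliding re-targets it, and the
# character is committed only on finger lift. Events are illustrative.

def key_at(x, y):
    """Hypothetical hit-test: map a touch point to a key label."""
    row = "qwertyuiop"
    return row[min(int(x), len(row) - 1)]

def run(events):
    buffer, preview = [], None
    for kind, x, y in events:
        if kind in ("down", "move"):
            preview = key_at(x, y)     # highlight only; nothing committed
        elif kind == "up" and preview is not None:
            buffer.append(preview)     # commit on release
            preview = None
    return "".join(buffer)

# The user presses near 'w', slides to 'e', and releases: 'e' commits.
stream = [("down", 1.2, 0.0), ("move", 2.4, 0.0), ("up", 2.4, 0.0)]
print(run(stream))  # -> "e"
```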
|
{
"proceeding": {
"id": "1o56xuliEpi",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"acronym": "bigmm",
"groupId": "1808144",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1o56Avh0Bhu",
"doi": "10.1109/BigMM50055.2020.00025",
"title": "Touchless Typing Using Head Movement-based Gestures",
"normalizedTitle": "Touchless Typing Using Head Movement-based Gestures",
"abstract": "In this paper, we propose a novel touchless typing interface that makes use of an on-screen QWERTY keyboard and a smartphone camera. The keyboard was divided into nine color-coded clusters. The user moved their head toward clusters, which contained the letters that they wanted to type. A front-facing smartphone camera recorded the head movements. A bidirectional GRU based model which used pre-trained embedding rich in head pose features was employed to translate the recordings into cluster sequences. The model achieved an accuracy of 96.78% and 86.81% under intra- and inter-user scenarios, respectively, over a dataset of 2234 video sequences collected from 22 users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a novel touchless typing interface that makes use of an on-screen QWERTY keyboard and a smartphone camera. The keyboard was divided into nine color-coded clusters. The user moved their head toward clusters, which contained the letters that they wanted to type. A front-facing smartphone camera recorded the head movements. A bidirectional GRU based model which used pre-trained embedding rich in head pose features was employed to translate the recordings into cluster sequences. The model achieved an accuracy of 96.78% and 86.81% under intra- and inter-user scenarios, respectively, over a dataset of 2234 video sequences collected from 22 users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a novel touchless typing interface that makes use of an on-screen QWERTY keyboard and a smartphone camera. The keyboard was divided into nine color-coded clusters. The user moved their head toward clusters, which contained the letters that they wanted to type. A front-facing smartphone camera recorded the head movements. A bidirectional GRU based model which used pre-trained embedding rich in head pose features was employed to translate the recordings into cluster sequences. The model achieved an accuracy of 96.78% and 86.81% under intra- and inter-user scenarios, respectively, over a dataset of 2234 video sequences collected from 22 users.",
"fno": "09232570",
"keywords": [
"Cameras",
"Feature Extraction",
"Gesture Recognition",
"Human Computer Interaction",
"Image Motion Analysis",
"Image Sequences",
"Keyboards",
"Pose Estimation",
"Smart Phones",
"Video Signal Processing",
"Touchless Typing Interface",
"On Screen QWERTY Keyboard",
"Smartphone Camera",
"Color Coded Clusters",
"Head Movements",
"Bidirectional GRU Based Model",
"Cluster Sequences",
"Interuser Scenarios",
"Video Sequences",
"Keyboards",
"Cameras",
"Magnetic Heads",
"Tracking",
"Speech Recognition",
"Solid Modeling",
"Portable Computers",
"Touchless Typing",
"Contactless Typing",
"Gesture Recognition",
"Head Movement",
"Deep Learning",
"And Accessibility"
],
"authors": [
{
"affiliation": "Delhi Technological University,India",
"fullName": "Shivam Rustagi",
"givenName": "Shivam",
"surname": "Rustagi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Delhi Technological University,India",
"fullName": "Aakash Garg",
"givenName": "Aakash",
"surname": "Garg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indraprastha Institute of Information Technology Delhi,India",
"fullName": "Pranay Raj Anand",
"givenName": "Pranay Raj",
"surname": "Anand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Haverford College,USA",
"fullName": "Rajesh Kumar",
"givenName": "Rajesh",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indraprastha Institute of Information Technology Delhi,India",
"fullName": "Yaman Kumar",
"givenName": "Yaman",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indraprastha Institute of Information Technology Delhi,India",
"fullName": "Rajiv Ratn Shah",
"givenName": "Rajiv Ratn",
"surname": "Shah",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bigmm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "112-119",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9325-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09232539",
"articleId": "1o56yrUvzMI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09232486",
"articleId": "1o56Byj8zXW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2015/9711/0/5720a452",
"title": "Evaluating Real-Time Mirroring of Head Gestures Using Smart Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a452/12OmNBr4esM",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a400",
"title": "Recognizing Visual Signatures of Spontaneous Head Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a400/12OmNwF0BJx",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460207",
"title": "Optical flow based Head Movement and Gesture Analyzer (OHMeGA)",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460207/12OmNz2kqlG",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a888",
"title": "Flick Typing: Toward A New XR Text Input System Based on 3D Gestures and Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a888/1CJe2bLhGbC",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2019/0089/0/08756509",
"title": "Automated Measurement of Head Movement Synchrony during Dyadic Depression Severity Interviews",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2019/08756509/1bzYxlcrzkk",
"parentPublication": {
"id": "proceedings/fg/2019/0089/0",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798238",
"title": "Text Typing in VR Using Smartphones Touchscreen and HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798238/1cJ0Qw94bi8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2020/11/08798759",
"title": "Your Eyes Reveal Your Secrets: An Eye Movement Based Password Inference on Smartphone",
"doi": null,
"abstractUrl": "/journal/tm/2020/11/08798759/1cumSK1WLKw",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a327",
"title": "ReenactNet: Real-time Full Head Reenactment",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a327/1kecI9LUOWs",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
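The touchless-typing record above translates head-movement recordings into sequences over nine keyboard clusters with a bidirectional GRU over head-pose features. The PyTorch sketch below shows a per-segment classifier in that spirit, assuming fixed-length sequences of pre-extracted pose features; the feature and hidden dimensions, mean pooling, and toy input are illustrative assumptions, not the paper's architecture.

```python
# Sketch: a bidirectional GRU that classifies one head-movement segment
# into one of nine keyboard clusters, following the idea in the abstract
# above. Sizes and the toy input below are illustrative assumptions.
import torch
import torch.nn as nn

class ClusterClassifier(nn.Module):
    def __init__(self, feat_dim=6, hidden=64, n_clusters=9):
        super().__init__()
        self.gru = nn.GRU(feat_dim, hidden, batch_first=True,
                          bidirectional=True)
        self.head = nn.Linear(2 * hidden, n_clusters)

    def forward(self, x):                  # x: (batch, time, feat_dim)
        out, _ = self.gru(x)               # (batch, time, 2 * hidden)
        pooled = out.mean(dim=1)           # average over time steps
        return self.head(pooled)           # (batch, n_clusters) logits

# Toy batch: 4 segments, 30 frames each, 6 pose features per frame
# (e.g. pitch/yaw/roll and their deltas - an assumption).
x = torch.randn(4, 30, 6)
logits = ClusterClassifier()(x)
print(logits.argmax(dim=1))                # predicted cluster per segment
```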
|
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeCVRK9bri",
"doi": "10.1109/ISMAR52148.2021.00056",
"title": "Simulating Realistic Human Motion Trajectories of Mid-Air Gesture Typing",
"normalizedTitle": "Simulating Realistic Human Motion Trajectories of Mid-Air Gesture Typing",
"abstract": "The eventual success of many AR and VR intelligent interactive systems relies on the ability to collect user motion data at large scale. Realistic simulation of human motion trajectories is a potential solution to this problem. Simulated user motion data can facilitate prototyping and speed up the design process. There are also potential benefits in augmenting training data for deep learning-based AR/VR applications to improve performance. However, the generation of realistic motion data is nontrivial. In this paper, we examine the specific challenge of simulating index finger movement data to inform mid-air gesture keyboard design. The mid-air gesture keyboard is deployed on an optical see-through display that allows the user to enter text by articulating word gesture patterns with their physical index finger in the vicinity of a visualized keyboard layout. We propose and compare four different approaches to simulating this type of motion data, including a Jerk-Minimization model, a Recurrent Neural Network (RNN)-based generative model, and a Generative Adversarial Network (GAN)-based model with two modes: style transfer and data alteration. We also introduce a procedure for validating the quality of the generated trajectories in terms of realism and diversity. The GAN-based model shows significant potential for generating synthetic motion trajectories to facilitate design and deep learning for advanced gesture keyboards deployed in AR and VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The eventual success of many AR and VR intelligent interactive systems relies on the ability to collect user motion data at large scale. Realistic simulation of human motion trajectories is a potential solution to this problem. Simulated user motion data can facilitate prototyping and speed up the design process. There are also potential benefits in augmenting training data for deep learning-based AR/VR applications to improve performance. However, the generation of realistic motion data is nontrivial. In this paper, we examine the specific challenge of simulating index finger movement data to inform mid-air gesture keyboard design. The mid-air gesture keyboard is deployed on an optical see-through display that allows the user to enter text by articulating word gesture patterns with their physical index finger in the vicinity of a visualized keyboard layout. We propose and compare four different approaches to simulating this type of motion data, including a Jerk-Minimization model, a Recurrent Neural Network (RNN)-based generative model, and a Generative Adversarial Network (GAN)-based model with two modes: style transfer and data alteration. We also introduce a procedure for validating the quality of the generated trajectories in terms of realism and diversity. The GAN-based model shows significant potential for generating synthetic motion trajectories to facilitate design and deep learning for advanced gesture keyboards deployed in AR and VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The eventual success of many AR and VR intelligent interactive systems relies on the ability to collect user motion data at large scale. Realistic simulation of human motion trajectories is a potential solution to this problem. Simulated user motion data can facilitate prototyping and speed up the design process. There are also potential benefits in augmenting training data for deep learning-based AR/VR applications to improve performance. However, the generation of realistic motion data is nontrivial. In this paper, we examine the specific challenge of simulating index finger movement data to inform mid-air gesture keyboard design. The mid-air gesture keyboard is deployed on an optical see-through display that allows the user to enter text by articulating word gesture patterns with their physical index finger in the vicinity of a visualized keyboard layout. We propose and compare four different approaches to simulating this type of motion data, including a Jerk-Minimization model, a Recurrent Neural Network (RNN)-based generative model, and a Generative Adversarial Network (GAN)-based model with two modes: style transfer and data alteration. We also introduce a procedure for validating the quality of the generated trajectories in terms of realism and diversity. The GAN-based model shows significant potential for generating synthetic motion trajectories to facilitate design and deep learning for advanced gesture keyboards deployed in AR and VR.",
"fno": "015800a393",
"keywords": [
"Computer Animation",
"Gesture Recognition",
"Human Computer Interaction",
"Interactive Systems",
"Keyboards",
"Learning Artificial Intelligence",
"Recurrent Neural Nets",
"Virtual Reality",
"Jerk Minimization Model",
"Recurrent Neural Network Based Generative Model",
"Generative Adversarial Network Based Model",
"Data Alteration",
"Generated Trajectories",
"GAN Based Model",
"Synthetic Motion Trajectories",
"Advanced Gesture Keyboards",
"Realistic Human Motion Trajectories",
"Eventual Success",
"VR Intelligent Interactive Systems",
"Realistic Simulation",
"Simulated User Motion Data",
"Design Process",
"Augmenting Training Data",
"Realistic Motion Data",
"Index Finger Movement Data",
"Mid Air Gesture Keyboard Design",
"Word Gesture Patterns",
"Physical Index Finger",
"Visualized Keyboard Layout",
"Training",
"Solid Modeling",
"Adaptation Models",
"Layout",
"Fingers",
"Keyboards",
"Training Data",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Techniques",
"Text Input"
],
"authors": [
{
"affiliation": "University of Cambridge",
"fullName": "Junxiao Shen",
"givenName": "Junxiao",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Cambridge",
"fullName": "John Dudley",
"givenName": "John",
"surname": "Dudley",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Cambridge",
"fullName": "Per Ola Kristensson",
"givenName": "Per Ola",
"surname": "Kristensson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "393-402",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "015800a386",
"articleId": "1yeD59qEabK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a403",
"articleId": "1yeD8DDATSw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2013/5009/0/5009a439",
"title": "VECAR: Virtual English Classroom with Markerless Augmented Reality and Intuitive Gesture Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a439/12OmNwJPMVw",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2017/1710/0/1710a787",
"title": "Analyzing the Impact of Cognitive Load in Evaluating Gaze-Based Typing",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a787/12OmNx1IwaL",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2018/4235/0/08506501",
"title": "The design and evaluation of a gestural keyboard for entering programming code on mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2018/08506501/17D45VUZMWc",
"parentPublication": {
"id": "proceedings/vlhcc/2018/4235/0",
"title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a888",
"title": "Flick Typing: Toward A New XR Text Input System Based on 3D Gestures and Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a888/1CJe2bLhGbC",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a702",
"title": "Personalization of a Mid-Air Gesture Keyboard using Multi-Objective Bayesian Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a702/1JrQW09ujvi",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2019/9148/0/08767420",
"title": "HIBEY: Hide the Keyboard in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm",
"parentPublication": {
"id": "proceedings/percom/2019/9148/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2020/4716/0/09156084",
"title": "MyoKey: Surface Electromyography and Inertial Motion Sensing-based Text Entry in AR",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2020/09156084/1m1jB6aqf3W",
"parentPublication": {
"id": "proceedings/percom-workshops/2020/4716/0",
"title": "2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2020/9325/0/09232570",
"title": "Touchless Typing Using Head Movement-based Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2020/09232570/1o56Avh0Bhu",
"parentPublication": {
"id": "proceedings/bigmm/2020/9325/0",
"title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523889",
"title": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523889/1wpqwAIMiRy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a510",
"title": "MusiKeys: Investigating Auditory-Physical Feedback Replacement Technique for Mid-air Typing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a510/1yeQWHyOQes",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQWHyOQes",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00124",
"title": "MusiKeys: Investigating Auditory-Physical Feedback Replacement Technique for Mid-air Typing",
"normalizedTitle": "MusiKeys: Investigating Auditory-Physical Feedback Replacement Technique for Mid-air Typing",
"abstract": "Augmented reality headsets have great potential to transform the modern workplace as the technology improves. However, a major obstacle in bringing AR headsets into workplaces is the need for a precise, virtual, mid-air typing solution. Transitioning from physical to virtual keyboards is difficult due to loss of many physical affordances, such as the ability to tell between touching and pressing a key. We present our system, MusiKeys, as an investigation into the effects of presenting a user with auditory tones and effects as replacements for every kind of feedback that could ordinarily be perceived through touching a keyboard.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented reality headsets have great potential to transform the modern workplace as the technology improves. However, a major obstacle in bringing AR headsets into workplaces is the need for a precise, virtual, mid-air typing solution. Transitioning from physical to virtual keyboards is difficult due to loss of many physical affordances, such as the ability to tell between touching and pressing a key. We present our system, MusiKeys, as an investigation into the effects of presenting a user with auditory tones and effects as replacements for every kind of feedback that could ordinarily be perceived through touching a keyboard.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented reality headsets have great potential to transform the modern workplace as the technology improves. However, a major obstacle in bringing AR headsets into workplaces is the need for a precise, virtual, mid-air typing solution. Transitioning from physical to virtual keyboards is difficult due to loss of many physical affordances, such as the ability to tell between touching and pressing a key. We present our system, MusiKeys, as an investigation into the effects of presenting a user with auditory tones and effects as replacements for every kind of feedback that could ordinarily be perceived through touching a keyboard.",
"fno": "129800a510",
"keywords": [
"Augmented Reality",
"Feedback",
"Keyboards",
"AR Headsets",
"Workplaces",
"Mid Air Typing Solution",
"Virtual Keyboards",
"Musi Keys",
"Auditory Tones",
"Augmented Reality Headsets",
"Auditory Physical Feedback Replacement",
"Headphones",
"Affordances",
"Employment",
"Keyboards",
"Music",
"Transforms",
"Pressing",
"Human Centered Computing",
"Virtual Augmented Reality",
"User Interface Design"
],
"authors": [
{
"affiliation": "Virginia Tech",
"fullName": "Alexander Krasner",
"givenName": "Alexander",
"surname": "Krasner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Grimley Widgets, Inc.",
"fullName": "Joseph L. Gabbard",
"givenName": "Joseph L.",
"surname": "Gabbard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Nottingham",
"fullName": "Gary Burnett",
"givenName": "Gary",
"surname": "Burnett",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "510-512",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeQW0LVMzu",
"name": "pismar-adjunct202112980-09585813s1-mm_129800a510.zip",
"size": "130 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar-adjunct202112980-09585813s1-mm_129800a510.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "129800a508",
"articleId": "1yeQVyeG0Pm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a513",
"articleId": "1yeQTODhIJ2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090466",
"title": "Auditory Stimulation on Touching a Virtual Object Outside a user’s Field of View",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090466/1jIxjlFlp0k",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523889",
"title": "Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523889/1wpqwAIMiRy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a393",
"title": "Simulating Realistic Human Motion Trajectories of Mid-Air Gesture Typing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a393/1yeCVRK9bri",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYoKmw",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAtK4hi",
"doi": "10.1109/ISMAR.2013.6671780",
"title": "Image-guided simulation of heterogeneous tissue deformation for augmented reality during hepatic surgery",
"normalizedTitle": "Image-guided simulation of heterogeneous tissue deformation for augmented reality during hepatic surgery",
"abstract": "This paper presents a method for real-time augmentation of vascular network and tumors during minimally invasive liver surgery. Internal structures computed from pre-operative CT scans can be overlaid onto the laparoscopic view for surgery guidance. Compared to state-of-the-art methods, our method uses a real-time biomechanical model to compute a volumetric displacement field from partial three-dimensional liver surface motion. This permits to properly handle the motion of internal structures even in the case of anisotropic or heterogeneous tissues, as it is the case for the liver and many anatomical structures. Real-time augmentation results are presented on in vivo and phantom data and illustrate the benefits of such an approach for minimally invasive surgery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a method for real-time augmentation of vascular network and tumors during minimally invasive liver surgery. Internal structures computed from pre-operative CT scans can be overlaid onto the laparoscopic view for surgery guidance. Compared to state-of-the-art methods, our method uses a real-time biomechanical model to compute a volumetric displacement field from partial three-dimensional liver surface motion. This permits to properly handle the motion of internal structures even in the case of anisotropic or heterogeneous tissues, as it is the case for the liver and many anatomical structures. Real-time augmentation results are presented on in vivo and phantom data and illustrate the benefits of such an approach for minimally invasive surgery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a method for real-time augmentation of vascular network and tumors during minimally invasive liver surgery. Internal structures computed from pre-operative CT scans can be overlaid onto the laparoscopic view for surgery guidance. Compared to state-of-the-art methods, our method uses a real-time biomechanical model to compute a volumetric displacement field from partial three-dimensional liver surface motion. This permits to properly handle the motion of internal structures even in the case of anisotropic or heterogeneous tissues, as it is the case for the liver and many anatomical structures. Real-time augmentation results are presented on in vivo and phantom data and illustrate the benefits of such an approach for minimally invasive surgery.",
"fno": "06671780",
"keywords": [
"Liver",
"Biological System Modeling",
"Computational Modeling",
"Three Dimensional Displays",
"Surgery",
"Deformable Models",
"Biomechanics"
],
"authors": [
{
"affiliation": "Shacra Team, Inria, France",
"fullName": "Nazim Haouchine",
"givenName": "Nazim",
"surname": "Haouchine",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shacra Team, Inria, France",
"fullName": "Jeremie Dequidt",
"givenName": "Jeremie",
"surname": "Dequidt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IHU, Strasbourg, France",
"fullName": "Igor Peterlik",
"givenName": "Igor",
"surname": "Peterlik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magrit Team, Inria, France",
"fullName": "Erwan Kerrien",
"givenName": "Erwan",
"surname": "Kerrien",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magrit Team, Inria, France",
"fullName": "Marie-Odile Berger",
"givenName": "Marie-Odile",
"surname": "Berger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shacra Team, Inria, France",
"fullName": "Stephane Cotin",
"givenName": "Stephane",
"surname": "Cotin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "199-208",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2869-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06671779",
"articleId": "12OmNBrlPB1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06671781",
"articleId": "12OmNwNwzKj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciibms/2017/6664/0/08279688",
"title": "Verification of accuracy of knife tip position estimation in liver surgery support system",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2017/08279688/12OmNAk5HQk",
"parentPublication": {
"id": "proceedings/iciibms/2017/6664/0",
"title": "2017 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761741",
"title": "Real-time update of 3D deformable models for computer aided liver surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761741/12OmNrMHOmG",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icas/2008/3093/0/3093a260",
"title": "Automation of Manual Tasks for Minimally Invasive Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icas/2008/3093a260/12OmNrNh0BR",
"parentPublication": {
"id": "proceedings/icas/2008/3093/0",
"title": "2008 4th International Conference on Autonomic and Autonomous Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260039",
"title": "Microfabricated Instruments for Fetal Cardiac Surgery: Experiments on Haptic Tissue Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260039/12OmNvCRgkA",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a264",
"title": "The Simulation of Delineation and Splitting in Virtual Liver Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a264/12OmNyPQ4SW",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/segah/2014/4823/0/07067098",
"title": "Spatially aware mobile interface for 3D visualization and interactive surgery planning",
"doi": null,
"abstractUrl": "/proceedings-article/segah/2014/07067098/12OmNzDNtuH",
"parentPublication": {
"id": "proceedings/segah/2014/4823/0",
"title": "2014 IEEE 3rd International Conference on Serious Games and Applications for Health (SeGAH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iacsit-sc/2009/3653/0/3653a475",
"title": "Modeling and Simulation of Graspers Force in Minimally Invasive Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/iacsit-sc/2009/3653a475/12OmNzt0IC5",
"parentPublication": {
"id": "proceedings/iacsit-sc/2009/3653/0",
"title": "Computer Science and Information Technology, International Association of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/06/mcg2006060036",
"title": "Liver Surgery Planning Using Virtual Reality",
"doi": null,
"abstractUrl": "/magazine/cg/2006/06/mcg2006060036/13rRUx0xPCD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/06987340",
"title": "Impact of Soft Tissue Heterogeneity on Augmented Reality for Liver Surgery",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/06987340/13rRUyuegp8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d588",
"title": "Minimally Invasive Surgery for Sparse Neural Networks in Contrastive Manner",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d588/1yeJqvlKcfK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxHrylY",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"acronym": "cad-graphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC2fGtO",
"doi": "10.1109/CADGRAPHICS.2015.22",
"title": "Novel, Robust, and Efficient Guidewire Modeling for PCI Surgery Simulator Based on Heterogeneous and Integrated Chain-Mails",
"normalizedTitle": "Novel, Robust, and Efficient Guidewire Modeling for PCI Surgery Simulator Based on Heterogeneous and Integrated Chain-Mails",
"abstract": "Despite the long R&D history of interactive minimally-invasive surgery and therapy simulations, the guide wire/catheter behavior modeling remains challenging in Percutaneous Coronary Intervention (PCI) surgery simulators. This is primarily due to the heterogeneous heart physiological structures and complex intravascular inter-dynamic procedures. To ameliorate, this paper advocates a novel, robust, and efficient guide wire/catheter modeling method based on heterogeneous and integrated chain-mails, that can afford medical practitioners and trainees the unique opportunity to experience the entire guide wire-dominant PCI procedures in virtual environments as our model aims to mimic what occurs in clinical settings. Our approach's originality is primarily founded upon this new method's unconditional stability, real time performance, flexibility, and high-fidelity realism for guide wire/catheter simulation. Considering the front end of the guide wire has different stiffness with its conjunctive slender body and the guide wire length is adaptive to the surrounding environment, we propose to model the spatially-varying six degree of freedom behaviors by solely resorting to the generalized 3D chain-mails. Meanwhile, to effectively accommodate the motion constraints caused by the beating vessels and flowing blood, we integrate heterogeneous volumetric chain mails to streamline guide wire modeling and its interaction with surrounding substances. By dynamically coupling guide wire chain-mails with the surrounding media via virtual links, we are capable of efficiently simulating the collision-involved interdynamic behaviors of the guide wire. Finally, we showcase a PCI prototype simulator equipped with hap tic feedback for mimicing the guide wire intervention therapy, including pushing, pulling, and twisting operations, where the built-in high-fidelity, real-time efficiency, and stableness show great promise for its practical applications in clinical training and surgery rehearsal fields.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite the long R&D history of interactive minimally-invasive surgery and therapy simulations, the guide wire/catheter behavior modeling remains challenging in Percutaneous Coronary Intervention (PCI) surgery simulators. This is primarily due to the heterogeneous heart physiological structures and complex intravascular inter-dynamic procedures. To ameliorate, this paper advocates a novel, robust, and efficient guide wire/catheter modeling method based on heterogeneous and integrated chain-mails, that can afford medical practitioners and trainees the unique opportunity to experience the entire guide wire-dominant PCI procedures in virtual environments as our model aims to mimic what occurs in clinical settings. Our approach's originality is primarily founded upon this new method's unconditional stability, real time performance, flexibility, and high-fidelity realism for guide wire/catheter simulation. Considering the front end of the guide wire has different stiffness with its conjunctive slender body and the guide wire length is adaptive to the surrounding environment, we propose to model the spatially-varying six degree of freedom behaviors by solely resorting to the generalized 3D chain-mails. Meanwhile, to effectively accommodate the motion constraints caused by the beating vessels and flowing blood, we integrate heterogeneous volumetric chain mails to streamline guide wire modeling and its interaction with surrounding substances. By dynamically coupling guide wire chain-mails with the surrounding media via virtual links, we are capable of efficiently simulating the collision-involved interdynamic behaviors of the guide wire. Finally, we showcase a PCI prototype simulator equipped with hap tic feedback for mimicing the guide wire intervention therapy, including pushing, pulling, and twisting operations, where the built-in high-fidelity, real-time efficiency, and stableness show great promise for its practical applications in clinical training and surgery rehearsal fields.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite the long R&D history of interactive minimally-invasive surgery and therapy simulations, the guide wire/catheter behavior modeling remains challenging in Percutaneous Coronary Intervention (PCI) surgery simulators. This is primarily due to the heterogeneous heart physiological structures and complex intravascular inter-dynamic procedures. To ameliorate, this paper advocates a novel, robust, and efficient guide wire/catheter modeling method based on heterogeneous and integrated chain-mails, that can afford medical practitioners and trainees the unique opportunity to experience the entire guide wire-dominant PCI procedures in virtual environments as our model aims to mimic what occurs in clinical settings. Our approach's originality is primarily founded upon this new method's unconditional stability, real time performance, flexibility, and high-fidelity realism for guide wire/catheter simulation. Considering the front end of the guide wire has different stiffness with its conjunctive slender body and the guide wire length is adaptive to the surrounding environment, we propose to model the spatially-varying six degree of freedom behaviors by solely resorting to the generalized 3D chain-mails. Meanwhile, to effectively accommodate the motion constraints caused by the beating vessels and flowing blood, we integrate heterogeneous volumetric chain mails to streamline guide wire modeling and its interaction with surrounding substances. By dynamically coupling guide wire chain-mails with the surrounding media via virtual links, we are capable of efficiently simulating the collision-involved interdynamic behaviors of the guide wire. Finally, we showcase a PCI prototype simulator equipped with hap tic feedback for mimicing the guide wire intervention therapy, including pushing, pulling, and twisting operations, where the built-in high-fidelity, real-time efficiency, and stableness show great promise for its practical applications in clinical training and surgery rehearsal fields.",
"fno": "07450404",
"keywords": [
"Solid Modeling",
"Adaptation Models",
"Three Dimensional Displays",
"Blood",
"Couplings",
"Computational Modeling",
"Surgery",
"Haptic Feedback",
"Guidewire Simulation",
"Heterogeneous Chain Mails",
"Guidewire Vessel Interaction",
"Guidewire Blood Interaction",
"PCI Simulator"
],
"authors": [
{
"affiliation": null,
"fullName": "Weiwei Wang",
"givenName": "Weiwei",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuai Li",
"givenName": "Shuai",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hong Qin",
"givenName": "Hong",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Aimin Hao",
"givenName": "Aimin",
"surname": "Hao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cad-graphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-08-01T00:00:00",
"pubType": "proceedings",
"pages": "105-112",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8020-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07450403",
"articleId": "12OmNyqiaRP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07450405",
"articleId": "12OmNxGj9Mh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a443",
"title": "An Interactive 3D Preoperative Planning and Training System for Minimally Invasive Vascular Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a443/12OmNAfy7Id",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2015/6775/0/6775a310",
"title": "Region-Specific Automated Feedback in Temporal Bone Surgery Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2015/6775a310/12OmNCmGNS7",
"parentPublication": {
"id": "proceedings/cbms/2015/6775/0",
"title": "2015 IEEE 28th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1995/7084/0/70840101",
"title": "Intelligent assistance for intravascular tele-surgery and experiments on virtual simulator",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840101/12OmNvA1h9f",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/2/71082172",
"title": "Micro active guide wire catheter system",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082172/12OmNylboCg",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2014/03/mcg2014030012",
"title": "A Brain Surgery Simulator",
"doi": null,
"abstractUrl": "/magazine/cg/2014/03/mcg2014030012/13rRUIJcWnu",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/06/mcg2012060056",
"title": "A Catheterization-Training Simulator Based on a Fast Multigrid Solver",
"doi": null,
"abstractUrl": "/magazine/cg/2012/06/mcg2012060056/13rRUxDqSaU",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a247",
"title": "A Virtual Reality Based Simulator for Training Surgical Skills in Procedure of Catheter Ablation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a247/17D45WwsQ7x",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a354",
"title": "Kinetic Simulation of Cardiac Motion with Patient-Specific Coronary Artery Vessels Attached for PCI Simulator",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a354/1ap5BWOVCzm",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a269",
"title": "Vascular Intervention Training System Based on Electromagnetic Tracking Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a269/1vg85Xpt6Xm",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523834",
"title": "Design and Evaluation of Personalized Percutaneous Coronary Intervention Surgery Simulation System",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523834/1wpqnjOfx60",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzRZpZR",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"acronym": "sitis",
"groupId": "1002425",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCga1NG",
"doi": "10.1109/SITIS.2017.24",
"title": "Cardiac Surgery Rehabilitation System (CSRS) for a Personalized Support to Patients",
"normalizedTitle": "Cardiac Surgery Rehabilitation System (CSRS) for a Personalized Support to Patients",
"abstract": "For a successful rehabilitation after cardiac surgery, it is crucial to have a carefully personalized, structured, and supervised physiotherapy program. Due to erroneous or unsupervised physiotherapy, nearly 50% of surgeries fail. Researchers have tried to leverage advances in wearable sensors and motion tracking to build affordable, automated, and customizable rehabilitation systems that help both therapists and patients during physiotherapy sessions. In this paper, we present a patient-centered cardiac surgery rehabilitation system (CSRS) for the personalization of the patient's physiotherapy for the early post-operative period. The system has been designed to interconnect different acquisition sensors and to be distributed on different stations in order to be able to continuously monitor the patient's vital signs and evaluate her/his cognitive and motor abilities in real time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For a successful rehabilitation after cardiac surgery, it is crucial to have a carefully personalized, structured, and supervised physiotherapy program. Due to erroneous or unsupervised physiotherapy, nearly 50% of surgeries fail. Researchers have tried to leverage advances in wearable sensors and motion tracking to build affordable, automated, and customizable rehabilitation systems that help both therapists and patients during physiotherapy sessions. In this paper, we present a patient-centered cardiac surgery rehabilitation system (CSRS) for the personalization of the patient's physiotherapy for the early post-operative period. The system has been designed to interconnect different acquisition sensors and to be distributed on different stations in order to be able to continuously monitor the patient's vital signs and evaluate her/his cognitive and motor abilities in real time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For a successful rehabilitation after cardiac surgery, it is crucial to have a carefully personalized, structured, and supervised physiotherapy program. Due to erroneous or unsupervised physiotherapy, nearly 50% of surgeries fail. Researchers have tried to leverage advances in wearable sensors and motion tracking to build affordable, automated, and customizable rehabilitation systems that help both therapists and patients during physiotherapy sessions. In this paper, we present a patient-centered cardiac surgery rehabilitation system (CSRS) for the personalization of the patient's physiotherapy for the early post-operative period. The system has been designed to interconnect different acquisition sensors and to be distributed on different stations in order to be able to continuously monitor the patient's vital signs and evaluate her/his cognitive and motor abilities in real time.",
"fno": "4283a083",
"keywords": [
"Body Sensor Networks",
"Patient Monitoring",
"Patient Rehabilitation",
"Surgery",
"Wearable Sensors",
"Motion Tracking",
"Acquisition Sensors",
"Patients Vital Signsmonitoring",
"Surgeries",
"Unsupervised Physiotherapy",
"Supervised Physiotherapy Program",
"CSRS",
"Cardiac Surgery Rehabilitation System",
"Physiotherapy Sessions",
"Customizable Rehabilitation Systems",
"Surgery",
"Monitoring",
"Biomedical Monitoring",
"Games",
"Tracking",
"Sensor Systems",
"Cardiac Surgery Rehabilitation System",
"Personalized Rehabilitation Sessions",
"Patient Centered Environment",
"Cognitive And Physical Abilities Monitoring"
],
"authors": [
{
"affiliation": null,
"fullName": "Giuseppe Caggianese",
"givenName": "Giuseppe",
"surname": "Caggianese",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mariaconsiglia Calabrese",
"givenName": "Mariaconsiglia",
"surname": "Calabrese",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luigi Gallo",
"givenName": "Luigi",
"surname": "Gallo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Giovanna Sannino",
"givenName": "Giovanna",
"surname": "Sannino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Carmine Vecchione",
"givenName": "Carmine",
"surname": "Vecchione",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-12-01T00:00:00",
"pubType": "proceedings",
"pages": "83-90",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-4283-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4283a076",
"articleId": "12OmNqGitVa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4283a091",
"articleId": "12OmNyRPgK8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/compsac/2017/0367/2/0367b125",
"title": "Technological Module for Unsupervised, Personalized Cardiac Rehabilitation Exercising",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2017/0367b125/12OmNA14Ad2",
"parentPublication": {
"id": "compsac/2017/0367/2",
"title": "2017 IEEE 41st Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260039",
"title": "Microfabricated Instruments for Fetal Cardiac Surgery: Experiments on Haptic Tissue Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260039/12OmNvCRgkA",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2016/6117/0/6117a146",
"title": "A Proactive Solution, using Wearable and Mobile Applications, for Closing the Gap between the Rehabilitation Team and Cardiac Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2016/6117a146/12OmNyFCvXU",
"parentPublication": {
"id": "proceedings/ichi/2016/6117/0",
"title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2017/5812/0/08056590",
"title": "Gamified 3D orthopaedic rehabilitation using low cost and portable inertial sensors",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056590/12OmNzmclzn",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2018/7744/0/774400a257",
"title": "Nursing Cooperation for Complex Thyroid Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2018/774400a257/17D45WK5ApY",
"parentPublication": {
"id": "proceedings/itme/2018/7744/0",
"title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2018/7744/0/774400a095",
"title": "Clinical Observation and Nursing Experience of Complications in Thyroid Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2018/774400a095/17D45WK5Ar3",
"parentPublication": {
"id": "proceedings/itme/2018/7744/0",
"title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/i-span/2018/8534/0/853400a253",
"title": "An Upper Extremity Rehabilitation System Using Virtual Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/i-span/2018/853400a253/17D45WWzW5h",
"parentPublication": {
"id": "proceedings/i-span/2018/8534/0",
"title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2021/3965/0/396500a115",
"title": "Comprehensive Digital Health Intervention to Improve Delivery of Cardiac Rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2021/396500a115/1AIMH0jcgxO",
"parentPublication": {
"id": "proceedings/chase/2021/3965/0",
"title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523834",
"title": "Design and Evaluation of Personalized Percutaneous Coronary Intervention Surgery Simulation System",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523834/1wpqnjOfx60",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/segah/2017/5482/0/07939284",
"title": "Fun-Knee™: A novel smart knee sleeve for Total-Knee-Replacement rehabilitation with gamification",
"doi": null,
"abstractUrl": "/proceedings-article/segah/2017/07939284/1yq2wBMwmdy",
"parentPublication": {
"id": "proceedings/segah/2017/5482/0",
"title": "2017 IEEE 5th International Conference on Serious Games and Applications for Health (SeGAH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC17hVF",
"title": "Healthcare Informatics, Imaging and Systems Biology, IEEE International Conference on",
"acronym": "hisb",
"groupId": "1800547",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvSKO0m",
"doi": "10.1109/HISB.2012.14",
"title": "Predicting Complications of Percutaneous Coronary Intervention Using a Novel Support Vector Method",
"normalizedTitle": "Predicting Complications of Percutaneous Coronary Intervention Using a Novel Support Vector Method",
"abstract": "Clinical tools to identify patients at risk of complications during percutaneous coronary intervention (PCI) are important to determine care at the bedside and to assess quality and outcomes. We address the growing need for such tools by proposing a novel support vector machine (SVM) approach to stratify PCI patients. Our approach simultaneously leverages properties of both one-class and two-class SVM classification to address the diminished prevalence of many important PCI complications. When studied on the Blue Cross Blue Shield of Michigan Cardiovascular Consortium (BMC2) multi-center cardiology registry data, our SVM method provided moderate to high levels of discrimination for different PCI endpoints, and improved model performance in many cases relative to both traditional one-class and two-class SVMs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Clinical tools to identify patients at risk of complications during percutaneous coronary intervention (PCI) are important to determine care at the bedside and to assess quality and outcomes. We address the growing need for such tools by proposing a novel support vector machine (SVM) approach to stratify PCI patients. Our approach simultaneously leverages properties of both one-class and two-class SVM classification to address the diminished prevalence of many important PCI complications. When studied on the Blue Cross Blue Shield of Michigan Cardiovascular Consortium (BMC2) multi-center cardiology registry data, our SVM method provided moderate to high levels of discrimination for different PCI endpoints, and improved model performance in many cases relative to both traditional one-class and two-class SVMs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Clinical tools to identify patients at risk of complications during percutaneous coronary intervention (PCI) are important to determine care at the bedside and to assess quality and outcomes. We address the growing need for such tools by proposing a novel support vector machine (SVM) approach to stratify PCI patients. Our approach simultaneously leverages properties of both one-class and two-class SVM classification to address the diminished prevalence of many important PCI complications. When studied on the Blue Cross Blue Shield of Michigan Cardiovascular Consortium (BMC2) multi-center cardiology registry data, our SVM method provided moderate to high levels of discrimination for different PCI endpoints, and improved model performance in many cases relative to both traditional one-class and two-class SVMs.",
"fno": "4921a031",
"keywords": [],
"authors": [
{
"affiliation": null,
"fullName": "Gyemin Lee",
"givenName": "Gyemin",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hitinder S. Gurm",
"givenName": "Hitinder S.",
"surname": "Gurm",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zeeshan Syed",
"givenName": "Zeeshan",
"surname": "Syed",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hisb",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-09-01T00:00:00",
"pubType": "proceedings",
"pages": "31",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-4803-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4921a023",
"articleId": "12OmNB9t6xA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4921a032",
"articleId": "12OmNClQ0yH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmla/2009/3926/0/3926a799",
"title": "Artificial Neural Networks Prognostic Evaluation of Post-Surgery Complications in Patients Underwent to Coronary Artery Bypass Graft Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icmla/2009/3926a799/12OmNC4wtDI",
"parentPublication": {
"id": "proceedings/icmla/2009/3926/0",
"title": "Machine Learning and Applications, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2009/4879/0/05255454",
"title": "Predicting risk of complications following a drug eluting stent procedure: A SVM approach for imbalanced data",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2009/05255454/12OmNvKePI7",
"parentPublication": {
"id": "proceedings/cbms/2009/4879/0",
"title": "2009 22nd IEEE International Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2006/2727/0/27270325",
"title": "A Supervised Learning Approach to Predicting Coronary Heart Disease Complications in Type 2 Diabetes Mellitus Patients",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2006/27270325/12OmNx6g6j1",
"parentPublication": {
"id": "proceedings/bibe/2006/2727/0",
"title": "2006 IEEE Symposium on Bioinformatics and Bioengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cic/1989/2114/0/00130544",
"title": "Computerised reporting of percutaneous transluminal coronary angioplasty (PTCA) procedures",
"doi": null,
"abstractUrl": "/proceedings-article/cic/1989/00130544/12OmNx9nGDd",
"parentPublication": {
"id": "proceedings/cic/1989/2114/0",
"title": "Proceedings Computers in Cardiology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2018/6060/0/606001a106",
"title": "Predicting Disease Complications Using a Stepwise Hidden Variable Approach for Learning Dynamic Bayesian Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2018/606001a106/12OmNyvY9on",
"parentPublication": {
"id": "proceedings/cbms/2018/6060/0",
"title": "2018 IEEE 31st International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccrd/2010/4043/0/4043a765",
"title": "Predicting Software Reliability with Support Vector Machines",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a765/12OmNzQR1sj",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2019/9214/0/921400a049",
"title": "Predicting Spine Surgery Complications Using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2019/921400a049/1cJ0DiKerni",
"parentPublication": {
"id": "proceedings/icmew/2019/9214/0",
"title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2019/1867/0/08983318",
"title": "Unsupervised Machine Learning Elicits Patient Archetypes in a Primary Percutaneous Coronary Intervention Service",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2019/08983318/1hgugJiZ4ys",
"parentPublication": {
"id": "proceedings/bibm/2019/1867/0",
"title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2019/1867/0/08983143",
"title": "Predicting 30 days Mortality in STEMI Patients using Patient Referral Data to a Primary Percutaneous Coronary Intervention Service",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2019/08983143/1hguhrPs6vS",
"parentPublication": {
"id": "proceedings/bibm/2019/1867/0",
"title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523834",
"title": "Design and Evaluation of Personalized Percutaneous Coronary Intervention Surgery Simulation System",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523834/1wpqnjOfx60",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1GhVTNddUvm",
"title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cmbs",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1GhW5drgeRy",
"doi": "10.1109/CBMS55023.2022.00048",
"title": "Predicting the Onset of Delirium on Hourly Basis in an Intensive Care Unit Following Cardiac Surgery",
"normalizedTitle": "Predicting the Onset of Delirium on Hourly Basis in an Intensive Care Unit Following Cardiac Surgery",
"abstract": "Delirium, affecting up to 52% of cardiac surgery patients, can have serious long-term effects on patients by damaging cognitive ability and causing subsequent functional decline. This study reports on the development and evaluation of predictive models aimed at identifying the likely onset of delirium on an hourly basis in intensive care unit following cardiac surgery. Most models achieved a mean AUC > 0.900 across all lead times. A support vector machine achieved the highest performance across all lead times of AUC = 0.941 and Sensitivity = 0.907, and BARTm, where missing values were replaced with missForest imputation, achieved the highest Specificity of 0.892. Being able to predict delirium hours in advance gives clinicians the ability to intervene and optimize treatments for patients who are at risk and avert potentially serious and life-threatening consequences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Delirium, affecting up to 52% of cardiac surgery patients, can have serious long-term effects on patients by damaging cognitive ability and causing subsequent functional decline. This study reports on the development and evaluation of predictive models aimed at identifying the likely onset of delirium on an hourly basis in intensive care unit following cardiac surgery. Most models achieved a mean AUC > 0.900 across all lead times. A support vector machine achieved the highest performance across all lead times of AUC = 0.941 and Sensitivity = 0.907, and BARTm, where missing values were replaced with missForest imputation, achieved the highest Specificity of 0.892. Being able to predict delirium hours in advance gives clinicians the ability to intervene and optimize treatments for patients who are at risk and avert potentially serious and life-threatening consequences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Delirium, affecting up to 52% of cardiac surgery patients, can have serious long-term effects on patients by damaging cognitive ability and causing subsequent functional decline. This study reports on the development and evaluation of predictive models aimed at identifying the likely onset of delirium on an hourly basis in intensive care unit following cardiac surgery. Most models achieved a mean AUC > 0.900 across all lead times. A support vector machine achieved the highest performance across all lead times of AUC = 0.941 and Sensitivity = 0.907, and BARTm, where missing values were replaced with missForest imputation, achieved the highest Specificity of 0.892. Being able to predict delirium hours in advance gives clinicians the ability to intervene and optimize treatments for patients who are at risk and avert potentially serious and life-threatening consequences.",
"fno": "677000a234",
"keywords": [
"Cognition",
"Diseases",
"Medical Computing",
"Patient Care",
"Patient Monitoring",
"Support Vector Machines",
"Surgery",
"Support Vector Machine",
"Intensive Care Unit",
"Cardiac Surgery Patients",
"Predictive Models",
"Support Vector Machines",
"Sensitivity",
"Computational Modeling",
"Sociology",
"Surgery",
"Predictive Models",
"Statistics",
"Delirium",
"Dynamic Risk Prediction",
"Intensive Care"
],
"authors": [
{
"affiliation": "University of Strathclyde,Dept. of Computer and Information Sciences,Glasgow,UK",
"fullName": "Linda Lapp",
"givenName": "Linda",
"surname": "Lapp",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Strathclyde,Dept. of Computer and Information Sciences,Glasgow,UK",
"fullName": "Marc Roper",
"givenName": "Marc",
"surname": "Roper",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Strathclyde,Dept. of Mathematics and Statistics,Glasgow,UK",
"fullName": "Kimberley Kavanagh",
"givenName": "Kimberley",
"surname": "Kavanagh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Golden Jubilee National Hospital,Dept. of Perioperative Medicine,Clydebank,UK",
"fullName": "Stefan Schraag",
"givenName": "Stefan",
"surname": "Schraag",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cmbs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "234-239",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6770-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "677000a228",
"articleId": "1GhVWsmVCKI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "677000a240",
"articleId": "1GhW51WN4nC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iri/2016/3207/0/3207a433",
"title": "Statistical Measurement and Analysis on How the Late-Life Function & Disability Instrument Enhances the Frailty Assessment Compared to the National Standards Used on Transcatheter Aortic Valve Patients (Application Paper)",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2016/3207a433/12OmNCctf8n",
"parentPublication": {
"id": "proceedings/iri/2016/3207/0",
"title": "2016 IEEE 17th International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a083",
"title": "Cardiac Surgery Rehabilitation System (CSRS) for a Personalized Support to Patients",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a083/12OmNCga1NG",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/miar/2001/1113/0/11130016",
"title": "Towards Endoscopic Augmented Reality for Robotically Assisted Minimally Invasive Cardiac Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/miar/2001/11130016/12OmNqBtj9E",
"parentPublication": {
"id": "proceedings/miar/2001/1113/0",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260039",
"title": "Microfabricated Instruments for Fetal Cardiac Surgery: Experiments on Haptic Tissue Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260039/12OmNvCRgkA",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2017/1324/0/132401a568",
"title": "Delirium Prediction using Machine Learning Models on Predictive Electronic Health Records Data",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2017/132401a568/12OmNvD8RtV",
"parentPublication": {
"id": "proceedings/bibe/2017/1324/0",
"title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669879",
"title": "Predicting Same Hospital Readmission following Fontan Cavopulmonary Anastomosis using Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669879/1A9VLnohaz6",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669806",
"title": "Unsupervised Learning to Subphenotype Delirium Patients from Electronic Health Records",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669806/1A9Wl3i4gEM",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2022/3418/0/341800a033",
"title": "PrediCatE: Predicting Acute Endophthalmitis for Patients with Cataract Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2022/341800a033/1BYIrzYVvd6",
"parentPublication": {
"id": "proceedings/icsc/2022/3418/0",
"title": "2022 IEEE 16th International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdicn/2022/8476/0/847600a213",
"title": "Automatically Predicting Lung Adenocarcinoma Invasiveness",
"doi": null,
"abstractUrl": "/proceedings-article/bdicn/2022/847600a213/1CJgvInkLG8",
"parentPublication": {
"id": "proceedings/bdicn/2022/8476/0",
"title": "2022 International Conference on Big Data, Information and Computer Network (BDICN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2021/1685/0/168500a215",
"title": "A Wireless Single Lead ECG Module for Cloud-Computing Based Postoperative Monitoring of Cardiac Surgical Patients",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2021/168500a215/1ymJexKshws",
"parentPublication": {
"id": "proceedings/icdh/2021/1685/0",
"title": "2021 IEEE International Conference on Digital Health (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
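The CBMS 2022 record above reports its hourly delirium models in terms of AUC, sensitivity, and specificity. A minimal sketch of how such a classifier could be scored, assuming synthetic stand-in data and scikit-learn; nothing below comes from the paper itself:

```python
# Hedged sketch (synthetic data, invented feature count): scoring a binary
# delirium-risk classifier with the three metrics the record reports.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, confusion_matrix

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 12))            # 12 stand-in hourly ICU features
y = (X[:, 0] + 0.5 * X[:, 1] + rng.normal(size=1000) > 1.0).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)
clf = make_pipeline(StandardScaler(), SVC(probability=True)).fit(X_tr, y_tr)

prob = clf.predict_proba(X_te)[:, 1]
pred = (prob >= 0.5).astype(int)
tn, fp, fn, tp = confusion_matrix(y_te, pred).ravel()
print("AUC        :", roc_auc_score(y_te, prob))
print("sensitivity:", tp / (tp + fn))      # true-positive rate
print("specificity:", tn / (tn + fp))      # true-negative rate
```

Reporting sensitivity and specificity alongside AUC, as the record does, matters because a fixed 0.5 threshold can hide very different error trade-offs behind the same AUC.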
{
"proceeding": {
"id": "1ap5wvyUHKM",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "1ap5ASodZde",
"doi": "10.1109/ICVRV.2017.00042",
"title": "Research on Interaction of Exposure Operation in Virtual Surgery",
"normalizedTitle": "Research on Interaction of Exposure Operation in Virtual Surgery",
"abstract": "To achieve better interactivity in virtual surgery system, based on the exposure of surgery view using retractors, which is the basic surgery operation, study in the interaction technology in virtual surgery was carried out. In this paper, we presented a solution to solve the problem that the current methods cannot achieve good accuracy in location and high quality in visual feedback while satisfying the refresh rate. We chose handle to be the interactive equipment, the combination of inertial sensor location method and laser positioning method was applied to spatial location of handle, which can not only guarantee the accuracy, but also make the final output data smoother and the update rate soars. Visual feedback is the main function in the human body model interaction, we proposed an improved Mass Spring Damper model, which includes both surface grid and skeleton grid, and in addition connects particles on the surface grid and internal skeleton grid through the spring, to effectively support the mesh surface and prevent hyperelastic deformation. Finally, experiments were conducted and results show that the methods above achieve higher accuracy and efficiency in interaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To achieve better interactivity in virtual surgery system, based on the exposure of surgery view using retractors, which is the basic surgery operation, study in the interaction technology in virtual surgery was carried out. In this paper, we presented a solution to solve the problem that the current methods cannot achieve good accuracy in location and high quality in visual feedback while satisfying the refresh rate. We chose handle to be the interactive equipment, the combination of inertial sensor location method and laser positioning method was applied to spatial location of handle, which can not only guarantee the accuracy, but also make the final output data smoother and the update rate soars. Visual feedback is the main function in the human body model interaction, we proposed an improved Mass Spring Damper model, which includes both surface grid and skeleton grid, and in addition connects particles on the surface grid and internal skeleton grid through the spring, to effectively support the mesh surface and prevent hyperelastic deformation. Finally, experiments were conducted and results show that the methods above achieve higher accuracy and efficiency in interaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To achieve better interactivity in virtual surgery system, based on the exposure of surgery view using retractors, which is the basic surgery operation, study in the interaction technology in virtual surgery was carried out. In this paper, we presented a solution to solve the problem that the current methods cannot achieve good accuracy in location and high quality in visual feedback while satisfying the refresh rate. We chose handle to be the interactive equipment, the combination of inertial sensor location method and laser positioning method was applied to spatial location of handle, which can not only guarantee the accuracy, but also make the final output data smoother and the update rate soars. Visual feedback is the main function in the human body model interaction, we proposed an improved Mass Spring Damper model, which includes both surface grid and skeleton grid, and in addition connects particles on the surface grid and internal skeleton grid through the spring, to effectively support the mesh surface and prevent hyperelastic deformation. Finally, experiments were conducted and results show that the methods above achieve higher accuracy and efficiency in interaction.",
"fno": "263600a168",
"keywords": [
"Biomedical Equipment",
"Deformation",
"Medical Computing",
"Sensors",
"Shock Absorbers",
"Springs Mechanical",
"Surgery",
"Virtual Reality",
"Exposure Operation",
"Virtual Surgery System",
"Visual Feedback",
"Interactive Equipment",
"Inertial Sensor Location Method",
"Laser Positioning Method",
"Human Body Model Interaction",
"Surface Grid",
"Internal Skeleton Grid",
"Improved Mass Spring Damper Model",
"Retractors",
"Mesh Surface",
"Hyperelastic Deformation",
"Surgery",
"Deformable Models",
"Strain",
"Coordinate Measuring Machines",
"Visualization",
"Solid Modeling",
"Measurement By Laser Beam",
"Virtual Surgery",
"Interaction Device",
"Spatial Location",
"Visual Feedback",
"Mass Spring Damper Mode"
],
"authors": [
{
"affiliation": null,
"fullName": "Pan Zhou",
"givenName": "Pan",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Quanyu Wang",
"givenName": "Quanyu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "168-173",
"year": "2017",
"issn": "2375-141X",
"isbn": "978-1-5386-2636-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "263600a166",
"articleId": "1ap5yKzxg9G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "263600a174",
"articleId": "1ap5ytwKETu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptic/2006/0226/0/01627099",
"title": "A Displacement Driven Real-Time Deformable Model For Haptic Surgery Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627099/12OmNAXxWYS",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imccc/2016/1195/0/07774779",
"title": "Uncertainty Analysis Based Geometric Error Detection for Heavy-Duty Machine Tools",
"doi": null,
"abstractUrl": "/proceedings-article/imccc/2016/07774779/12OmNB0Fxi7",
"parentPublication": {
"id": "proceedings/imccc/2016/1195/0",
"title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2014/6854/0/6854a449",
"title": "An Improved Meshless Method in Virtual Surgery Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2014/6854a449/12OmNvkGW50",
"parentPublication": {
"id": "proceedings/icvrv/2014/6854/0",
"title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/01627099",
"title": "A Displacement Driven Real-Time Deformable Model For Haptic Surgery Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/01627099/12OmNx1IwaC",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031c351",
"title": "The Implementation of Haptic Interaction in Virtual Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031c351/12OmNxAlA0U",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2005/2489/0/24890790",
"title": "Larynx Virtual Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2005/24890790/12OmNxcdG07",
"parentPublication": {
"id": "proceedings/ism/2005/2489/0",
"title": "Seventh IEEE International Symposium on Multimedia (ISM'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260075",
"title": "A Displacement Driven Real-Time Deformable Model For Haptic Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260075/12OmNxwENJC",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a264",
"title": "The Simulation of Delineation and Splitting in Virtual Liver Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a264/12OmNyPQ4SW",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gmai/2007/2901/0/29010079",
"title": "Virtual Knee Joint Replacement Surgery System",
"doi": null,
"abstractUrl": "/proceedings-article/gmai/2007/29010079/12OmNz6iOqt",
"parentPublication": {
"id": "proceedings/gmai/2007/2901/0",
"title": "2007 Geometric Modeling and Imaging: New Advances",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2017/2628/0/2628a083",
"title": "Precision Cylindricity Error Measurement System Design and Surface Topography Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2017/2628a083/12OmNzV70kh",
"parentPublication": {
"id": "proceedings/icmcce/2017/2628/0",
"title": "2017 Second International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
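The exposure-operation record above rests on an improved mass-spring-damper model in which surface particles are tethered to an internal skeleton. The sketch below shows the generic semi-implicit Euler update for a damped spring chain with one pinned particle; the chain topology and every parameter are illustrative assumptions, not the paper's model:

```python
# Generic semi-implicit Euler step for a damped spring chain; a sketch of the
# mass-spring-damper family the record names, not the authors' implementation.
import numpy as np

def msd_step(pos, vel, rest, k, c, mass, anchor, dt):
    """Advance one time step; particle 0 stays pinned to `anchor`."""
    force = np.zeros_like(pos)
    for i in range(len(pos) - 1):                    # spring between i and i+1
        d = pos[i + 1] - pos[i]
        length = np.linalg.norm(d) + 1e-12
        n = d / length
        f = k * (length - rest) * n                  # Hooke spring term
        f += c * np.dot(vel[i + 1] - vel[i], n) * n  # damping along the spring
        force[i] += f
        force[i + 1] -= f
    vel = vel + dt * force / mass
    pos = pos + dt * vel                             # semi-implicit Euler
    pos[0], vel[0] = anchor, 0.0                     # re-pin the skeleton end
    return pos, vel

# Illustrative use: a stretched 5-particle strand relaxing toward its anchor.
pos = np.linspace([0., 0., 0.], [0., 0., -0.6], 5)
vel = np.zeros_like(pos)
for _ in range(100):
    pos, vel = msd_step(pos, vel, rest=0.1, k=50.0, c=0.5,
                        mass=0.01, anchor=np.zeros(3), dt=0.005)
```

The damping term acting only along the spring axis is what keeps such models from the undamped oscillation that would otherwise ruin interactive visual feedback.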
{
"proceeding": {
"id": "1ap5wvyUHKM",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "1ap5BWOVCzm",
"doi": "10.1109/ICVRV.2017.00081",
"title": "Kinetic Simulation of Cardiac Motion with Patient-Specific Coronary Artery Vessels Attached for PCI Simulator",
"normalizedTitle": "Kinetic Simulation of Cardiac Motion with Patient-Specific Coronary Artery Vessels Attached for PCI Simulator",
"abstract": "In this paper, we propose to simulate the dynamic motion of cardiovascular system attached with patient-specific vessel structure for personalized Percutaneous Coronary Intervention (PCI) simulation to train surgeons of skills and to help planning surgery. To obtain patient-specific vessel structure, a coarse segmentation with the centerlines extraction subsequently is applied to the computed tomography (CT) scans and the vessels along the centerlines is modeled using a lofted 2D segmentation method. The vessels are then combined with a template heart model to construct a cardiovascular system. For the cardiac motion, we estimate the ventricles motion from 4D Magnetic Resonance Imaging (MRI) sequences to drive the whole heart motion. And the position-based method coupling with a mass-spring model constructed with elastic spheres is used to simulate the cardiac motion cycle stably in the interactive PCI simulator. With our method, a personalized highly realistic beating motion of a whole heart is able to be created and applied to our patient-specific PCI surgery simulation system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose to simulate the dynamic motion of cardiovascular system attached with patient-specific vessel structure for personalized Percutaneous Coronary Intervention (PCI) simulation to train surgeons of skills and to help planning surgery. To obtain patient-specific vessel structure, a coarse segmentation with the centerlines extraction subsequently is applied to the computed tomography (CT) scans and the vessels along the centerlines is modeled using a lofted 2D segmentation method. The vessels are then combined with a template heart model to construct a cardiovascular system. For the cardiac motion, we estimate the ventricles motion from 4D Magnetic Resonance Imaging (MRI) sequences to drive the whole heart motion. And the position-based method coupling with a mass-spring model constructed with elastic spheres is used to simulate the cardiac motion cycle stably in the interactive PCI simulator. With our method, a personalized highly realistic beating motion of a whole heart is able to be created and applied to our patient-specific PCI surgery simulation system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose to simulate the dynamic motion of cardiovascular system attached with patient-specific vessel structure for personalized Percutaneous Coronary Intervention (PCI) simulation to train surgeons of skills and to help planning surgery. To obtain patient-specific vessel structure, a coarse segmentation with the centerlines extraction subsequently is applied to the computed tomography (CT) scans and the vessels along the centerlines is modeled using a lofted 2D segmentation method. The vessels are then combined with a template heart model to construct a cardiovascular system. For the cardiac motion, we estimate the ventricles motion from 4D Magnetic Resonance Imaging (MRI) sequences to drive the whole heart motion. And the position-based method coupling with a mass-spring model constructed with elastic spheres is used to simulate the cardiac motion cycle stably in the interactive PCI simulator. With our method, a personalized highly realistic beating motion of a whole heart is able to be created and applied to our patient-specific PCI surgery simulation system.",
"fno": "263600a354",
"keywords": [
"Biomechanics",
"Biomedical MRI",
"Blood Vessels",
"Cardiovascular System",
"Computerised Tomography",
"Feature Extraction",
"Image Segmentation",
"Image Sequences",
"Medical Image Processing",
"Physiological Models",
"Surgery",
"Cardiovascular System",
"Patient Specific Vessel Structure",
"Personalized Percutaneous Coronary Intervention Simulation",
"Computed Tomography Scans",
"Ventricles Motion",
"4 D Magnetic Resonance Imaging Sequences",
"Heart Motion",
"Cardiac Motion Cycle",
"Interactive PCI Simulator",
"Patient Specific PCI Surgery Simulation System",
"2 D Segmentation Method",
"Heart Model",
"Patient Specific Coronary Artery Vessels",
"CT Scans",
"Solid Modeling",
"Heart",
"Computational Modeling",
"Deformable Models",
"Surgery",
"Strain",
"Magnetic Resonance Imaging",
"Percutaneous Coronary Intervention PCI",
"Patient Specific Vessel Modeling",
"Heart Motion",
"Position Based Method"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhijun Xie",
"givenName": "Zhijun",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shuai Li",
"givenName": "Shuai",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qing Xia",
"givenName": "Qing",
"surname": "Xia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Aimin Hao",
"givenName": "Aimin",
"surname": "Hao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "354-359",
"year": "2017",
"issn": "2375-141X",
"isbn": "978-1-5386-2636-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "263600a352",
"articleId": "1ap5Cx8Jtra",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "263600a360",
"articleId": "1ap5CAij63C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cad-graphics/2015/8020/0/07450404",
"title": "Novel, Robust, and Efficient Guidewire Modeling for PCI Surgery Simulator Based on Heterogeneous and Integrated Chain-Mails",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450404/12OmNC2fGtO",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341060",
"title": "Shape-based tracking of naturally occurring annuli in image sequences",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341060/12OmNqEjhZB",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571104",
"title": "Visualisation of Left Ventricular Dysfunction in the Virtual Pathological Heart",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571104/12OmNrJRP9m",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2012/2746/0/06470345",
"title": "Clinic effect of tongguan capsule for coronary heart disease : A meta-analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2012/06470345/12OmNvjgWqn",
"parentPublication": {
"id": "proceedings/bibmw/2012/2746/0",
"title": "2012 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2017/4722/0/4722a243",
"title": "Does Race Play a Role in Invasive Procedure Treatments? An Initial Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2017/4722a243/12OmNx5Yvg7",
"parentPublication": {
"id": "proceedings/chase/2017/4722/0",
"title": "2017 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2012/2049/0/06266381",
"title": "On the road to predictive smart alarms based on a networked operating room",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2012/06266381/12OmNxVlTKs",
"parentPublication": {
"id": "proceedings/cbms/2012/2049/0",
"title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444626",
"title": "Haptic noise cancellation: Restoring force perception in robotically-assisted beating heart surgery",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444626/12OmNxuo0kx",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1994/6256/0/00315983",
"title": "Model-based analysis of cardiac motion from tagged MRI data",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1994/00315983/12OmNzICENj",
"parentPublication": {
"id": "proceedings/cbms/1994/6256/0",
"title": "Proceedings of IEEE Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1632",
"title": "CoViCAD: Comprehensive Visualization of Coronary Artery Disease",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1632/13rRUxBa5xa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523834",
"title": "Design and Evaluation of Personalized Percutaneous Coronary Intervention Surgery Simulation System",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523834/1wpqnjOfx60",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
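The PCI record above drives cardiac motion with a position-based method coupled to a mass-spring model, a combination chosen for stability at interactive rates. Below is a hedged sketch of only the generic position-based-dynamics ingredient, a Gauss-Seidel sweep over pairwise distance constraints with unit masses assumed; the paper's elastic-sphere coupling and MRI-driven motion are not reproduced:

```python
# Hedged sketch of position-based dynamics: predict positions, then
# iteratively project distance constraints (unit masses assumed).
import numpy as np

def project_distance(p, i, j, rest, stiffness=1.0):
    """Nudge particles i and j toward satisfying |p[i] - p[j]| == rest."""
    d = p[i] - p[j]
    length = np.linalg.norm(d) + 1e-12
    corr = 0.5 * stiffness * (length - rest) * (d / length)
    p[i] -= corr
    p[j] += corr

def pbd_step(pos, vel, edges, rests, dt, iters=10):
    pred = pos + dt * vel                 # unconstrained prediction
    for _ in range(iters):                # Gauss-Seidel constraint sweeps
        for (i, j), r in zip(edges, rests):
            project_distance(pred, i, j, r)
    vel = (pred - pos) / dt               # velocity from the corrected motion
    return pred, vel
```

Because corrections act on positions directly, the solver cannot blow up the way a stiff force-based integrator can, which is why position-based schemes suit real-time surgical simulators.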
{
"proceeding": {
"id": "1hgtR5xF6VO",
"title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"acronym": "bibm",
"groupId": "1001586",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hguhrPs6vS",
"doi": "10.1109/BIBM47256.2019.8983143",
"title": "Predicting 30 days Mortality in STEMI Patients using Patient Referral Data to a Primary Percutaneous Coronary Intervention Service",
"normalizedTitle": "Predicting 30 days Mortality in STEMI Patients using Patient Referral Data to a Primary Percutaneous Coronary Intervention Service",
"abstract": "Primary percutaneous coronary intervention (PPCI) is a minimally invasive procedure to unblock the arteries which carry blood to the heart. This procedure is carried out once patients are accepted based on the STEMI criteria upon the assessment of 12-lead ECG. This paper reports the analyses of a dataset compiled from patients accepted for PPCI. The primary objective was to explore the features which may predict 30days mortality. The 30 day mortality was?? The main features identified were a patient's age, sex, door to balloon time, call time, pain time, and activation status. Together these features appear to be a predictor of 30day mortality in patients referred for PPCI (76% accuracy, 70% sensitivity and 85% specificity).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Primary percutaneous coronary intervention (PPCI) is a minimally invasive procedure to unblock the arteries which carry blood to the heart. This procedure is carried out once patients are accepted based on the STEMI criteria upon the assessment of 12-lead ECG. This paper reports the analyses of a dataset compiled from patients accepted for PPCI. The primary objective was to explore the features which may predict 30days mortality. The 30 day mortality was?? The main features identified were a patient's age, sex, door to balloon time, call time, pain time, and activation status. Together these features appear to be a predictor of 30day mortality in patients referred for PPCI (76% accuracy, 70% sensitivity and 85% specificity).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Primary percutaneous coronary intervention (PPCI) is a minimally invasive procedure to unblock the arteries which carry blood to the heart. This procedure is carried out once patients are accepted based on the STEMI criteria upon the assessment of 12-lead ECG. This paper reports the analyses of a dataset compiled from patients accepted for PPCI. The primary objective was to explore the features which may predict 30days mortality. The 30 day mortality was?? The main features identified were a patient's age, sex, door to balloon time, call time, pain time, and activation status. Together these features appear to be a predictor of 30day mortality in patients referred for PPCI (76% accuracy, 70% sensitivity and 85% specificity).",
"fno": "08983143",
"keywords": [
"Blood Vessels",
"Cardiovascular System",
"Diseases",
"Electrocardiography",
"Medical Signal Detection",
"Medical Signal Processing",
"Surgery",
"STEMI Patients",
"Patient Referral Data",
"Primary Percutaneous Coronary Intervention Service",
"PPCI",
"Minimally Invasive Procedure",
"STEMI Criteria",
"12 Lead ECG",
"Mortality",
"Time 30 0 D",
"Heart",
"Sensitivity",
"Minimally Invasive Surgery",
"Pain",
"Conferences",
"Biological System Modeling",
"Predictive Models",
"PPCI Patient Referrals",
"Acute MI",
"STEMI",
"30 Days Mortality"
],
"authors": [
{
"affiliation": "Ulster University Jordanstown,Computing, Engineering & Built Environment,Northern Ireland, UK",
"fullName": "Aleeha Iftikhar",
"givenName": "Aleeha",
"surname": "Iftikhar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ulster University Jordanstown,Computing, Engineering & Built Environment,Northern Ireland, UK",
"fullName": "Raymond Bond",
"givenName": "Raymond",
"surname": "Bond",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centre for Personalised Medicine, Ulster University Jordanstown,Faculty of Life & Health Sciences,Northern Ireland, UK",
"fullName": "Victoria McGilligan",
"givenName": "Victoria",
"surname": "McGilligan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ulster University Jordanstown,Computing, Engineering & Built Environment,Northern Ireland, UK",
"fullName": "Khaled Rjoob",
"givenName": "Khaled",
"surname": "Rjoob",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of the Highlands and Islands, Centre for Health Science,Department of Diabetes & Cardiovascular Science,Inverness,UK",
"fullName": "Charles Knoery",
"givenName": "Charles",
"surname": "Knoery",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of the Highlands and Islands, Centre for Health Science,Department of Diabetes & Cardiovascular Science,Inverness,UK",
"fullName": "Stephen J Leslie",
"givenName": "Stephen J",
"surname": "Leslie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Letterkenny University hospital,Letterkenny,Donegal,Ireland",
"fullName": "Anne McShane",
"givenName": "Anne",
"surname": "McShane",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ulster University,Western Health and Social Care Trust C-TRIC,Northern Ireland, UK",
"fullName": "Aaron Peace",
"givenName": "Aaron",
"surname": "Peace",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1315-1317",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1867-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08983318",
"articleId": "1hgugJiZ4ys",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08983306",
"articleId": "1hgufJXVnQ4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/itme/2015/8302/0/8302a264",
"title": "The Analysis of Endoscopic-Assisted Neck Minimally Invasive Radical Operation of Thyroid Cancer (Experience of 402 Cases)",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2015/8302a264/12OmNCm7BHY",
"parentPublication": {
"id": "proceedings/itme/2015/8302/0",
"title": "2015 7th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asiajcis/2015/1989/0/1989a085",
"title": "Privacy-Preserving Epidemiological Analysis for a Distributed Database of Hospitals",
"doi": null,
"abstractUrl": "/proceedings-article/asiajcis/2015/1989a085/12OmNwFid2w",
"parentPublication": {
"id": "proceedings/asiajcis/2015/1989/0",
"title": "2015 10th Asia Joint Conference on Information Security (AsiaJCIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2018/7744/0/774400a229",
"title": "Latest Progress of Percutaneous Thernel Ablation Treatment for Secondary Hyperparathyroidism (a Literature Review)",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2018/774400a229/17D45VTRov7",
"parentPublication": {
"id": "proceedings/itme/2018/7744/0",
"title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a045",
"title": "A Controllable Stiffness Robotics for Natural Orifice Transluminal Endoscopic Surgery",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a045/1ANLop40HUA",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2021/2427/0/242700a508",
"title": "Multimodal Machine Learning for 30-Days Post-Operative Mortality Prediction of Elderly Hip Fracture Patients",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2021/242700a508/1AjSSTlAPuw",
"parentPublication": {
"id": "proceedings/icdmw/2021/2427/0",
"title": "2021 International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a425",
"title": "Design requirements to improve laparoscopy via XR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a425/1CJf9jqKiWs",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/arace/2022/5153/0/515300a166",
"title": "Robot assisted unilateral biportal endoscopic lumbar interbody fusion for lumbar spondylolisthesis: A case report",
"doi": null,
"abstractUrl": "/proceedings-article/arace/2022/515300a166/1Ip7H6QPSsU",
"parentPublication": {
"id": "proceedings/arace/2022/5153/0",
"title": "2022 Asia Conference on Advanced Robotics, Automation, and Control Engineering (ARACE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsid/2019/0409/0/040900a496",
"title": "Design and Analysis of a Minimally Invasive and ECG Controlled Ventricular Assistive Device",
"doi": null,
"abstractUrl": "/proceedings-article/vlsid/2019/040900a496/1a3wS8jGcqA",
"parentPublication": {
"id": "proceedings/vlsid/2019/0409/0",
"title": "2019 32nd International Conference on VLSI Design and 2019 18th International Conference on Embedded Systems (VLSID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2019/1867/0/08983318",
"title": "Unsupervised Machine Learning Elicits Patient Archetypes in a Primary Percutaneous Coronary Intervention Service",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2019/08983318/1hgugJiZ4ys",
"parentPublication": {
"id": "proceedings/bibm/2019/1867/0",
"title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j517",
"title": "Towards Unified Surgical Skill Assessment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j517/1yeJPNahaj6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
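The BIBM 2019 record above predicts 30-day mortality from a handful of referral features (age, sex, door-to-balloon time, call time, pain time, activation status). As a hedged sketch, a plain logistic regression on synthetic placeholder columns; the column names, data, and outcome mechanism below are invented for illustration and are not the paper's dataset or model:

```python
# Hedged sketch: cross-validated logistic regression on synthetic referral-
# style features; everything here is a placeholder, not the study's data.
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(1)
n = 500
df = pd.DataFrame({
    "age": rng.normal(65, 12, n),
    "sex": rng.integers(0, 2, n),
    "door_to_balloon_min": rng.normal(90, 30, n),
    "call_to_door_min": rng.normal(60, 20, n),
    "pain_to_call_min": rng.normal(120, 60, n),
    "activated": rng.integers(0, 2, n),
})
# Synthetic outcome loosely tied to age and treatment delay, for illustration.
logit = 0.05 * (df["age"] - 65) + 0.01 * (df["door_to_balloon_min"] - 90) - 2.0
y = (rng.random(n) < 1.0 / (1.0 + np.exp(-logit))).astype(int)

model = LogisticRegression(max_iter=1000)
print("mean 5-fold AUC:", cross_val_score(model, df, y, cv=5,
                                          scoring="roc_auc").mean())
```

With so few features, a linear model of this kind is a natural baseline, and cross-validated AUC is a fairer summary than the single accuracy figure quoted in parentheses.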
{
"proceeding": {
"id": "12OmNvRU0cK",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB7cjhR",
"doi": "10.1109/ISMAR.2016.22",
"title": "Practical and Precise Projector-Camera Calibration",
"normalizedTitle": "Practical and Precise Projector-Camera Calibration",
"abstract": "Projectors are important display devices for large scale augmented reality applications. However, precisely calibrating projectors with large focus distances implies a trade-off between practicality and accuracy. People either need a huge calibration board or a precise 3D model [12]. In this paper, we present a practical projectorcamera calibration method to solve this problem. The user only needs a small calibration board to calibrate the system regardless of the focus distance of the projector. Results show that the rootmean-squared re-projection error (RMSE) for a 450cm projection distance is only about 4mm, even though it is calibrated using a small B4 (250×353mm) calibration board.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projectors are important display devices for large scale augmented reality applications. However, precisely calibrating projectors with large focus distances implies a trade-off between practicality and accuracy. People either need a huge calibration board or a precise 3D model [12]. In this paper, we present a practical projectorcamera calibration method to solve this problem. The user only needs a small calibration board to calibrate the system regardless of the focus distance of the projector. Results show that the rootmean-squared re-projection error (RMSE) for a 450cm projection distance is only about 4mm, even though it is calibrated using a small B4 (250×353mm) calibration board.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projectors are important display devices for large scale augmented reality applications. However, precisely calibrating projectors with large focus distances implies a trade-off between practicality and accuracy. People either need a huge calibration board or a precise 3D model [12]. In this paper, we present a practical projectorcamera calibration method to solve this problem. The user only needs a small calibration board to calibrate the system regardless of the focus distance of the projector. Results show that the rootmean-squared re-projection error (RMSE) for a 450cm projection distance is only about 4mm, even though it is calibrated using a small B4 (250×353mm) calibration board.",
"fno": "3641a063",
"keywords": [
"Calibration",
"Cameras",
"Lead",
"Augmented Reality",
"Three Dimensional Displays",
"Distortion",
"Robustness",
"And Virtual Realities",
"H 5 1 INFORMATION INTERFACES AND PRESENTATION E G",
"HCI Multimedia Information Systems Artificial",
"Augmented"
],
"authors": [
{
"affiliation": null,
"fullName": "Liming Yang",
"givenName": "Liming",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jean-Marie Normand",
"givenName": "Jean-Marie",
"surname": "Normand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guillaume Moreau",
"givenName": "Guillaume",
"surname": "Moreau",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "63-70",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3641-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3641a054",
"articleId": "12OmNrFTr6j",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3641a071",
"articleId": "12OmNBrV1TN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2011/0529/0/05981726",
"title": "Fully automatic multi-projector calibration with an uncalibrated camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011885",
"title": "Novel projector calibration approaches of multi-resolution display",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011885/12OmNCd2rEL",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204317",
"title": "Geometric video projector auto-calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06475056",
"title": "Geometric calibration for a multi-camera-projector system",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06475056/12OmNvBrgGd",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543487",
"title": "Projector optical distortion calibration using Gray code patterns",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543487/12OmNxWcHf2",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810996",
"title": "A Distributed Cooperative Framework for Continuous Multi-Projector Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810996/12OmNzV70vz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
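The ISMAR 2016 record above quotes a root-mean-squared re-projection error of about 4 mm at a 450 cm projection distance. The sketch below computes that standard metric for a made-up point set and pose via OpenCV's projectPoints; the intrinsics, distortion, and noise level are illustrative assumptions, not the authors' calibration pipeline:

```python
# Standard RMS re-projection error on a fabricated point set and pose.
import numpy as np
import cv2

rng = np.random.default_rng(0)
obj_pts = rng.random((50, 3)).astype(np.float32)          # fake 3D board points
K = np.array([[1000., 0., 640.],                          # assumed intrinsics
              [0., 1000., 360.],
              [0., 0., 1.]])
dist = np.zeros(5)                                        # assume no distortion
rvec = np.array([0.01, 0.02, 0.0])                        # assumed pose
tvec = np.array([0.0, 0.0, 2.0])

proj, _ = cv2.projectPoints(obj_pts, rvec, tvec, K, dist)
proj = proj.reshape(-1, 2)
observed = proj + rng.normal(scale=0.5, size=proj.shape)  # noisy detections
rmse = np.sqrt(np.mean(np.sum((proj - observed) ** 2, axis=1)))
print(f"RMS re-projection error: {rmse:.3f} px")
```

Note the unit difference: the sketch measures RMSE in pixels on the image plane, whereas the record states it in millimetres on the projection surface, which requires scaling by the projection geometry.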
{
"proceeding": {
"id": "12OmNwJPMYe",
"title": "CVPR 2011 WORKSHOPS",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBzRNuv",
"doi": "10.1109/CVPRW.2011.5981781",
"title": "Simultaneous self-calibration of a projector and a camera using structured light",
"normalizedTitle": "Simultaneous self-calibration of a projector and a camera using structured light",
"abstract": "We propose a method for geometric calibration of an active vision system, composed of a projector and a camera, using structured light projection. Unlike existing methods of self-calibration for projector-camera systems, our method estimates the intrinsic parameters of both the projector and the camera as well as extrinsic parameters except a global scale without any calibration apparatus such as a checker-pattern board. Our method is based on the decomposition of a radial fundamental matrix into intrinsic and extrinsic parameters. Dense and accurate correspondences are obtained utilizing structured light patterns consisting of Gray code and phase-shifting sinusoidal code. To alleviate the sensitivity issue in estimating and decomposing the radial fundamental matrix, we propose an optimization approach that guarantees the possible solution using a prior for the principal points. We demonstrate the stability of our method using several examples and evaluate the system quantitatively and qualitatively.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a method for geometric calibration of an active vision system, composed of a projector and a camera, using structured light projection. Unlike existing methods of self-calibration for projector-camera systems, our method estimates the intrinsic parameters of both the projector and the camera as well as extrinsic parameters except a global scale without any calibration apparatus such as a checker-pattern board. Our method is based on the decomposition of a radial fundamental matrix into intrinsic and extrinsic parameters. Dense and accurate correspondences are obtained utilizing structured light patterns consisting of Gray code and phase-shifting sinusoidal code. To alleviate the sensitivity issue in estimating and decomposing the radial fundamental matrix, we propose an optimization approach that guarantees the possible solution using a prior for the principal points. We demonstrate the stability of our method using several examples and evaluate the system quantitatively and qualitatively.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a method for geometric calibration of an active vision system, composed of a projector and a camera, using structured light projection. Unlike existing methods of self-calibration for projector-camera systems, our method estimates the intrinsic parameters of both the projector and the camera as well as extrinsic parameters except a global scale without any calibration apparatus such as a checker-pattern board. Our method is based on the decomposition of a radial fundamental matrix into intrinsic and extrinsic parameters. Dense and accurate correspondences are obtained utilizing structured light patterns consisting of Gray code and phase-shifting sinusoidal code. To alleviate the sensitivity issue in estimating and decomposing the radial fundamental matrix, we propose an optimization approach that guarantees the possible solution using a prior for the principal points. We demonstrate the stability of our method using several examples and evaluate the system quantitatively and qualitatively.",
"fno": "05981781",
"keywords": [
"Calibration",
"Cameras",
"Computer Vision",
"Geometry",
"Simultaneous Self Calibration",
"Camera",
"Structured Light",
"Geometric Calibration",
"Active Vision System",
"Projector Camera System",
"Intrinsic Parameter",
"Calibration Apparatus",
"Checker Pattern Board",
"Radial Fundamental Matrix",
"Extrinsic Parameters",
"Gray Code",
"Phase Shifting Sinusoidal Code",
"Radial Fundamental Matrix",
"Cameras",
"Calibration",
"Lenses",
"Reflective Binary Codes",
"Equations",
"Matrix Decomposition",
"Mathematical Model"
],
"authors": [
{
"affiliation": "National Institute of Advanced Industrial Science and Technology, 2-3-26 Aomi, Koto-ku, Tokyo 135-0064, Japan",
"fullName": "Shuntaro Yamazaki",
"givenName": "Shuntaro",
"surname": "Yamazaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Advanced Industrial Science and Technology, 2-3-26 Aomi, Koto-ku, Tokyo 135-0064, Japan",
"fullName": "Masaaki Mochimaru",
"givenName": "Masaaki",
"surname": "Mochimaru",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University, 5000 Forbes Avenue, Pittsburgh, PA 15213, USA",
"fullName": "Takeo Kanade",
"givenName": "Takeo",
"surname": "Kanade",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-06-01T00:00:00",
"pubType": "proceedings",
"pages": "60-67",
"year": "2011",
"issn": "2160-7508",
"isbn": "978-1-4577-0529-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05981780",
"articleId": "12OmNy49sE3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05981782",
"articleId": "12OmNypIYDa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981726",
"title": "Fully automatic multi-projector calibration with an uncalibrated camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbg/2005/20/0/01500323",
"title": "A practical structured light acquisition system for point-based geometry and texture",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500323/12OmNCdTeQ0",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d596",
"title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d596/12OmNwpGgNQ",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543487",
"title": "Projector optical distortion calibration using Gray code patterns",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543487/12OmNxWcHf2",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a320",
"title": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07164353",
"title": "On-Site Semi-Automatic Calibration and Registration of a Projector-Camera System Using Arbitrary Objects with Known Geometry",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07164353/13rRUEgs2M6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
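The CVPRW 2011 record above obtains dense projector-camera correspondences from Gray-code plus phase-shifting patterns. As a hedged illustration of the Gray-code half only (phase shifting and the radial-fundamental-matrix decomposition are beyond this sketch), the following generates binary-reflected stripe images and inverts the code; resolution and bit depth are arbitrary choices:

```python
# Gray-code stripe generation and per-pixel decoding back to column indices.
import numpy as np

def gray_code_patterns(width, height, n_bits):
    """Return n_bits binary stripe images encoding each projector column."""
    cols = np.arange(width)
    gray = cols ^ (cols >> 1)             # binary-reflected Gray code
    return [np.tile(((gray >> b) & 1).astype(np.uint8) * 255, (height, 1))
            for b in range(n_bits - 1, -1, -1)]

def decode_columns(bit_planes):
    """Invert the code: per-pixel bit planes (MSB first) -> column indices."""
    g = 0
    for b in bit_planes:
        g = (g << 1) | b                  # reassemble the Gray value
    mask = g >> 1
    while np.any(mask):                   # convert Gray back to binary
        g ^= mask
        mask >>= 1
    return g

pats = gray_code_patterns(1024, 768, 10)  # 2**10 columns exactly
cols = decode_columns([(p // 255).astype(np.int64) for p in pats])
assert (cols == np.arange(1024)).all()    # round-trip sanity check
```

Gray code is used here because adjacent columns differ in a single bit, so a thresholding error at a stripe boundary shifts the decoded index by at most one column.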
{
"proceeding": {
"id": "12OmNvpw7he",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "1",
"displayVolume": "2",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCb3fwi",
"doi": "10.1109/ICPR.2004.1333994",
"title": "Auto-Calibration of Multi-Projector Display Walls",
"normalizedTitle": "Auto-Calibration of Multi-Projector Display Walls",
"abstract": "By treating projectors as pin-hole cameras, we show it is possible to calibrate the projectors of a casually-aligned, multi-projector display wall using the principles of planar auto-calibration. We also use a pose estimation technique for planar scenes to reconstruct the relative pose of a calibration camera, the projectors and the plane they project on. Together with assumptions about the pose of the camera, we use the reconstruction to automatically compute the projector-display homographies needed to render properly scaled and oriented imagery on the display wall. The main contribution of this paper is thus to provide a fully automated approach to calibrate a multi-projector display wall without the need for fiducials or interaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "By treating projectors as pin-hole cameras, we show it is possible to calibrate the projectors of a casually-aligned, multi-projector display wall using the principles of planar auto-calibration. We also use a pose estimation technique for planar scenes to reconstruct the relative pose of a calibration camera, the projectors and the plane they project on. Together with assumptions about the pose of the camera, we use the reconstruction to automatically compute the projector-display homographies needed to render properly scaled and oriented imagery on the display wall. The main contribution of this paper is thus to provide a fully automated approach to calibrate a multi-projector display wall without the need for fiducials or interaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "By treating projectors as pin-hole cameras, we show it is possible to calibrate the projectors of a casually-aligned, multi-projector display wall using the principles of planar auto-calibration. We also use a pose estimation technique for planar scenes to reconstruct the relative pose of a calibration camera, the projectors and the plane they project on. Together with assumptions about the pose of the camera, we use the reconstruction to automatically compute the projector-display homographies needed to render properly scaled and oriented imagery on the display wall. The main contribution of this paper is thus to provide a fully automated approach to calibrate a multi-projector display wall without the need for fiducials or interaction.",
"fno": "212810014",
"keywords": [],
"authors": [
{
"affiliation": "University of North Carolina at Chapel Hill",
"fullName": "Andrew Raij",
"givenName": "Andrew",
"surname": "Raij",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Chapel Hill",
"fullName": "Marc Pollefeys",
"givenName": "Marc",
"surname": "Pollefeys",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-08-01T00:00:00",
"pubType": "proceedings",
"pages": "14-17",
"year": "2004",
"issn": "1051-4651",
"isbn": "0-7695-2128-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "212810010",
"articleId": "12OmNzWfpa3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "212810018",
"articleId": "12OmNAlNiCl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2004/2244/0/01410480",
"title": "A survey of multi-projector tiled display wall construction",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410480/12OmNAWH9up",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a063",
"title": "Practical and Precise Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270464",
"title": "Geometric Modeling and Calibration of Planar Multi-Projector Displays Using Rational Bezier Patches",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270464/12OmNBQkx7b",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981726",
"title": "Fully automatic multi-projector calibration with an uncalibrated camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204317",
"title": "Geometric video projector auto-calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444797",
"title": "Auto-calibration of cylindrical multi-projector systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444797/12OmNviHKkd",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2005/2660/0/237230113",
"title": "Automatic Projector Calibration Using Self-Identifying Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2005/237230113/12OmNyRg4zs",
"parentPublication": {
"id": "proceedings/cvprw/2005/2660/0",
"title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05) - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498chen",
"title": "Scalable Alignment of Large-Format Multi-Projector Displays Using Camera Homography Trees",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498chen/12OmNzh5yZn",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1101",
"title": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1101/13rRUwInvJ9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/04/mcg2005040024",
"title": "Tools and Applications for Large-Scale Display Walls",
"doi": null,
"abstractUrl": "/magazine/cg/2005/04/mcg2005040024/13rRUxYINaF",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
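The record above reduces display-wall rendering to projector-display homographies. As a hedged illustration of just that final step, not the paper's camera-based planar auto-calibration itself, the sketch below estimates a homography from four hypothetical correspondences between projector pixels and display-plane coordinates, then pre-warps a frame so it would appear correctly scaled and oriented on the wall.

```python
# A minimal sketch, assuming known point correspondences; not the paper's
# auto-calibration pipeline. All coordinates are hypothetical.
import numpy as np
import cv2

# (u, v) pixels sent to a 1024x768 projector, and where they land on the
# display plane, expressed in display coordinates (as a camera would recover).
proj_pts = np.float32([[0, 0], [1023, 0], [1023, 767], [0, 767]])
wall_pts = np.float32([[120, 80], [1900, 95], [1880, 1050], [140, 1030]])

# Projector-display homography: maps projector pixels onto the wall plane.
H, _ = cv2.findHomography(proj_pts, wall_pts)

# Render properly scaled/oriented imagery by pre-warping with the inverse:
# content defined in display coordinates is resampled into projector pixels.
frame = np.zeros((1080, 1920, 3), np.uint8)        # dummy display-space frame
prewarped = cv2.warpPerspective(frame, np.linalg.inv(H), (1024, 768))
```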
{
"proceeding": {
"id": "12OmNrNh0uC",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"acronym": "3dimpvt",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx0RIZY",
"doi": "10.1109/3DIMPVT.2012.77",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"normalizedTitle": "Simple, Accurate, and Robust Projector-Camera Calibration",
"abstract": "Structured-light systems are simple and effective tools to acquire 3D models. Built with off-the-shelf components, a data projector and a camera, they are easy to deploy and compare in precision with expensive laser scanners. But such a high precision is only possible if camera and projector are both accurately calibrated. Robust calibration methods are well established for cameras but, while cameras and projectors can both be described with the same mathematical model, it is not clear how to adapt these methods to projectors. In consequence, many of the proposed projector calibration techniques make use of a simplified model, neglecting lens distortion, resulting in loss of precision. In this paper, we present a novel method to estimate the image coordinates of 3D points in the projector image plane. The method relies on an uncalibrated camera and makes use of local homographies to reach sub-pixel precision. As a result, any camera model can be used to describe the projector, including the extended pinhole model with radial and tangential distortion coefficients, or even those with more complex lens distortion models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Structured-light systems are simple and effective tools to acquire 3D models. Built with off-the-shelf components, a data projector and a camera, they are easy to deploy and compare in precision with expensive laser scanners. But such a high precision is only possible if camera and projector are both accurately calibrated. Robust calibration methods are well established for cameras but, while cameras and projectors can both be described with the same mathematical model, it is not clear how to adapt these methods to projectors. In consequence, many of the proposed projector calibration techniques make use of a simplified model, neglecting lens distortion, resulting in loss of precision. In this paper, we present a novel method to estimate the image coordinates of 3D points in the projector image plane. The method relies on an uncalibrated camera and makes use of local homographies to reach sub-pixel precision. As a result, any camera model can be used to describe the projector, including the extended pinhole model with radial and tangential distortion coefficients, or even those with more complex lens distortion models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Structured-light systems are simple and effective tools to acquire 3D models. Built with off-the-shelf components, a data projector and a camera, they are easy to deploy and compare in precision with expensive laser scanners. But such a high precision is only possible if camera and projector are both accurately calibrated. Robust calibration methods are well established for cameras but, while cameras and projectors can both be described with the same mathematical model, it is not clear how to adapt these methods to projectors. In consequence, many of the proposed projector calibration techniques make use of a simplified model, neglecting lens distortion, resulting in loss of precision. In this paper, we present a novel method to estimate the image coordinates of 3D points in the projector image plane. The method relies on an uncalibrated camera and makes use of local homographies to reach sub-pixel precision. As a result, any camera model can be used to describe the projector, including the extended pinhole model with radial and tangential distortion coefficients, or even those with more complex lens distortion models.",
"fno": "4873a464",
"keywords": [
"Local Homography",
"Structured Light",
"Camera",
"Projector",
"Calibration"
],
"authors": [
{
"affiliation": null,
"fullName": "Daniel Moreno",
"givenName": "Daniel",
"surname": "Moreno",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gabriel Taubin",
"givenName": "Gabriel",
"surname": "Taubin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dimpvt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-10-01T00:00:00",
"pubType": "proceedings",
"pages": "464-471",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-4470-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4873a456",
"articleId": "12OmNy2rS5x",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4873a472",
"articleId": "12OmNroij7C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/1999/5897/0/58970026",
"title": "Multi-Projector Displays Using Camera-Based Registration",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/58970026/12OmNAfy7KW",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2001/1272/2/127220504",
"title": "A Self-Correcting Projector",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127220504/12OmNB8Cj43",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/2",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a307",
"title": "Adaptive Image Projection onto Non-planar Screen Using Projector-Camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a307/12OmNs0C9zQ",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2011/4369/0/4369a397",
"title": "A Multi-camera, Multi-projector Super-Resolution Framework for Structured Light",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2011/4369a397/12OmNxcMSiL",
"parentPublication": {
"id": "proceedings/3dimpvt/2011/4369/0",
"title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608b285",
"title": "Research of Color Correction Algorithm for Multi-projector Screen Based on Projector-Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608b285/12OmNxwENpp",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a320",
"title": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccet/2009/3521/1/3521a462",
"title": "A Novel Binary Code Based Projector-Camera System Registration Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccet/2009/3521a462/12OmNzYwcew",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498chen",
"title": "Scalable Alignment of Large-Format Multi-Projector Displays Using Camera Homography Trees",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498chen/12OmNzh5yZn",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2005/12/i1845",
"title": "Autocalibration of a Projector-Camera System",
"doi": null,
"abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/10/ttp2008101831",
"title": "Robust and Accurate Visual Echo Cancelation in a Full-duplex Projector-Camera System",
"doi": null,
"abstractUrl": "/journal/tp/2008/10/ttp2008101831/13rRUxjQyip",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
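The key idea in the abstract above is that local homographies, fitted to decoded structured-light correspondences around each checkerboard corner, carry sub-pixel camera corners into the projector image plane. A minimal sketch of that single step follows; the neighborhood correspondences and the synthetic check are hypothetical stand-ins, and the subsequent projector calibration would proceed with a standard planar-target routine such as cv2.calibrateCamera.

```python
# Sketch of the local-homography step only, assuming the structured-light
# decoding already pairs camera pixels with the projector pixels that lit
# them. The synthetic data at the bottom is purely illustrative.
import numpy as np
import cv2

def projector_coords_of_corner(corner_cam, cam_pts, proj_pts):
    """Map one sub-pixel checkerboard corner from the camera image into the
    projector image plane via a homography fitted to nearby correspondences.

    cam_pts / proj_pts: (N, 2) decoded correspondences from a small
    neighborhood around the corner (N >= 4).
    """
    H, _ = cv2.findHomography(np.float32(cam_pts), np.float32(proj_pts), cv2.RANSAC)
    p = H @ np.array([corner_cam[0], corner_cam[1], 1.0])
    return p[:2] / p[2]

cam = np.float32([[x, y] for x in range(5) for y in range(5)])
prj = cam * 2.0 + 10.0                                     # synthetic ground truth
print(projector_coords_of_corner((2.5, 2.5), cam, prj))    # ~ [15. 15.]
```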
{
"proceeding": {
"id": "12OmNyNQSGO",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxYtu7r",
"doi": "10.1109/CVPR.2007.383477",
"title": "Projector Calibration using Arbitrary Planes and Calibrated Camera",
"normalizedTitle": "Projector Calibration using Arbitrary Planes and Calibrated Camera",
"abstract": "In this paper, an easy calibration method for projector is proposed. The calibration handled in this paper is projective relation between 3D space and 2D pattern, and is not correction of trapezoid distortion in projected pattern. In projector-camera systems, especially for 3D measurement, such calibration is the basis of process. The projection from projector can be modeled as inverse projection of the pinhole camera, which is generally considered as perspective projection. In the existing systems, some special objects or devices are often used to calibrate projector, so that 3D-2D projection map can be measured for typical camera calibration methods. The proposed method utilizes projective geometry between camera and projector, so that it requires only pre-calibrated camera and a plane. It is easy to practice, easy to calculate, and reasonably accurate.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, an easy calibration method for projector is proposed. The calibration handled in this paper is projective relation between 3D space and 2D pattern, and is not correction of trapezoid distortion in projected pattern. In projector-camera systems, especially for 3D measurement, such calibration is the basis of process. The projection from projector can be modeled as inverse projection of the pinhole camera, which is generally considered as perspective projection. In the existing systems, some special objects or devices are often used to calibrate projector, so that 3D-2D projection map can be measured for typical camera calibration methods. The proposed method utilizes projective geometry between camera and projector, so that it requires only pre-calibrated camera and a plane. It is easy to practice, easy to calculate, and reasonably accurate.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, an easy calibration method for projector is proposed. The calibration handled in this paper is projective relation between 3D space and 2D pattern, and is not correction of trapezoid distortion in projected pattern. In projector-camera systems, especially for 3D measurement, such calibration is the basis of process. The projection from projector can be modeled as inverse projection of the pinhole camera, which is generally considered as perspective projection. In the existing systems, some special objects or devices are often used to calibrate projector, so that 3D-2D projection map can be measured for typical camera calibration methods. The proposed method utilizes projective geometry between camera and projector, so that it requires only pre-calibrated camera and a plane. It is easy to practice, easy to calculate, and reasonably accurate.",
"fno": "04270475",
"keywords": [],
"authors": [
{
"affiliation": "Digital Human Research Center, National Institute of Advanced Industrial Science and Technology, Jap",
"fullName": "Makoto Kimura",
"givenName": "Makoto",
"surname": "Kimura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Digital Human Research Center, National Institute of Advanced Industrial Science and Technology, Jap",
"fullName": "Masaaki Mochimaru",
"givenName": "Masaaki",
"surname": "Mochimaru",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Digital Human Research Center, National Institute of Advanced Industrial Science and Technology, Jap",
"fullName": "Takeo Kanade",
"givenName": "Takeo",
"surname": "Kanade",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2007",
"issn": null,
"isbn": "1-4244-1179-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04270474",
"articleId": "12OmNvkGW5F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04270476",
"articleId": "12OmNyRPgPU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dim/2003/1991/0/19910217",
"title": "Multi-projectors for arbitrary surfaces without explicit calibration nor reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/2003/19910217/12OmNAoDhVM",
"parentPublication": {
"id": "proceedings/3dim/2003/1991/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2009/3994/0/05204317",
"title": "Geometric video projector auto-calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC",
"parentPublication": {
"id": "proceedings/cvprw/2009/3994/0",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457d596",
"title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457d596/12OmNwpGgNQ",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2008/2242/0/04587788",
"title": "Automatic calibration of a single-projector catadioptric display system",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2008/04587788/12OmNy314cl",
"parentPublication": {
"id": "proceedings/cvpr/2008/2242/0",
"title": "2008 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a320",
"title": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a828",
"title": "Geometric Calibration with Multi-Viewpoints for Multi-Projector Systems on Arbitrary Shapes Using Homography and Pixel Maps",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a828/1CJcMwF5tO8",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a261",
"title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
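The method above needs only a calibrated camera and a plane because any camera pixel observing a projected feature can be back-projected onto that plane, yielding the 3D-2D pairs that let the projector be calibrated as an inverse pinhole camera. The sketch below shows only that ray-plane step; the intrinsics K and the plane parameters are hypothetical placeholders.

```python
# Minimal geometric sketch; K and the plane are hypothetical placeholders.
import numpy as np

K = np.array([[800.0, 0.0, 320.0],      # calibrated camera intrinsics
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
n, d = np.array([0.0, 0.0, 1.0]), -1.5  # plane n.X + d = 0, camera frame

def backproject_to_plane(pixel):
    """Intersect the camera ray through `pixel` with the known plane."""
    ray = np.linalg.inv(K) @ np.array([pixel[0], pixel[1], 1.0])
    t = -d / (n @ ray)                  # camera center is the origin
    return t * ray                      # 3D point on the plane

# Pairing each such 3D point with the projector pixel that produced the
# observed feature yields standard 3D-2D input for pinhole calibration.
print(backproject_to_plane((400.0, 260.0)))
```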
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxdm4Cp",
"doi": "10.1109/ICCV.2015.407",
"title": "Active One-Shot Scan for Wide Depth Range Using a Light Field Projector Based on Coded Aperture",
"normalizedTitle": "Active One-Shot Scan for Wide Depth Range Using a Light Field Projector Based on Coded Aperture",
"abstract": "The central projection model commonly used to model cameras as well as projectors, results in similar advantages and disadvantages in both types of system. Considering the case of active stereo systems using a projector and camera setup, a central projection model creates several problems, among them, narrow depth range and necessity of wide baseline are crucial. In the paper, we solve the problems by introducing a light field projector, which can project a depth-dependent pattern. The light field projector is realized by attaching a coded aperture with a high frequency mask in front of the lens of the video projector, which also projects a high frequency pattern. Because the light field projector cannot be approximated by a thin lens model and a precise calibration method is not established yet, an image-based approach is proposed to apply a stereo technique to the system. Although image-based techniques usually require a large database and often imply heavy computational costs, we propose a hierarchical approach and a feature-based search for solution. In the experiments, it is confirmed that our method can accurately recover the dense shape of curved and textured objects for a wide range of depths from a single captured image.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The central projection model commonly used to model cameras as well as projectors, results in similar advantages and disadvantages in both types of system. Considering the case of active stereo systems using a projector and camera setup, a central projection model creates several problems, among them, narrow depth range and necessity of wide baseline are crucial. In the paper, we solve the problems by introducing a light field projector, which can project a depth-dependent pattern. The light field projector is realized by attaching a coded aperture with a high frequency mask in front of the lens of the video projector, which also projects a high frequency pattern. Because the light field projector cannot be approximated by a thin lens model and a precise calibration method is not established yet, an image-based approach is proposed to apply a stereo technique to the system. Although image-based techniques usually require a large database and often imply heavy computational costs, we propose a hierarchical approach and a feature-based search for solution. In the experiments, it is confirmed that our method can accurately recover the dense shape of curved and textured objects for a wide range of depths from a single captured image.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The central projection model commonly used to model cameras as well as projectors, results in similar advantages and disadvantages in both types of system. Considering the case of active stereo systems using a projector and camera setup, a central projection model creates several problems, among them, narrow depth range and necessity of wide baseline are crucial. In the paper, we solve the problems by introducing a light field projector, which can project a depth-dependent pattern. The light field projector is realized by attaching a coded aperture with a high frequency mask in front of the lens of the video projector, which also projects a high frequency pattern. Because the light field projector cannot be approximated by a thin lens model and a precise calibration method is not established yet, an image-based approach is proposed to apply a stereo technique to the system. Although image-based techniques usually require a large database and often imply heavy computational costs, we propose a hierarchical approach and a feature-based search for solution. In the experiments, it is confirmed that our method can accurately recover the dense shape of curved and textured objects for a wide range of depths from a single captured image.",
"fno": "8391d568",
"keywords": [
"Apertures",
"Cameras",
"Shape",
"Lenses",
"Image Reconstruction",
"Optical Imaging",
"Convolution"
],
"authors": [
{
"affiliation": null,
"fullName": "Hiroshi Kawasaki",
"givenName": "Hiroshi",
"surname": "Kawasaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Satoshi Ono",
"givenName": "Satoshi",
"surname": "Ono",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuki Horita",
"givenName": "Yuki",
"surname": "Horita",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuki Shiba",
"givenName": "Yuki",
"surname": "Shiba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ryo Furukawa",
"givenName": "Ryo",
"surname": "Furukawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shinsaku Hiura",
"givenName": "Shinsaku",
"surname": "Hiura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "3568-3576",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391d559",
"articleId": "12OmNzVoBvI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391d577",
"articleId": "12OmNxzMnOy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2013/2840/0/2840a489",
"title": "A Rotational Stereo Model Based on XSlit Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a489/12OmNAPjA5D",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460424",
"title": "Coded aperture for projector and camera for robust 3D measurement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460424/12OmNBpVQ2Y",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543496",
"title": "Selection of temporally dithered codes for increasing virtual depth of field in structured light systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543496/12OmNqJHFGd",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460391",
"title": "Direct imaging with printed microlens arrays",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460391/12OmNs0TL48",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a910",
"title": "Depth Camera Based on Color-Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a910/12OmNvm6VHm",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528303",
"title": "High-rank coded aperture projection for extended depth of field",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528303/12OmNxUMHoq",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c672",
"title": "Time-Multiplexed Coded Aperture Imaging: Learned Coded Aperture and Pixel Exposures for Compressive Imaging Systems",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c672/1BmLc0RAlck",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930626",
"title": "A Monocular Projector-Camera System using Modular Architecture",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930626/1HMOYkaK9Ww",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798245",
"title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
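Since the coded-aperture light field projector above defies a thin-lens calibration model, depth is recovered in an image-based way: observed pattern patches are matched against reference appearances captured at known depths. The toy lookup below, using plain normalized cross-correlation over a fabricated database, only gestures at that idea; the authors' hierarchical, feature-based search is considerably more elaborate.

```python
# Toy image-based depth lookup via normalized cross-correlation; the
# database, patch size, and depths are fabricated for illustration.
import numpy as np

def ncc(a, b):
    a = a - a.mean()
    b = b - b.mean()
    return float((a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))

def match_depth(patch, database):
    """database: list of (depth_mm, reference_patch) pairs."""
    scores = [(ncc(patch, ref), depth) for depth, ref in database]
    return max(scores)[1]               # depth whose reference correlates best

rng = np.random.default_rng(0)
db = [(z, rng.random((16, 16))) for z in (500, 600, 700, 800)]
obs = db[2][1] + 0.05 * rng.random((16, 16))   # noisy view of the 700 mm ref
assert match_depth(obs, db) == 700
```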
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxxdZCj",
"doi": "10.1109/CVPR.2017.21",
"title": "A Practical Method for Fully Automatic Intrinsic Camera Calibration Using Directionally Encoded Light",
"normalizedTitle": "A Practical Method for Fully Automatic Intrinsic Camera Calibration Using Directionally Encoded Light",
"abstract": "Calibrating the intrinsic properties of a camera is one of the fundamental tasks required for a variety of computer vision and image processing tasks. The precise measurement of focal length, location of the principal point as well as distortion parameters of the lens is crucial, for example, for 3D reconstruction [27]. Although a variety of methods exist to achieve this goal, they are often cumbersome to carry out, require substantial manual interaction, expert knowledge, and a significant operating volume. We propose a novel calibration method based on the usage of directionally encoded light rays for estimating the intrinsic parameters. It enables a fully automatic calibration with a small device mounted close to the front lens element and still enables an accuracy comparable to standard methods even when the lens is focused up to infinity. Our method overcomes the mentioned limitations since it guarantees an accurate calibration without any human intervention while requiring only a limited amount of space. Besides that, the approach also allows to estimate the distance of the focal plane as well as the size of the aperture. We demonstrate the advantages of the proposed method by evaluating several camera/lens configurations using prototypical devices.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Calibrating the intrinsic properties of a camera is one of the fundamental tasks required for a variety of computer vision and image processing tasks. The precise measurement of focal length, location of the principal point as well as distortion parameters of the lens is crucial, for example, for 3D reconstruction [27]. Although a variety of methods exist to achieve this goal, they are often cumbersome to carry out, require substantial manual interaction, expert knowledge, and a significant operating volume. We propose a novel calibration method based on the usage of directionally encoded light rays for estimating the intrinsic parameters. It enables a fully automatic calibration with a small device mounted close to the front lens element and still enables an accuracy comparable to standard methods even when the lens is focused up to infinity. Our method overcomes the mentioned limitations since it guarantees an accurate calibration without any human intervention while requiring only a limited amount of space. Besides that, the approach also allows to estimate the distance of the focal plane as well as the size of the aperture. We demonstrate the advantages of the proposed method by evaluating several camera/lens configurations using prototypical devices.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Calibrating the intrinsic properties of a camera is one of the fundamental tasks required for a variety of computer vision and image processing tasks. The precise measurement of focal length, location of the principal point as well as distortion parameters of the lens is crucial, for example, for 3D reconstruction [27]. Although a variety of methods exist to achieve this goal, they are often cumbersome to carry out, require substantial manual interaction, expert knowledge, and a significant operating volume. We propose a novel calibration method based on the usage of directionally encoded light rays for estimating the intrinsic parameters. It enables a fully automatic calibration with a small device mounted close to the front lens element and still enables an accuracy comparable to standard methods even when the lens is focused up to infinity. Our method overcomes the mentioned limitations since it guarantees an accurate calibration without any human intervention while requiring only a limited amount of space. Besides that, the approach also allows to estimate the distance of the focal plane as well as the size of the aperture. We demonstrate the advantages of the proposed method by evaluating several camera/lens configurations using prototypical devices.",
"fno": "0457a125",
"keywords": [
"Calibration",
"Cameras",
"Computer Vision",
"Focal Planes",
"Fully Automatic Intrinsic Camera Calibration",
"Intrinsic Properties",
"Computer Vision",
"Image Processing Tasks",
"Focal Length",
"Distortion Parameters",
"Directionally Encoded Light Rays",
"Intrinsic Parameters",
"Lens Element",
"Focal Plane",
"Lens Configurations",
"Calibration",
"Cameras",
"Lenses",
"Apertures",
"Optical Distortion",
"Reliability"
],
"authors": [
{
"affiliation": null,
"fullName": "Mahdi Abbaspour Tehrani",
"givenName": "Mahdi Abbaspour",
"surname": "Tehrani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Thabo Beeler",
"givenName": "Thabo",
"surname": "Beeler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anselm Grundhöfer",
"givenName": "Anselm",
"surname": "Grundhöfer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "125-133",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457a115",
"articleId": "12OmNyOq4RS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457a134",
"articleId": "12OmNxEBz1a",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ecmsm/2013/6298/0/06648945",
"title": "Comparison and error analysis of the standard pin-hole and Scheimpflug camera calibration models",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2013/06648945/12OmNAIMO81",
"parentPublication": {
"id": "proceedings/ecmsm/2013/6298/0",
"title": "2013 IEEE 11th International Workshop of Electronics, Control, Measurement, Signals and their application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a202",
"title": "Non-frontal Camera Calibration Using Focal Stack Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a202/12OmNC943xl",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwecms/2011/398/0/05952376",
"title": "A new method for Scheimpflug camera calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iwecms/2011/05952376/12OmNx57HFV",
"parentPublication": {
"id": "proceedings/iwecms/2011/398/0",
"title": "2011 10th International Workshop on Electronics, Control, Measurement and Signals (ECMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391c345",
"title": "On the Equivalence of Moving Entrance Pupil and Radial Distortion for Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c345/12OmNyshmIc",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341087",
"title": "Self-calibration of the intrinsic parameters of cameras for active vision systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341087/12OmNz5JCfy",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/08/i1322",
"title": "A Variational Approach to Problems in Calibration of Multiple Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2007/08/i1322/13rRUEgs2D4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1996/11/i1105",
"title": "Some Aspects of Zoom Lens Camera Calibration",
"doi": null,
"abstractUrl": "/journal/tp/1996/11/i1105/13rRUNvgz5p",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/02/07432007",
"title": "Geometric Calibration of Micro-Lens-Based Light Field Cameras Using Line Features",
"doi": null,
"abstractUrl": "/journal/tp/2017/02/07432007/13rRUwjXZKZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545604",
"title": "Generic calibration of cameras with non-parallel optical elements",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545604/17D45Xh13t3",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
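The core constraint behind the directionally encoded approach above can be stated simply: if a ray of known direction d in the camera frame is observed at pixel (u, v), then u = f*dx/dz + cx and v = f*dy/dz + cy, so a handful of such observations fix the intrinsics linearly. The sketch below solves exactly this distortion-free toy version with synthetic data; the paper's actual device model and distortion handling are far richer.

```python
# Toy linear solve for (f, cx, cy) from known ray directions and observed
# pixels; a distortion-free simplification, with synthetic data throughout.
import numpy as np

def solve_intrinsics(dirs, pix):
    """dirs: (N, 3) ray directions in the camera frame; pix: (N, 2) pixels."""
    x, y = dirs[:, 0] / dirs[:, 2], dirs[:, 1] / dirs[:, 2]
    A = np.zeros((2 * len(pix), 3))
    A[0::2, 0], A[0::2, 1] = x, 1.0     # u = f*x + cx
    A[1::2, 0], A[1::2, 2] = y, 1.0     # v = f*y + cy
    sol, *_ = np.linalg.lstsq(A, pix.reshape(-1), rcond=None)
    return sol                          # [f, cx, cy]

rng = np.random.default_rng(1)
d = rng.normal(size=(20, 3))
d[:, 2] = np.abs(d[:, 2]) + 1.0         # keep rays in front of the camera
uv = np.stack([900 * d[:, 0] / d[:, 2] + 320,
               900 * d[:, 1] / d[:, 2] + 240], axis=1)
print(solve_intrinsics(d, uv))          # ~ [900. 320. 240.]
```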
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzDehgc",
"doi": "10.1109/ICPR.2010.87",
"title": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"normalizedTitle": "Active Calibration of Camera-Projector Systems Based on Planar Homography",
"abstract": "This paper presents a simple and active calibration technique of camera-projector systems based on planar homography. From the camera image of a planar calibration pattern, we generate a projector image of the pattern through the homography between the camera and the projector. To determine the coordinates of the pattern corners from the view of the projector, we actively project a corner marker from the projector to align the marker with the printed pattern corners. Calibration is done in two steps. First, four outer corners of the pattern are identified. Second, all other inner corners are identified. The pattern image from the projector is then used to calibrate the projector. Experimental results of two types of camera-projector systems show that the projection errors of both camera and projector are less than 1 pixel.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a simple and active calibration technique of camera-projector systems based on planar homography. From the camera image of a planar calibration pattern, we generate a projector image of the pattern through the homography between the camera and the projector. To determine the coordinates of the pattern corners from the view of the projector, we actively project a corner marker from the projector to align the marker with the printed pattern corners. Calibration is done in two steps. First, four outer corners of the pattern are identified. Second, all other inner corners are identified. The pattern image from the projector is then used to calibrate the projector. Experimental results of two types of camera-projector systems show that the projection errors of both camera and projector are less than 1 pixel.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a simple and active calibration technique of camera-projector systems based on planar homography. From the camera image of a planar calibration pattern, we generate a projector image of the pattern through the homography between the camera and the projector. To determine the coordinates of the pattern corners from the view of the projector, we actively project a corner marker from the projector to align the marker with the printed pattern corners. Calibration is done in two steps. First, four outer corners of the pattern are identified. Second, all other inner corners are identified. The pattern image from the projector is then used to calibrate the projector. Experimental results of two types of camera-projector systems show that the projection errors of both camera and projector are less than 1 pixel.",
"fno": "4109a320",
"keywords": [
"Calibration",
"Camera",
"Projector"
],
"authors": [
{
"affiliation": null,
"fullName": "Soon-Yong Park",
"givenName": "Soon-Yong",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Go Gwang Park",
"givenName": "Go Gwang",
"surname": "Park",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "320-323",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109a316",
"articleId": "12OmNCcbEh3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109a324",
"articleId": "12OmNCctfnl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2001/1272/2/127220504",
"title": "A Self-Correcting Projector",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127220504/12OmNB8Cj43",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/2",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a307",
"title": "Adaptive Image Projection onto Non-planar Screen Using Projector-Camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a307/12OmNs0C9zQ",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a464",
"title": "Simple, Accurate, and Robust Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270475",
"title": "Projector Calibration using Arbitrary Planes and Calibrated Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270475/12OmNxYtu7r",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2012/4836/0/4836a007",
"title": "Real-time Continuous Geometric Calibration for Projector-Camera System under Ambient Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2012/4836a007/12OmNzahc85",
"parentPublication": {
"id": "proceedings/icvrv/2012/4836/0",
"title": "2012 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2005/12/i1845",
"title": "Autocalibration of a Projector-Camera System",
"doi": null,
"abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/10/ttp2008101831",
"title": "Robust and Accurate Visual Echo Cancelation in a Full-duplex Projector-Camera System",
"doi": null,
"abstractUrl": "/journal/tp/2008/10/ttp2008101831/13rRUxjQyip",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699178",
"title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
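The two-step active procedure above ends with a single camera-to-projector homography per pose of the pattern plane, which is then used to transfer the printed corners detected by the camera into projector coordinates. A condensed sketch of that transfer with hypothetical marker positions:

```python
# Hypothetical values throughout; only the homography-transfer step is shown.
import numpy as np
import cv2

# Four actively aligned markers: camera pixel -> projector pixel.
cam_marks = np.float32([[101, 95], [512, 90], [520, 400], [96, 410]])
prj_marks = np.float32([[0, 0], [800, 0], [800, 600], [0, 600]])
H_cp, _ = cv2.findHomography(cam_marks, prj_marks)

# Printed-pattern corners detected in the camera image, shape (N, 1, 2).
corners_cam = np.float32([[[150, 140]], [[200, 142]], [[250, 143]]])
corners_prj = cv2.perspectiveTransform(corners_cam, H_cp)
# corners_prj now serves as the projector's image points in the usual
# planar-target calibration routine (e.g., cv2.calibrateCamera).
```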
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1gJ1v0M",
"doi": "10.1109/VR.2018.8446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"normalizedTitle": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"abstract": "We propose a calibration method for deploying a large-scale projection-based floor display system. In our system, multiple projectors are installed on the ceiling of a large indoor space like a gymnasium to achieve a large projection area on the floor. The projection results suffer from both perspective distortion and lens distortion. In this paper, we use projector-camera systems, in which a camera is mounted on each projector, with the “straight lines have to be straight” methodology, to calibrate our projection system. Different from conventional approaches, our method does not use any calibration board and makes no requirement on the overlapping among the projections and the cameras' fields of view.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a calibration method for deploying a large-scale projection-based floor display system. In our system, multiple projectors are installed on the ceiling of a large indoor space like a gymnasium to achieve a large projection area on the floor. The projection results suffer from both perspective distortion and lens distortion. In this paper, we use projector-camera systems, in which a camera is mounted on each projector, with the “straight lines have to be straight” methodology, to calibrate our projection system. Different from conventional approaches, our method does not use any calibration board and makes no requirement on the overlapping among the projections and the cameras' fields of view.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a calibration method for deploying a large-scale projection-based floor display system. In our system, multiple projectors are installed on the ceiling of a large indoor space like a gymnasium to achieve a large projection area on the floor. The projection results suffer from both perspective distortion and lens distortion. In this paper, we use projector-camera systems, in which a camera is mounted on each projector, with the “straight lines have to be straight” methodology, to calibrate our projection system. Different from conventional approaches, our method does not use any calibration board and makes no requirement on the overlapping among the projections and the cameras' fields of view.",
"fno": "08446433",
"keywords": [
"Calibration",
"Cameras",
"Display Instrumentation",
"Optical Projectors",
"Calibration Method",
"Large Scale Projection Based Floor Display System",
"Projection Area",
"Perspective Distortion",
"Lens Distortion",
"Projector Camera Systems",
"Projection System",
"Calibration Board",
"Camera Field Of View",
"Straight Lines Have To Be Straight Methodology",
"Distortion",
"Cameras",
"Calibration",
"Lenses",
"Floors",
"Image Color Analysis",
"Optimization",
"Large Scale Projection",
"Projector Camera System",
"Lens Distortion",
"Calibration",
"Augmented Reality Human Centered Computing X 007 E Displays And Imagers Human Centered Computing X 007 E Mixed Augmented Reality Computing Methodologies X 007 E Camera Calibration"
],
"authors": [
{
"affiliation": "University of Tsukuba, Japan",
"fullName": "Chun Xie",
"givenName": "Chun",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba, Japan",
"fullName": "Hidehiko Shishido",
"givenName": "Hidehiko",
"surname": "Shishido",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba, Japan",
"fullName": "Yoshinari Kameda",
"givenName": "Yoshinari",
"surname": "Kameda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba, Japan",
"fullName": "Kenji Suzuki",
"givenName": "Kenji",
"surname": "Suzuki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tsukuba, Japan",
"fullName": "Itaru Kitahara",
"givenName": "Itaru",
"surname": "Kitahara",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "725-726",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446457",
"articleId": "13bd1fph1yg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446287",
"articleId": "13bd1h03qOm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a063",
"title": "Practical and Precise Projector-Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981781",
"title": "Simultaneous self-calibration of a projector and a camera using structured light",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728917",
"title": "Wearable input/output interface for floor projection using hands and a toe",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728917/12OmNC8dgkV",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007213",
"title": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007213/13rRUxcsYLX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466021",
"title": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466021/14M3DYlzziw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a261",
"title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523844",
"title": "Directionally Decomposing Structured Light for Projector Calibration",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1fKQxqX",
"doi": "10.1109/VR.2018.8446619",
"title": "Touchless Haptic Feedback for VR Rhythm Games",
"normalizedTitle": "Touchless Haptic Feedback for VR Rhythm Games",
"abstract": "Haptics is an important part of the VR space as seen by the plethora of haptic controllers available today. Recent advancements have enabled touchless haptic feedback through the use of focused ultrasound thereby removing the need for a controller. Here, we present the world's first mid-air haptic rhythm game in VR and describe the reasoning behind its interface and gameplay, and in particular, how these were enabled by the effective use of state-of-the-art ultrasonic haptic technology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Haptics is an important part of the VR space as seen by the plethora of haptic controllers available today. Recent advancements have enabled touchless haptic feedback through the use of focused ultrasound thereby removing the need for a controller. Here, we present the world's first mid-air haptic rhythm game in VR and describe the reasoning behind its interface and gameplay, and in particular, how these were enabled by the effective use of state-of-the-art ultrasonic haptic technology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Haptics is an important part of the VR space as seen by the plethora of haptic controllers available today. Recent advancements have enabled touchless haptic feedback through the use of focused ultrasound thereby removing the need for a controller. Here, we present the world's first mid-air haptic rhythm game in VR and describe the reasoning behind its interface and gameplay, and in particular, how these were enabled by the effective use of state-of-the-art ultrasonic haptic technology.",
"fno": "08446619",
"keywords": [
"Computer Games",
"Haptic Interfaces",
"Virtual Reality",
"Touchless Haptic Feedback",
"VR Rhythm Games",
"VR Space",
"Mid Air Haptic Rhythm Game",
"Haptic Controllers",
"Ultrasonic Haptic Technology",
"Haptic Interfaces",
"Games",
"Rhythm",
"Ultrasonic Imaging",
"Road Transportation",
"Human Computer Interaction",
"Haptics",
"Ultrasound",
"HCI",
"VR",
"Rhythm Games",
"H 5 1 Human Computer Interaction HCI Interaction Devices Haptic Devices",
"H 5 2 Human Computer Interaction HCI Interaction Paradigms Virtual Reality"
],
"authors": [
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Orestis Georgiou",
"givenName": "Orestis",
"surname": "Georgiou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Craig Jeffrey",
"givenName": "Craig",
"surname": "Jeffrey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Ziyuan Chen",
"givenName": "Ziyuan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Bao Xiao Tong",
"givenName": "Bao",
"surname": "Xiao Tong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Shing Hei Chan",
"givenName": "Shing",
"surname": "Hei Chan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Boyin Yang",
"givenName": "Boyin",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Adam Harwood",
"givenName": "Adam",
"surname": "Harwood",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ultrahaptics Ltd., Bristol, United Kingdom",
"fullName": "Tom Carter",
"givenName": "Tom",
"surname": "Carter",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "553-554",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446324",
"articleId": "13bd1ftOBDo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446137",
"articleId": "13bd1fKQxs0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2010/4215/0/4215a038",
"title": "Haptic Rendering of Mixed Haptic Effects",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2010/4215a038/12OmNCdk2IT",
"parentPublication": {
"id": "proceedings/cw/2010/4215/0",
"title": "2010 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ivs/2005/8961/0/01505201",
"title": "VR haptic interfaces for teleoperation: an evaluation study",
"doi": null,
"abstractUrl": "/proceedings-article/ivs/2005/01505201/12OmNx5piQE",
"parentPublication": {
"id": "proceedings/ivs/2005/8961/0",
"title": "2005 IEEE Intelligent Vehicles Symposium Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890352",
"title": "Haptic Interface for Center-of-Workspace Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890352/12OmNzTppyy",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446522",
"title": "Touchless Haptic Feedback for Supernatural VR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446522/13bd1fWcuDF",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/04/07113879",
"title": "The Effect of Haptic Support Systems on Driver Performance: A Literature Survey",
"doi": null,
"abstractUrl": "/journal/th/2015/04/07113879/13rRUyuvRoW",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a964",
"title": "Mid-air Haptic Texture Exploration in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a964/1CJeOwwf1Nm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a905",
"title": "Haptics in VR Using Origami-Augmented Drones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a538",
"title": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a538/1JrRaySJ7So",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797906",
"title": "Haptic Force Guided Sound Synthesis in Multisensory Virtual Reality (VR) Simulation for Rigid-Fluid Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797906/1cJ0NFasbcc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09273221",
"title": "Crowd Navigation in VR: Exploring Haptic Rendering of Collisions",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09273221/1pb9BhAe16o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1fWcuD9",
"doi": "10.1109/VR.2018.8446277",
"title": "Batmen Forever: Unified Virtual Hand Metaphor for Consumer VR Setups",
"normalizedTitle": "Batmen Forever: Unified Virtual Hand Metaphor for Consumer VR Setups",
"abstract": "In this work, we present a hand-based natural interaction that allows performing fundamental actions such as moving or controlling objects and climbing ladders. The setup was restricted to available consumer VR technology, aiming to advance towards a practical unified framework for 3D interaction. The strategy was syncing the closest natural movement allowed by the device with primary task actions, either directly or indirectly, creating hypernatural UIs. The prototype allowed successful completion of the three challenges proposed by the 2018 3DUI Contest, as validated by a preliminary user study with participants from the target audience and also from the general public.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we present a hand-based natural interaction that allows performing fundamental actions such as moving or controlling objects and climbing ladders. The setup was restricted to available consumer VR technology, aiming to advance towards a practical unified framework for 3D interaction. The strategy was syncing the closest natural movement allowed by the device with primary task actions, either directly or indirectly, creating hypernatural UIs. The prototype allowed successful completion of the three challenges proposed by the 2018 3DUI Contest, as validated by a preliminary user study with participants from the target audience and also from the general public.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we present a hand-based natural interaction that allows performing fundamental actions such as moving or controlling objects and climbing ladders. The setup was restricted to available consumer VR technology, aiming to advance towards a practical unified framework for 3D interaction. The strategy was syncing the closest natural movement allowed by the device with primary task actions, either directly or indirectly, creating hypernatural UIs. The prototype allowed successful completion of the three challenges proposed by the 2018 3DUI Contest, as validated by a preliminary user study with participants from the target audience and also from the general public.",
"fno": "08446277",
"keywords": [
"Human Computer Interaction",
"User Interfaces",
"Virtual Reality",
"Unified Virtual Hand Metaphor",
"Consumer VR Setups",
"Hand Based Natural Interaction",
"Primary Task Actions",
"Hypernatural U Is",
"Ladder Climbing",
"Consumer VR Technology",
"Three Dimensional Displays",
"User Interfaces",
"Conferences",
"Virtual Reality",
"Industrial Engineering",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques"
],
"authors": [
{
"affiliation": "Interdisciplinary Center in Interactive Technologies - Polytechnic School - University of São Paulo",
"fullName": "André Montes Rodrigues",
"givenName": "André",
"surname": "Montes Rodrigues",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interdisciplinary Center in Interactive Technologies - Polytechnic School - University of São Paulo",
"fullName": "Mario Nagamura",
"givenName": "Mario",
"surname": "Nagamura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interdisciplinary Center in Interactive Technologies - Polytechnic School - University of São Paulo",
"fullName": "Luis Gustavo Freire da Costa",
"givenName": "Luis Gustavo",
"surname": "Freire da Costa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interdisciplinary Center in Interactive Technologies - Polytechnic School - University of São Paulo",
"fullName": "Marcelo Knorich Zuffo",
"givenName": "Marcelo Knorich",
"surname": "Zuffo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "854-855",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446131",
"articleId": "13bd1fKQxrm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446244",
"articleId": "13bd1AIBM1R",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2017/6716/0/07893367",
"title": "Augmented Reality exhibits of constructive art: 8th annual 3DUI Contest",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2017/07893367/12OmNCcbEjD",
"parentPublication": {
"id": "proceedings/3dui/2017/6716/0",
"title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798861",
"title": "Poster: Superhumans: A 3DUI design metaphor",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798861/12OmNxecRW8",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446131",
"title": "Climb, Direct, Stack: Smart Interfaces for ELeague Contest",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446131/13bd1fKQxrm",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446437",
"title": "Fluid VR: Extended Object Associations for Automatic Mode Switching in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446437/13bd1ftOBCR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/10/ttg2011101355",
"title": "Exploring the Benefits of Augmented Reality Documentation for Maintenance and Repair",
"doi": null,
"abstractUrl": "/journal/tg/2011/10/ttg2011101355/13rRUxly8XD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a920",
"title": "Clean the Ocean: An Immersive VR Experience Proposing New Modifications to Go-Go and WiM Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a920/1CJettpbljW",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090617",
"title": "Get the job! An immersive simulation of sensory overload",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090617/1jIxiD0E0h2",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090541",
"title": "Another day at the Office: Visuohaptic schizophrenia VR simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090541/1jIxmYZ9Txu",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a502",
"title": "Visual Indicators for Monitoring Students in a VR class",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a502/1tnXkpvZfqg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523846",
"title": "Directions for 3D User Interface Research from Consumer VR Games",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523846/1wpqw9G3Lws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJelwYgfOE",
"doi": "10.1109/VRW55335.2022.00058",
"title": "From attention to action: Key drivers to augment VR experience for everyday consumer applications",
"normalizedTitle": "From attention to action: Key drivers to augment VR experience for everyday consumer applications",
"abstract": "The rapid growth of AR/VR/XR technology resulted in development of various applications for branding, advertising and commerce. However, understanding consumer demands, and brand consumer-dynamics in creating immersive and engaging experiences is still a challenge. Part of the challenge reflects the lack of understanding on the fundamental processes underlying human behaviour (from attention to action) between virtual and real worlds. Note also that marketing in real worlds and in virtual worlds may differ, and thus, fostering reconsideration of existing VR theories and marketing strategies. The present study addresses the above challenges and aims at providing better understanding on how to augment AR/VR/XR experience for everyday consumer applications. In particular, multisensory processing and social interactions will be addressed in rendering VR experience that is much more immersive and engaging for the user, and thus, being a prerequisite for enhanced consumer journey. We suggest a framework encompassing the key determinants from attention to action, and thus, providing understanding on how to augment experience. Implementing the conceptual framework offers innovative ways for brands to reach, attract, and retain customers via appealing multisensory experiences enhancing the brand portfolio beyond the traditional shopping environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The rapid growth of AR/VR/XR technology resulted in development of various applications for branding, advertising and commerce. However, understanding consumer demands, and brand consumer-dynamics in creating immersive and engaging experiences is still a challenge. Part of the challenge reflects the lack of understanding on the fundamental processes underlying human behaviour (from attention to action) between virtual and real worlds. Note also that marketing in real worlds and in virtual worlds may differ, and thus, fostering reconsideration of existing VR theories and marketing strategies. The present study addresses the above challenges and aims at providing better understanding on how to augment AR/VR/XR experience for everyday consumer applications. In particular, multisensory processing and social interactions will be addressed in rendering VR experience that is much more immersive and engaging for the user, and thus, being a prerequisite for enhanced consumer journey. We suggest a framework encompassing the key determinants from attention to action, and thus, providing understanding on how to augment experience. Implementing the conceptual framework offers innovative ways for brands to reach, attract, and retain customers via appealing multisensory experiences enhancing the brand portfolio beyond the traditional shopping environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The rapid growth of AR/VR/XR technology resulted in development of various applications for branding, advertising and commerce. However, understanding consumer demands, and brand consumer-dynamics in creating immersive and engaging experiences is still a challenge. Part of the challenge reflects the lack of understanding on the fundamental processes underlying human behaviour (from attention to action) between virtual and real worlds. Note also that marketing in real worlds and in virtual worlds may differ, and thus, fostering reconsideration of existing VR theories and marketing strategies. The present study addresses the above challenges and aims at providing better understanding on how to augment AR/VR/XR experience for everyday consumer applications. In particular, multisensory processing and social interactions will be addressed in rendering VR experience that is much more immersive and engaging for the user, and thus, being a prerequisite for enhanced consumer journey. We suggest a framework encompassing the key determinants from attention to action, and thus, providing understanding on how to augment experience. Implementing the conceptual framework offers innovative ways for brands to reach, attract, and retain customers via appealing multisensory experiences enhancing the brand portfolio beyond the traditional shopping environment.",
"fno": "840200a247",
"keywords": [
"Augmented Reality",
"Marketing Data Processing",
"User Experience",
"Multisensory Experiences",
"Brand Portfolio",
"VR Experience",
"Everyday Consumer Applications",
"Consumer Demands",
"Brand Consumer Dynamics",
"Immersive Experiences",
"Marketing",
"Virtual Worlds",
"AR VR XR Technology",
"Three Dimensional Displays",
"Conferences",
"Brand Management",
"Virtual Reality",
"User Interfaces",
"Rendering Computer Graphics",
"Advertising",
"VR Environment",
"Marketing",
"Consumer Experience",
"Attention",
"Perception",
"Action",
"Augmented And Virtual Realities",
"Information Interfaces And Presentation",
"User Machine Systems Human Factors"
],
"authors": [
{
"affiliation": "Liverpool Business School, Liverpool John Moores University,United Kingdom",
"fullName": "Svetlana Bialkova",
"givenName": "Svetlana",
"surname": "Bialkova",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "247-252",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a243",
"articleId": "1CJcCZLUE5q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a253",
"articleId": "1CJcYz4K06s",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2010/4124/0/4124a009",
"title": "Consumer Adoption of Cross Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a009/12OmNy50gbH",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446277",
"title": "Batmen Forever: Unified Virtual Hand Metaphor for Consumer VR Setups",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446277/13bd1fWcuD9",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/06/mcg2017060019",
"title": "Experiencing the Sights, Smells, Sounds, and Climate of Southern Italy in VR",
"doi": null,
"abstractUrl": "/magazine/cg/2017/06/mcg2017060019/13rRUxDqSb2",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a734",
"title": "Exploring How, for Whom and in Which Contexts Extended Reality Training 'Works' in Upskilling Healthcare Workers: A Realist Review",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a734/1CJdEkUzZHW",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a744",
"title": "Who do you look like? - Gaze-based authentication for workers in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a744/1CJdaD5K7Vm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/03/09790021",
"title": "Situated VR: Toward a Congruent Hybrid Reality Without Experiential Artifacts",
"doi": null,
"abstractUrl": "/magazine/cg/2022/03/09790021/1E0Nh45Ca64",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2023/4839/0/483900a067",
"title": "IEEE VR 2023 Workshop: Datasets for developing intelligent XR applications (DATA4XR)",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2023/483900a067/1N0wLk9I85W",
"parentPublication": {
"id": "proceedings/vrw/2023/4839/null",
"title": "2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797906",
"title": "Haptic Force Guided Sound Synthesis in Multisensory Virtual Reality (VR) Simulation for Rigid-Fluid Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797906/1cJ0NFasbcc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523846",
"title": "Directions for 3D User Interface Research from Consumer VR Games",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523846/1wpqw9G3Lws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1FUU5pAuu8E",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1FUUbmWjuLu",
"doi": "10.1109/ICALT55010.2022.00112",
"title": "VR Empathy Game: Creating Empathic VR Environments for Children Based on a Social Constructivist Learning Approach",
"normalizedTitle": "VR Empathy Game: Creating Empathic VR Environments for Children Based on a Social Constructivist Learning Approach",
"abstract": "This paper discusses applications of the Social Constructivist learning approach in the design of virtual reality (VR) games to promote empathy in children. Early Childhood Development (ECD) research provides guidelines for engaging children in empathy development activities (i.e., learning through interactions, reflective activities, role-taking, dialogical inquiry), which are grounded in Social Constructivist Learning Theory. VR researchers suggest the affordances of VR technologies to create immersive learning experiences for empathy development in children, but this research is still at its early stages. This research aimed to explore ways to engage children in empathetic interactions with VR characters based on social constructivist principles. We developed a VR Empathy Game and conducted a qualitative study with 14 children (6-9 years old). Based on the thematic analysis, we found gender differences between the gameplay experiences of girls and boys. Girls were more interested in interacting with VR characters and, as a result, spent more time than boys asking them questions, listening to them, and using the role-taking features to explore the game world from the characters’ perspective. We suggest a follow-up research study exploring ways to better scaffold empathetic experiences for boys.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper discusses applications of the Social Constructivist learning approach in the design of virtual reality (VR) games to promote empathy in children. Early Childhood Development (ECD) research provides guidelines for engaging children in empathy development activities (i.e., learning through interactions, reflective activities, role-taking, dialogical inquiry), which are grounded in Social Constructivist Learning Theory. VR researchers suggest the affordances of VR technologies to create immersive learning experiences for empathy development in children, but this research is still at its early stages. This research aimed to explore ways to engage children in empathetic interactions with VR characters based on social constructivist principles. We developed a VR Empathy Game and conducted a qualitative study with 14 children (6-9 years old). Based on the thematic analysis, we found gender differences between the gameplay experiences of girls and boys. Girls were more interested in interacting with VR characters and, as a result, spent more time than boys asking them questions, listening to them, and using the role-taking features to explore the game world from the characters’ perspective. We suggest a follow-up research study exploring ways to better scaffold empathetic experiences for boys.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper discusses applications of the Social Constructivist learning approach in the design of virtual reality (VR) games to promote empathy in children. Early Childhood Development (ECD) research provides guidelines for engaging children in empathy development activities (i.e., learning through interactions, reflective activities, role-taking, dialogical inquiry), which are grounded in Social Constructivist Learning Theory. VR researchers suggest the affordances of VR technologies to create immersive learning experiences for empathy development in children, but this research is still at its early stages. This research aimed to explore ways to engage children in empathetic interactions with VR characters based on social constructivist principles. We developed a VR Empathy Game and conducted a qualitative study with 14 children (6-9 years old). Based on the thematic analysis, we found gender differences between the gameplay experiences of girls and boys. Girls were more interested in interacting with VR characters and, as a result, spent more time than boys asking them questions, listening to them, and using the role-taking features to explore the game world from the characters’ perspective. We suggest a follow-up research study exploring ways to better scaffold empathetic experiences for boys.",
"fno": "951900a360",
"keywords": [
"Computer Aided Instruction",
"Gender Issues",
"Serious Games Computing",
"Virtual Reality",
"Empathy Development Activities",
"VR Researchers",
"VR Technologies",
"Immersive Learning Experiences",
"VR Characters",
"Social Constructivist Principles",
"VR Empathy Game",
"Empathic VR Environments",
"Virtual Reality Games",
"Early Childhood Development Research",
"ECD",
"Social Constructivist Learning Theory",
"Affordances",
"Games",
"Virtual Reality",
"Guidelines",
"Social Constructivist Learning Theory",
"Educational Games",
"Children",
"Empathy",
"Empathy Games",
"Virtual Reality",
"Virtual Environments"
],
"authors": [
{
"affiliation": "University of Florida,School of Engineering,Gainesville,Florida,USA",
"fullName": "Ekaterina Muravevskaia",
"givenName": "Ekaterina",
"surname": "Muravevskaia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Florida,School of Engineering,Gainesville,Florida,USA",
"fullName": "Christina Gardner-McCune",
"givenName": "Christina",
"surname": "Gardner-McCune",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "360-362",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9519-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "951900a357",
"articleId": "1FUUaDK7qGA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "951900a363",
"articleId": "1FUUbg0hcHK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a038",
"title": "Empathic Mixed Reality: Sharing What You Feel and Interacting with What You See",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a038/12OmNBNM97G",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/var4good/2018/5977/0/08576883",
"title": "VR and Empathy: The Bad, the Good, and the Paradoxical",
"doi": null,
"abstractUrl": "/proceedings-article/var4good/2018/08576883/17D45WB0qcg",
"parentPublication": {
"id": "proceedings/var4good/2018/5977/0",
"title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a227",
"title": "Empathic Skills Training in Virtual Reality: A Scoping Review",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a227/1CJf9qYxfiM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a742",
"title": "Social Presence in VR Empathy Game for Children: Empathic Interaction with the Virtual Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a742/1CJfetqDtnO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imet/2022/7016/0/09929546",
"title": "The Use of Storytelling in Virtual Reality for Studying Empathy: A Review",
"doi": null,
"abstractUrl": "/proceedings-article/imet/2022/09929546/1HYuWrZKF7G",
"parentPublication": {
"id": "proceedings/imet/2022/7016/0",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797864",
"title": "Design and Testing of a Virtual Reality Enabled Experience that Enhances Engagement and Simulates Empathy for Historical Events and Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797864/1cJ0QFaP9Nm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797899",
"title": "A VR Interactive Story Using POV and Flashback for Empathy",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797899/1cJ12bET8XK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/02/08930025",
"title": "Evoking Physiological Synchrony and Empathy Using Social VR With Biofeedback",
"doi": null,
"abstractUrl": "/journal/ta/2022/02/08930025/1fCCMiUcICY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccia/2020/6042/0/09178717",
"title": "The application of virtual reality in empathy establishment: Foresee the future",
"doi": null,
"abstractUrl": "/proceedings-article/iccia/2020/09178717/1mDu5E8vzDW",
"parentPublication": {
"id": "proceedings/iccia/2020/6042/0",
"title": "2020 5th International Conference on Computational Intelligence and Applications (ICCIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a060",
"title": "Depression Prevention by Mutual Empathy Training: Using Virtual Reality as a Tool",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a060/1tnWM3NlU8E",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrRaySJ7So",
"doi": "10.1109/ISMAR55827.2022.00070",
"title": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards",
"normalizedTitle": "CardsVR: A Two-Person VR Experience with Passive Haptic Feedback from a Deck of Playing Cards",
"abstract": "Presence in virtual reality (VR) is meaningful for remotely connecting with others and facilitating social interactions despite great distance while providing a sense of “being there.” This work presents CardsVR, a two-person VR experience that allows remote participants to play a game of cards together. An entire deck of tracked cards are used to recreate the sense of playing cards in-person. Prior work in VR commonly provides passive haptic feedback either through a single object or through static objects in the environment. CardsVR is novel in providing passive haptic feedback through multiple cards that are individually tracked and represented in the virtual environment. Participants interact with the physical cards by picking them up, holding them, playing them, or moving them on the physical table. Our participant study (N=23) shows that passive haptic feedback provides significant improvement in three standard measures of presence: Possibility to Act, Realism, and Haptics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presence in virtual reality (VR) is meaningful for remotely connecting with others and facilitating social interactions despite great distance while providing a sense of “being there.” This work presents CardsVR, a two-person VR experience that allows remote participants to play a game of cards together. An entire deck of tracked cards are used to recreate the sense of playing cards in-person. Prior work in VR commonly provides passive haptic feedback either through a single object or through static objects in the environment. CardsVR is novel in providing passive haptic feedback through multiple cards that are individually tracked and represented in the virtual environment. Participants interact with the physical cards by picking them up, holding them, playing them, or moving them on the physical table. Our participant study (N=23) shows that passive haptic feedback provides significant improvement in three standard measures of presence: Possibility to Act, Realism, and Haptics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presence in virtual reality (VR) is meaningful for remotely connecting with others and facilitating social interactions despite great distance while providing a sense of “being there.” This work presents CardsVR, a two-person VR experience that allows remote participants to play a game of cards together. An entire deck of tracked cards are used to recreate the sense of playing cards in-person. Prior work in VR commonly provides passive haptic feedback either through a single object or through static objects in the environment. CardsVR is novel in providing passive haptic feedback through multiple cards that are individually tracked and represented in the virtual environment. Participants interact with the physical cards by picking them up, holding them, playing them, or moving them on the physical table. Our participant study (N=23) shows that passive haptic feedback provides significant improvement in three standard measures of presence: Possibility to Act, Realism, and Haptics.",
"fno": "532500a538",
"keywords": [
"Computer Games",
"Feedback",
"Haptic Interfaces",
"Virtual Reality",
"Cards VR",
"Passive Haptic Feedback",
"Physical Cards",
"Two Person VR Experience",
"Virtual Environment",
"Virtual Reality",
"Atmospheric Measurements",
"Virtual Environments",
"Games",
"Particle Measurements",
"Haptic Interfaces",
"Standards",
"Augmented Reality",
"Human Centered Computing",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "University of California,Santa Barbara",
"fullName": "Andrew Huard",
"givenName": "Andrew",
"surname": "Huard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California,Santa Barbara",
"fullName": "Mengyu Chen",
"givenName": "Mengyu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California,Santa Barbara",
"fullName": "Misha Sra",
"givenName": "Misha",
"surname": "Sra",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "538-547",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1JrRavJIHw4",
"name": "pismar202253250-09995530s1-mm_532500a538.zip",
"size": "218 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09995530s1-mm_532500a538.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "532500a528",
"articleId": "1JrR84Tl3So",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a548",
"articleId": "1JrQZCCdOIo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2014/3624/0/06798850",
"title": "A comparison of different methods for reducing the unintended positional drift accompanying walking-in-place locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798850/12OmNvCzFbu",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07833030",
"title": "Shifty: A Weight-Shifting Dynamic Passive Haptic Proxy to Enhance Object Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07833030/13rRUwgQpqL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a748",
"title": "Wormholes in VR: Teleporting Hands for Flexible Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a748/1JrR93EDicE",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2019/4050/0/08809589",
"title": "Passive Haptic Menus for Desk-Based and HMD-Projected Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2019/08809589/1cI61Rx4b9m",
"parentPublication": {
"id": "proceedings/wevr/2019/4050/0",
"title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797718",
"title": "Haptic Prop: A Tangible Prop for Semi-passive Haptic Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797718/1cJ0Lqfe0gw",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797900",
"title": "Menus on the Desk? System Control in DeskVR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797900/1cJ18TJZQf6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a042",
"title": "Smart Haproxy: A Novel Vibrotactile Feedback Prototype Combining Passive and Active Haptic in AR Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a042/1gysov56h20",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090521",
"title": "A Constrained Path Redirection for Passive Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090521/1jIxpAQuq8o",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090536",
"title": "Elastic-Move: Passive Haptic Device with Force Feedback for Virtual Reality Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090536/1jIxqFQXvSE",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382898",
"title": "Combining Dynamic Passive Haptics and Haptic Retargeting for Enhanced Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382898/1saZv7Dd9Ty",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pQIKkf0MSY",
"title": "2020 19th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"acronym": "sbgames",
"groupId": "1800056",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pQIKqrlF0k",
"doi": "10.1109/SBGames51465.2020.00020",
"title": "Evaluation of Graphical User Interfaces Guidelines for Virtual Reality Games",
"normalizedTitle": "Evaluation of Graphical User Interfaces Guidelines for Virtual Reality Games",
"abstract": "Virtual Reality presents a new form of human-computer interaction for the video game world, introducing new challenges for various aspects of game development. Many of the traditional practices in the design and development of Graphical User Interfaces do not fit the context of Virtual Reality (VR), requiring adaptations or the creation of new solutions. This work proposes to analyze some of the GUI guidelines for VR, and from this, investigate the perception of players about the GUI guidelines. An analysis of some of the manufacturers and game engine guidelines for VR was carried out with the purpose of identifying which recommendations are more common. After that, a survey was applied with players to define, from the perspective of the user, the level of importance of each guideline. In addition to the analysis of the users' perspective, a set of games has been chosen and analyzed to understand the game's compliance with the guidelines. The results of the analysis showed that the games are respecting the guidelines, but users still perceive some issues with the VR games GUIs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality presents a new form of human-computer interaction for the video game world, introducing new challenges for various aspects of game development. Many of the traditional practices in the design and development of Graphical User Interfaces do not fit the context of Virtual Reality (VR), requiring adaptations or the creation of new solutions. This work proposes to analyze some of the GUI guidelines for VR, and from this, investigate the perception of players about the GUI guidelines. An analysis of some of the manufacturers and game engine guidelines for VR was carried out with the purpose of identifying which recommendations are more common. After that, a survey was applied with players to define, from the perspective of the user, the level of importance of each guideline. In addition to the analysis of the users' perspective, a set of games has been chosen and analyzed to understand the game's compliance with the guidelines. The results of the analysis showed that the games are respecting the guidelines, but users still perceive some issues with the VR games GUIs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality presents a new form of human-computer interaction for the video game world, introducing new challenges for various aspects of game development. Many of the traditional practices in the design and development of Graphical User Interfaces do not fit the context of Virtual Reality (VR), requiring adaptations or the creation of new solutions. This work proposes to analyze some of the GUI guidelines for VR, and from this, investigate the perception of players about the GUI guidelines. An analysis of some of the manufacturers and game engine guidelines for VR was carried out with the purpose of identifying which recommendations are more common. After that, a survey was applied with players to define, from the perspective of the user, the level of importance of each guideline. In addition to the analysis of the users' perspective, a set of games has been chosen and analyzed to understand the game's compliance with the guidelines. The results of the analysis showed that the games are respecting the guidelines, but users still perceive some issues with the VR games GUIs.",
"fno": "843200a071",
"keywords": [
"Computer Games",
"Graphical User Interfaces",
"Human Computer Interaction",
"Virtual Reality",
"Graphical User Interfaces Guidelines",
"Virtual Reality Games",
"Human Computer Interaction",
"Video Game World",
"Game Development",
"Traditional Practices",
"GUI Guidelines",
"Game Engine Guidelines",
"Users",
"VR Games GU Is",
"Virtual Reality",
"Three Dimensional Displays",
"Games",
"Guidelines",
"Head",
"Graphical User Interfaces",
"Visualization",
"Virtual Reality",
"User Interface",
"Guidelines",
"Games"
],
"authors": [
{
"affiliation": "Quixadá Campus, Federal University of Ceará,Quixadá,Brazil",
"fullName": "Samuel Alves",
"givenName": "Samuel",
"surname": "Alves",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Quixadá Campus, Federal University of Ceará,Quixadá,Brazil",
"fullName": "Arthur Callado",
"givenName": "Arthur",
"surname": "Callado",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Quixadá Campus, Federal University of Ceará,Quixadá,Brazil",
"fullName": "Paulyne Jucá",
"givenName": "Paulyne",
"surname": "Jucá",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sbgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "71-79",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8432-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "843200a062",
"articleId": "1pQILu1P6qA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "843200a080",
"articleId": "1pQILALn2Lu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223397",
"title": "Shark punch: A virtual reality game for aquatic rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223397/12OmNASILVi",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a188",
"title": "Usability Guidelines to Develop Gesture-Based Serious Games for Health: A Systematic Review",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a188/12OmNAlNiSE",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223452",
"title": "Shark punch: A virtual reality game for aquatic rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223452/12OmNqGA54Q",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2017/3091/0/3091a020",
"title": "Visual Representation of Gesture Interaction Feedback in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a020/12OmNx5Yviz",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a001",
"title": "A Learning Engagement Model of Educational Games Based on Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a001/17D45X2fUIw",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a542",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a808",
"title": "Towards a Virtual Reality Math Game for Learning In Schools - A User Study",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a808/1CJdQbsLPZ6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a360",
"title": "VR Empathy Game: Creating Empathic VR Environments for Children Based on a Social Constructivist Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a360/1FUUbmWjuLu",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a458",
"title": "Play with Emotional Characters: Improving User Emotional Experience by A Data-driven Approach in VR Volleyball Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a458/1tnWZju755K",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichci/2020/2316/0/231600a227",
"title": "Virtual Reality Games based on Brain Computer Interface",
"doi": null,
"abstractUrl": "/proceedings-article/ichci/2020/231600a227/1tuA5U2PsfC",
"parentPublication": {
"id": "proceedings/ichci/2020/2316/0",
"title": "2020 International Conference on Intelligent Computing and Human-Computer Interaction (ICHCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnWZju755K",
"doi": "10.1109/VRW52623.2021.00111",
"title": "Play with Emotional Characters: Improving User Emotional Experience by A Data-driven Approach in VR Volleyball Games",
"normalizedTitle": "Play with Emotional Characters: Improving User Emotional Experience by A Data-driven Approach in VR Volleyball Games",
"abstract": "In real-world volleyball games, players are generally aware of the emotions of other players as they can observe facial expressions, body behaviors, etc., which evokes a rich emotional experience. However, most of the VR volleyball games mainly concentrate on modeling the game playing, rather than supporting an emotional experience. We introduce a data-driven framework to enhance the user's emotional experience and engagement by building emotional virtual characters in VR volleyball games. This framework enables virtual characters to arouse emotions according to the game state and express emotions through facial expressions. Evaluation results demonstrate our framework has benefits to enhance user's emotional experience and engagement.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In real-world volleyball games, players are generally aware of the emotions of other players as they can observe facial expressions, body behaviors, etc., which evokes a rich emotional experience. However, most of the VR volleyball games mainly concentrate on modeling the game playing, rather than supporting an emotional experience. We introduce a data-driven framework to enhance the user's emotional experience and engagement by building emotional virtual characters in VR volleyball games. This framework enables virtual characters to arouse emotions according to the game state and express emotions through facial expressions. Evaluation results demonstrate our framework has benefits to enhance user's emotional experience and engagement.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In real-world volleyball games, players are generally aware of the emotions of other players as they can observe facial expressions, body behaviors, etc., which evokes a rich emotional experience. However, most of the VR volleyball games mainly concentrate on modeling the game playing, rather than supporting an emotional experience. We introduce a data-driven framework to enhance the user's emotional experience and engagement by building emotional virtual characters in VR volleyball games. This framework enables virtual characters to arouse emotions according to the game state and express emotions through facial expressions. Evaluation results demonstrate our framework has benefits to enhance user's emotional experience and engagement.",
"fno": "405700a458",
"keywords": [
"Computer Games",
"Emotion Recognition",
"Virtual Reality",
"Facial Expressions",
"Rich Emotional Experience",
"VR Volleyball Games",
"Game Playing",
"Data Driven Framework",
"Emotional Virtual Characters",
"Arouse Emotions",
"Game State",
"Emotional Characters",
"Improving User Emotional Experience",
"Data Driven Approach",
"Real World Volleyball Games",
"Solid Modeling",
"Three Dimensional Displays",
"Conferences",
"Buildings",
"Games",
"Virtual Reality",
"User Interfaces",
"VR Volleyball Games",
"Emotional Experience",
"Emotional Virtual Characters",
"Sports Games"
],
"authors": [
{
"affiliation": "Chinese Academy of Sciences,Institute of Software,China",
"fullName": "Zechen Bai",
"givenName": "Zechen",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences,Institute of Software,China",
"fullName": "Naiming Yao",
"givenName": "Naiming",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,Singapore",
"fullName": "Nidhi Mishra",
"givenName": "Nidhi",
"surname": "Mishra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences,Institute of Software,China",
"fullName": "Hui Chen",
"givenName": "Hui",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese Academy of Sciences,Institute of Software,China",
"fullName": "Hongan Wang",
"givenName": "Hongan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,Singapore",
"fullName": "Nadia Magnenat Thalmann",
"givenName": "Nadia Magnenat",
"surname": "Thalmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "458-459",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a456",
"articleId": "1tnXaPRVToI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a460",
"articleId": "1tnXL2XEOw8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2017/5812/0/08056585",
"title": "Evaluation of a virtual gaming environment designed to access emotional reactions while playing",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2017/08056585/12OmNsd6viH",
"parentPublication": {
"id": "proceedings/vs-games/2017/5812/0",
"title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2015/0481/0/07394401",
"title": "Using physiological signal analysis to design affective VR games",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2015/07394401/12OmNySosHh",
"parentPublication": {
"id": "proceedings/isspit/2015/0481/0",
"title": "2015 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2012/1120/0/S8006",
"title": "Comparing behavior trees and emotional behavior networks for NPCs",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2012/S8006/12OmNzSh17d",
"parentPublication": {
"id": "proceedings/cgames/2012/1120/0",
"title": "2012 17th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a848",
"title": "Comparing Physiological and Emotional Effects of Happy and Sad Virtual Environments Experienced in Video and Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a848/1CJdhFm4Ez6",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a031",
"title": "Pericles VR: Insights into visual development and gamification of a lesser-known Shakespeare play",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a031/1CJeVETw35e",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a742",
"title": "Social Presence in VR Empathy Game for Children: Empathic Interaction with the Virtual Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a742/1CJfetqDtnO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2022/1647/0/09767281",
"title": "An Exploratory Analysis of Interactive VR-Based Framework for Multi-Componential Analysis of Emotion",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2022/09767281/1Df82pGW23e",
"parentPublication": {
"id": "proceedings/percom-workshops/2022/1647/0",
"title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ickg/2022/5101/0/510100a337",
"title": "VREN: Volleyball Rally Dataset with Expression Notation Language",
"doi": null,
"abstractUrl": "/proceedings-article/ickg/2022/510100a337/1KxU3tQFoPK",
"parentPublication": {
"id": "proceedings/ickg/2022/5101/0",
"title": "2022 IEEE International Conference on Knowledge Graph (ICKG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09258960",
"title": "Touching Virtual Humans: Haptic Responses Reveal the Emotional Impact of Affective Agents",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09258960/1oIW8klCOiY",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a183",
"title": "Understanding Emotional Expression with Haptic Feedback Vest Patterns and Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a183/1tnX9YpX3Nu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
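The record above describes a pipeline that maps the live game state to a character emotion and then to a facial expression. As a rough illustration of that idea, here is a minimal Python sketch; the `GameState` fields, the rule table, and the blend-shape names are hypothetical stand-ins, not the paper's trained, data-driven model.

```python
# Illustrative sketch only: map a volleyball game state to an emotion label,
# then to facial blend-shape weights. All names here are hypothetical; a
# genuinely data-driven pipeline would replace the rule table with a model
# learned from recorded play data.
from dataclasses import dataclass

@dataclass
class GameState:
    own_score: int
    opp_score: int
    last_rally_won: bool

# (predicate, emotion) pairs evaluated in order; first match wins.
EMOTION_RULES = [
    (lambda s: s.last_rally_won and s.own_score > s.opp_score, "joy"),
    (lambda s: s.last_rally_won, "relief"),
    (lambda s: s.opp_score - s.own_score >= 5, "frustration"),
    (lambda s: True, "neutral"),
]

# Blend-shape weights driving the character's face for each emotion.
BLENDSHAPES = {
    "joy": {"mouth_smile": 0.9, "brow_raise": 0.4},
    "relief": {"mouth_smile": 0.5, "eye_close": 0.3},
    "frustration": {"brow_furrow": 0.8, "mouth_frown": 0.6},
    "neutral": {},
}

def express(state: GameState) -> dict:
    emotion = next(e for pred, e in EMOTION_RULES if pred(state))
    return BLENDSHAPES[emotion]

print(express(GameState(own_score=12, opp_score=10, last_rally_won=True)))
```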
|
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnWy6iYjMk",
"doi": "10.1109/VRW52623.2021.00099",
"title": "A-Visor and A-Camera: Arduino-based Cardboard Head-Mounted Controllers for VR Games",
"normalizedTitle": "A-Visor and A-Camera: Arduino-based Cardboard Head-Mounted Controllers for VR Games",
"abstract": "The Nintendo Labo: VR KitTM introduced several types of cardboard controllers that allow users to enjoy virtual reality through various interactions. However, it is not compatible with smartphone devices which many people can use to access VR easily. In this study, we used Arduino and a smartphone to create two customized low-cost cardboard head-mounted VR controllers which we called A-Visor and A-Camera. We also created VR games for AVisor and A-Camera using Unity3D. Thus, we present new DIY head-mounted VR controllers that are made by assembling corrugated cardboard materials, Arduino, and sensors, which are readily accessible to DIY enthusiasts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Nintendo Labo: VR KitTM introduced several types of cardboard controllers that allow users to enjoy virtual reality through various interactions. However, it is not compatible with smartphone devices which many people can use to access VR easily. In this study, we used Arduino and a smartphone to create two customized low-cost cardboard head-mounted VR controllers which we called A-Visor and A-Camera. We also created VR games for AVisor and A-Camera using Unity3D. Thus, we present new DIY head-mounted VR controllers that are made by assembling corrugated cardboard materials, Arduino, and sensors, which are readily accessible to DIY enthusiasts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Nintendo Labo: VR KitTM introduced several types of cardboard controllers that allow users to enjoy virtual reality through various interactions. However, it is not compatible with smartphone devices which many people can use to access VR easily. In this study, we used Arduino and a smartphone to create two customized low-cost cardboard head-mounted VR controllers which we called A-Visor and A-Camera. We also created VR games for AVisor and A-Camera using Unity3D. Thus, we present new DIY head-mounted VR controllers that are made by assembling corrugated cardboard materials, Arduino, and sensors, which are readily accessible to DIY enthusiasts.",
"fno": "405700a434",
"keywords": [
"Computer Games",
"Virtual Reality",
"Arduino Based Cardboard Head Mounted Controllers",
"VR Games",
"Nintendo Labo",
"VR Kit TM",
"Cardboard Controllers",
"Virtual Reality",
"Smartphone Devices",
"Low Cost Cardboard Head Mounted VR Controllers",
"A Visor",
"A Camera",
"A Visor",
"DIY Head Mounted VR Controllers",
"Corrugated Cardboard Materials",
"Unity 3 D",
"Three Dimensional Displays",
"Conferences",
"Virtual Reality",
"Games",
"User Interfaces",
"Sensors",
"Cardboard",
"Virtual Reality",
"Head Mounted Device",
"Arduino",
"Unity 3 D",
"Human Centered Computing Interaction Paradigms Virtual Reality",
"Human Centered Computing Interaction Devices"
],
"authors": [
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Sangmin Park",
"givenName": "Sangmin",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Hojun Aan",
"givenName": "Hojun",
"surname": "Aan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Junhyeong Jo",
"givenName": "Junhyeong",
"surname": "Jo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Hyeonkyu Kim",
"givenName": "Hyeonkyu",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Sangsun Han",
"givenName": "Sangsun",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Jimoon Kim",
"givenName": "Jimoon",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Pilhyoun Yoon",
"givenName": "Pilhyoun",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University,South Korea",
"fullName": "Kibum Kim",
"givenName": "Kibum",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "434-435",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnWxIOwvni",
"name": "pvrw202140570-09419143s1-mm_405700a434.zip",
"size": "62.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419143s1-mm_405700a434.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a432",
"articleId": "1tnXPIz34pa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a436",
"articleId": "1tnXtnnYjdu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892342",
"title": "Uni-CAVE: A Unity3D plugin for non-head mounted VR display systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892342/12OmNs5rkSv",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446399",
"title": "Three Haptic Shape-Feedback Controllers for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446399/13bd1fHrlRF",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a001",
"title": "A Cardboard-Based Virtual Reality Study on Self-Avatar Appearance and Breathing",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a001/1CJdXjsLKBG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a742",
"title": "Social Presence in VR Empathy Game for Children: Empathic Interaction with the Virtual Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a742/1CJfetqDtnO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a109",
"title": "Generative RGB-D Face Completion for Head-Mounted Display Removal",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a695",
"title": "Influence of Interactivity and Social Environments on User Experience and Social Acceptability in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a695/1tuAf9n910Q",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
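The A-Visor and A-Camera controllers stream Arduino sensor readings into the game. Below is a minimal sketch of the host side of such a link, assuming the Arduino prints comma-separated orientation samples over USB serial; the port name, baud rate, and line format are assumptions, and the authors' Unity3D games would consume the stream through their own serial plugin rather than Python.

```python
# Hypothetical host-side reader for an Arduino that prints lines such as
# "12.5,-3.1,0.8\n" (yaw, pitch, roll in degrees). Requires pyserial.
import serial

PORT, BAUD = "/dev/ttyACM0", 115200  # assumed; adjust for your setup

def read_orientation(conn: serial.Serial):
    """Parse one sample; return None for malformed or partial lines."""
    line = conn.readline().decode("ascii", errors="ignore").strip()
    try:
        yaw, pitch, roll = (float(v) for v in line.split(","))
        return yaw, pitch, roll
    except ValueError:
        return None

if __name__ == "__main__":
    with serial.Serial(PORT, BAUD, timeout=1) as conn:
        while True:  # a game loop would poll this once per frame instead
            sample = read_orientation(conn)
            if sample:
                print("yaw=%.1f pitch=%.1f roll=%.1f" % sample)
```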
|
{
"proceeding": {
"id": "1uiluGq0Oo8",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uilNtz7Mha",
"doi": "10.1109/ICME51207.2021.9428306",
"title": "The Impact of Black Edge Artifact on User Experience for the Interactive Cloud VR Services",
"normalizedTitle": "The Impact of Black Edge Artifact on User Experience for the Interactive Cloud VR Services",
"abstract": "When user turns head in an interactive Cloud VR service, such as the Cloud VR games, the black edge artifact often appears at the boundary of the viewport due to the inevitable latency, which significantly affects the user experience. In this paper, we focused on the interactive Cloud VR gaming services and investigated the influence of the black edge artifact on the user experience. The appearance of the black edge artifact during the playing period was regarded as a series of black edge events. The user experience on a single black edge event was first evaluated combining the influential factors of the area ratio and duration of the black edge artifact. Then, a pooling strategy was proposed to evaluate the user’s overall experience. Experimental results showed that the proposed model can accurately predict the user experience that influenced by the black edge artifact for the Cloud VR services. This work can be served as a guideline for service providers and network operators to improve their services.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When user turns head in an interactive Cloud VR service, such as the Cloud VR games, the black edge artifact often appears at the boundary of the viewport due to the inevitable latency, which significantly affects the user experience. In this paper, we focused on the interactive Cloud VR gaming services and investigated the influence of the black edge artifact on the user experience. The appearance of the black edge artifact during the playing period was regarded as a series of black edge events. The user experience on a single black edge event was first evaluated combining the influential factors of the area ratio and duration of the black edge artifact. Then, a pooling strategy was proposed to evaluate the user’s overall experience. Experimental results showed that the proposed model can accurately predict the user experience that influenced by the black edge artifact for the Cloud VR services. This work can be served as a guideline for service providers and network operators to improve their services.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When user turns head in an interactive Cloud VR service, such as the Cloud VR games, the black edge artifact often appears at the boundary of the viewport due to the inevitable latency, which significantly affects the user experience. In this paper, we focused on the interactive Cloud VR gaming services and investigated the influence of the black edge artifact on the user experience. The appearance of the black edge artifact during the playing period was regarded as a series of black edge events. The user experience on a single black edge event was first evaluated combining the influential factors of the area ratio and duration of the black edge artifact. Then, a pooling strategy was proposed to evaluate the user’s overall experience. Experimental results showed that the proposed model can accurately predict the user experience that influenced by the black edge artifact for the Cloud VR services. This work can be served as a guideline for service providers and network operators to improve their services.",
"fno": "09428306",
"keywords": [
"Cloud Computing",
"Computer Games",
"User Experience",
"Virtual Reality",
"Black Edge Artifact",
"User Experience",
"Single Black Edge Event",
"Interactive Cloud VR Service",
"Area Ratio",
"Pooling Strategy",
"Interactive Cloud VR Gaming Services",
"Conferences",
"Games",
"Predictive Models",
"User Experience",
"Guidelines",
"Cloud VR",
"Virtual Reality",
"User Experience",
"Latency",
"Black Edge Artifact"
],
"authors": [
{
"affiliation": "Xidian Univiersity,State Key Laboratory of Integrated Services Networks,Xi'an,China",
"fullName": "Jiarun Song",
"givenName": "Jiarun",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xidian Univiersity,State Key Laboratory of Integrated Services Networks,Xi'an,China",
"fullName": "Jianquan Zhou",
"givenName": "Jianquan",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xidian Univiersity,State Key Laboratory of Integrated Services Networks,Xi'an,China",
"fullName": "Xionghui Mao",
"givenName": "Xionghui",
"surname": "Mao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xidian Univiersity,State Key Laboratory of Integrated Services Networks,Xi'an,China",
"fullName": "Fuzheng Yang",
"givenName": "Fuzheng",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3864-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09428424",
"articleId": "1uilDjO2weI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09428385",
"articleId": "1uilvIcmHqU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrw/2022/8402/0/840200a568",
"title": "Vibrating tilt platform enhancing immersive experience in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a568/1CJcYpd2qNq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049694",
"title": "Effect of Frame Rate on User Experience, Performance, and Simulator Sickness in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049694/1KYopPcDKk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798363",
"title": "RelivelnVR: Capturing and Reliving Virtual Reality Experiences Together",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798363/1cJ0JNak6w8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797971",
"title": "360-Degree Photo-realistic VR Conferencing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797971/1cJ1b26beEg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090400",
"title": "Creating a VR Experience of Solitary Confinement",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090400/1jIxqJMMWC4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a147",
"title": "An Exploratory Study for Designing Social Experience of Watching VR Movies Based on Audience’s Voice Comments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a147/1pBMiVCpEGY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2020/7002/0/700200a223",
"title": "Quality of Experience-Aware User Allocation in Edge Computing Systems: A Potential Game",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2020/700200a223/1rsiOwWULIY",
"parentPublication": {
"id": "proceedings/icdcs/2020/7002/0",
"title": "2020 IEEE 40th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a695",
"title": "Influence of Interactivity and Social Environments on User Experience and Social Acceptability in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a695/1tuAf9n910Q",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciddt/2020/0367/0/036700a323",
"title": "A Study on Evaluation Model of VR Shopping Experience based on User-Centered Approach",
"doi": null,
"abstractUrl": "/proceedings-article/iciddt/2020/036700a323/1wutE3VF3iM",
"parentPublication": {
"id": "proceedings/iciddt/2020/0367/0",
"title": "2020 International Conference on Innovation Design and Digital Technology (ICIDDT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
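The model in this record scores each black edge event from the area ratio and duration of the artifact, then pools the per-event scores into an overall experience rating. A hedged sketch of that two-stage structure follows; the functional forms, constants, and worst-quartile pooling below are placeholders, not the model fitted in the paper.

```python
# Two-stage sketch: per-event quality from (area ratio, duration), then
# temporal pooling across events. All coefficients are invented.
import math

def event_quality(area_ratio: float, duration_s: float) -> float:
    """Quality in [0, 1] for one black-edge event; bigger/longer is worse."""
    severity = area_ratio * (1.0 - math.exp(-duration_s / 0.5))
    return max(0.0, 1.0 - severity)

def pooled_quality(events: list) -> float:
    """Pool per-event scores, weighting the worst quartile more heavily."""
    if not events:
        return 1.0
    scores = sorted(event_quality(a, d) for a, d in events)
    k = max(1, len(scores) // 4)
    return 0.7 * sum(scores[:k]) / k + 0.3 * sum(scores) / len(scores)

# Three events as (area_ratio, duration_s) pairs.
print(pooled_quality([(0.08, 0.2), (0.25, 0.6), (0.05, 0.1)]))
```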
|
{
"proceeding": {
"id": "12OmNxvNZX1",
"title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)",
"acronym": "iiki",
"groupId": "1805504",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBubOQf",
"doi": "10.1109/IIKI.2016.68",
"title": "Distributed Global Illumination Method Based on Photon Mapping",
"normalizedTitle": "Distributed Global Illumination Method Based on Photon Mapping",
"abstract": "As an excellent global illumination algorithm, photon mapping requires a large amount of memory to store photons, and it has obvious spatial locality in the step of photon searching. In this paper, we propose a distributed photon mapping algorithm. Firstly, a new delivering strategy of rendering tasks is proposed based on the ray intersection, while path tracing is used to calculate the direct illumination. Before the rendering, our work automatically synchronizes the acceleration of geometries and photons to realize data partitioning. Secondly, an overlapped KD-Tree is proposed to store photons in order to solve the problem of boundary data merging. The method of this paper is tested using multiple scenes. With ensuring good rendering results, both the rendering speed and storage load obtain the linearly optimization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As an excellent global illumination algorithm, photon mapping requires a large amount of memory to store photons, and it has obvious spatial locality in the step of photon searching. In this paper, we propose a distributed photon mapping algorithm. Firstly, a new delivering strategy of rendering tasks is proposed based on the ray intersection, while path tracing is used to calculate the direct illumination. Before the rendering, our work automatically synchronizes the acceleration of geometries and photons to realize data partitioning. Secondly, an overlapped KD-Tree is proposed to store photons in order to solve the problem of boundary data merging. The method of this paper is tested using multiple scenes. With ensuring good rendering results, both the rendering speed and storage load obtain the linearly optimization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As an excellent global illumination algorithm, photon mapping requires a large amount of memory to store photons, and it has obvious spatial locality in the step of photon searching. In this paper, we propose a distributed photon mapping algorithm. Firstly, a new delivering strategy of rendering tasks is proposed based on the ray intersection, while path tracing is used to calculate the direct illumination. Before the rendering, our work automatically synchronizes the acceleration of geometries and photons to realize data partitioning. Secondly, an overlapped KD-Tree is proposed to store photons in order to solve the problem of boundary data merging. The method of this paper is tested using multiple scenes. With ensuring good rendering results, both the rendering speed and storage load obtain the linearly optimization.",
"fno": "5952a022",
"keywords": [
"Ray Tracing",
"Rendering Computer Graphics",
"Tree Data Structures",
"Distributed Global Illumination Method",
"Photon Searching",
"Distributed Photon Mapping Algorithm",
"Rendering Tasks",
"Direct Illumination",
"Spatial Locality",
"Ray Intersection",
"Path Tracing",
"Overlapped KD Tree",
"Boundary Data Merging",
"Rendering Speed",
"Storage Load",
"Photonics",
"Rendering Computer Graphics",
"Lighting",
"Ray Tracing",
"Task Analysis",
"Geometry",
"Distributed Computing",
"Global Illumination",
"Photon Mapping",
"High Performance Computing"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiang Xu",
"givenName": "Xiang",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lu Wang",
"givenName": "Lu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yanning Xu",
"givenName": "Yanning",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chenglei Yang",
"givenName": "Chenglei",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiangxu Meng",
"givenName": "Xiangxu",
"surname": "Meng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiki",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-10-01T00:00:00",
"pubType": "proceedings",
"pages": "22-25",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-5952-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5952a014",
"articleId": "12OmNy5zsvF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5952a026",
"articleId": "12OmNweBUDF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2017/2937/0/2937a444",
"title": "Towards Interactive and Realistic Rendering of 3D Fetal Ultrasound via Photon Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a444/12OmNwNwzLh",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a069",
"title": "Photon Mapping Parallel Based on Shared Memory System",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a069/12OmNzVGcQg",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2016/8838/0/07945964",
"title": "The Performance Analysis of Quantum Illumination Radar",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2016/07945964/12OmNzkMlPR",
"parentPublication": {
"id": "proceedings/icnisc/2016/8838/0",
"title": "2016 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/01/ttg2009010049",
"title": "Hierarchical Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2009/01/ttg2009010049/13rRUNvyakG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534852",
"title": "Correlated Photon Mapping for Interactive Global Illumination of Time-Varying Volumetric Data",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534852/13rRUxZ0o1E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081317",
"title": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081317/13rRUynHuja",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a242",
"title": "A Photon Tracing Approach to Solve Inverse Rendering Problems",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a242/17D45X0yjW4",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/01/08951772",
"title": "Origins of Global Illumination",
"doi": null,
"abstractUrl": "/magazine/cg/2020/01/08951772/1goL8Hzhdcs",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
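The distributed method above builds on the classic photon-map density estimate: photons are stored in a KD-tree, and radiance at a shading point is estimated from its k nearest photons. The single-node baseline of that estimate can be sketched as follows; the paper's overlapped KD-Tree partitioning across machines is not reproduced here.

```python
# Baseline photon-map radiance estimate: L ≈ sum(photon flux) / (pi * r^2),
# where r is the distance to the k-th nearest photon.
import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
positions = rng.uniform(0.0, 1.0, size=(100_000, 3))  # photon hit points
powers = np.full((100_000, 3), 1e-4)                  # RGB flux per photon

tree = cKDTree(positions)  # spatial index for nearest-photon queries

def radiance_estimate(x: np.ndarray, k: int = 64) -> np.ndarray:
    dist, idx = tree.query(x, k=k)  # k nearest photons around x
    r = dist[-1]                    # radius of the gather sphere
    return powers[idx].sum(axis=0) / (np.pi * r * r)

print(radiance_estimate(np.array([0.5, 0.5, 0.5])))
```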
|
{
"proceeding": {
"id": "12OmNBV9Icj",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"acronym": "etvis",
"groupId": "1818666",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvT2pjL",
"doi": "10.1109/ETVIS.2016.7851170",
"title": "An analysis of eye-tracking data in foveated ray tracing",
"normalizedTitle": "An analysis of eye-tracking data in foveated ray tracing",
"abstract": "We present an analysis of eye tracking data produced during a quality-focused user study of our own foveated ray tracing method. Generally, foveated rendering serves the purpose of adapting actual rendering methods to a user’s gaze. This leads to performance improvements which also allow for the use of methods like ray tracing, which would be computationally too expensive otherwise, in fields like virtual reality (VR), where high rendering performance is important to achieve immersion, or fields like scientific and information visualization, where large amounts of data may hinder real-time rendering capabilities. We provide an overview of our rendering system itself as well as information about the data we collected during the user study, based on fixation tasks to be fulfilled during flights through virtual scenes displayed on a head-mounted display (HMD). We analyze the tracking data regarding its precision and take a closer look at the accuracy achieved by participants when focusing the fixation targets. This information is then put into context with the quality ratings given by the users, leading to a surprising relation between fixation accuracy and quality ratings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an analysis of eye tracking data produced during a quality-focused user study of our own foveated ray tracing method. Generally, foveated rendering serves the purpose of adapting actual rendering methods to a user’s gaze. This leads to performance improvements which also allow for the use of methods like ray tracing, which would be computationally too expensive otherwise, in fields like virtual reality (VR), where high rendering performance is important to achieve immersion, or fields like scientific and information visualization, where large amounts of data may hinder real-time rendering capabilities. We provide an overview of our rendering system itself as well as information about the data we collected during the user study, based on fixation tasks to be fulfilled during flights through virtual scenes displayed on a head-mounted display (HMD). We analyze the tracking data regarding its precision and take a closer look at the accuracy achieved by participants when focusing the fixation targets. This information is then put into context with the quality ratings given by the users, leading to a surprising relation between fixation accuracy and quality ratings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an analysis of eye tracking data produced during a quality-focused user study of our own foveated ray tracing method. Generally, foveated rendering serves the purpose of adapting actual rendering methods to a user’s gaze. This leads to performance improvements which also allow for the use of methods like ray tracing, which would be computationally too expensive otherwise, in fields like virtual reality (VR), where high rendering performance is important to achieve immersion, or fields like scientific and information visualization, where large amounts of data may hinder real-time rendering capabilities. We provide an overview of our rendering system itself as well as information about the data we collected during the user study, based on fixation tasks to be fulfilled during flights through virtual scenes displayed on a head-mounted display (HMD). We analyze the tracking data regarding its precision and take a closer look at the accuracy achieved by participants when focusing the fixation targets. This information is then put into context with the quality ratings given by the users, leading to a surprising relation between fixation accuracy and quality ratings.",
"fno": "07851170",
"keywords": [
"Rendering Computer Graphics",
"Visualization",
"Ray Tracing",
"Target Tracking",
"Electronic Mail",
"Tunneling",
"Focusing"
],
"authors": [
{
"affiliation": "Bonn-Rhein-Sieg University of Applied Sciences and Brunel University London",
"fullName": "Thorsten Roth",
"givenName": "Thorsten",
"surname": "Roth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bonn-Rhein-Sieg University of Applied Sciences and Saarland University",
"fullName": "Martin Weier",
"givenName": "Martin",
"surname": "Weier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bonn-Rhein-Sieg University of Applied Sciences",
"fullName": "Andre Hinkenjann",
"givenName": "Andre",
"surname": "Hinkenjann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Brunel University London",
"fullName": "Yongmin Li",
"givenName": "Yongmin",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saarland University, Intel Visual Computing Institute and German Research Center for Artificial Intelligence (DFKI)",
"fullName": "Philipp Slusallek",
"givenName": "Philipp",
"surname": "Slusallek",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "etvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-10-01T00:00:00",
"pubType": "proceedings",
"pages": "69-73",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-4731-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07851169",
"articleId": "12OmNAnuTy9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07851171",
"articleId": "12OmNCmGNXy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2006/0693/0/04061539",
"title": "Ray Tracing for the Movie `Cars'",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061539/12OmNBBzoiL",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460202",
"title": "Ray Tracing Height Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460202/12OmNvrdI3r",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2009/3557/3/3557e915",
"title": "Realtime Ray Tracing on a Hibrid Parallel Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557e915/12OmNxw5B5Q",
"parentPublication": {
"id": "proceedings/etcs/2009/3557/3",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2012/4725/0/4725a131",
"title": "Real Time Ray Tracing for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a131/12OmNzcPAGy",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2001/02/mcg2001020022",
"title": "A Benchmark for Animated Ray Tracing",
"doi": null,
"abstractUrl": "/magazine/cg/2001/02/mcg2001020022/13rRUyp7u1i",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000a466",
"title": "A Hardware-Oriented Fast Foveated Rendering Algorithm for Mobile Real-Time Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000a466/1gjRqzgLxCw",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09005240",
"title": "Eye-dominance-guided Foveated Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222372",
"title": "Ray Tracing Structured AMR Data Using ExaBricks",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222372/1nTqdQ0THGw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
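The record above studies the precision of eye-tracking samples and the accuracy with which participants hit fixation targets. A minimal sketch of how such measures are commonly computed, not the authors' code (all names and the RMS-based precision measure are illustrative assumptions):

```python
import numpy as np

def angular_error_deg(gaze_dir, target_dir):
    """Accuracy: angle in degrees between a gaze direction and the
    direction toward a fixation target (both 3D vectors)."""
    g = gaze_dir / np.linalg.norm(gaze_dir)
    t = target_dir / np.linalg.norm(target_dir)
    return float(np.degrees(np.arccos(np.clip(np.dot(g, t), -1.0, 1.0))))

def precision_rms_deg(gaze_samples):
    """Precision: RMS of successive sample-to-sample angular deviations."""
    errs = [angular_error_deg(gaze_samples[i], gaze_samples[i + 1])
            for i in range(len(gaze_samples) - 1)]
    return float(np.sqrt(np.mean(np.square(errs))))
```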
|
{
"proceeding": {
"id": "12OmNBbaH9O",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"acronym": "ism",
"groupId": "1001094",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwNwzLh",
"doi": "10.1109/ISM.2017.88",
"title": "Towards Interactive and Realistic Rendering of 3D Fetal Ultrasound via Photon Mapping",
"normalizedTitle": "Towards Interactive and Realistic Rendering of 3D Fetal Ultrasound via Photon Mapping",
"abstract": "Three-dimensional (3D) ultrasound is extensively used in obstetrics and gynecology, and realistic rendering results can both enhance the accuracy of diagnosis and facilitate communication between obstetricians and pregnant women. This paper proposes an interactive and realistic rendering method using global illumination effects for 3D ultrasound images with low signal-to-noise-ratio (SNR) values. The method is capable of generating both single and multiple scattering effects, and separately rendering these two illuminations using ray-casting. Volumetric photon mapping is used to simulate indirect illumination. In order to avoid complex storage structures and accelerate the computational speed, a new screen-space destiny estimation is proposed to calculate the radiance of each photon. In addition, to solve the issue of the foetal skin exceeding the dynamic range capability of the display device, high-dynamicrange (HDR) methods are further incorporated in our method. Experiments demonstrate that our method can produce realistic rendering results with enhanced depth information compared to traditional approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Three-dimensional (3D) ultrasound is extensively used in obstetrics and gynecology, and realistic rendering results can both enhance the accuracy of diagnosis and facilitate communication between obstetricians and pregnant women. This paper proposes an interactive and realistic rendering method using global illumination effects for 3D ultrasound images with low signal-to-noise-ratio (SNR) values. The method is capable of generating both single and multiple scattering effects, and separately rendering these two illuminations using ray-casting. Volumetric photon mapping is used to simulate indirect illumination. In order to avoid complex storage structures and accelerate the computational speed, a new screen-space destiny estimation is proposed to calculate the radiance of each photon. In addition, to solve the issue of the foetal skin exceeding the dynamic range capability of the display device, high-dynamicrange (HDR) methods are further incorporated in our method. Experiments demonstrate that our method can produce realistic rendering results with enhanced depth information compared to traditional approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Three-dimensional (3D) ultrasound is extensively used in obstetrics and gynecology, and realistic rendering results can both enhance the accuracy of diagnosis and facilitate communication between obstetricians and pregnant women. This paper proposes an interactive and realistic rendering method using global illumination effects for 3D ultrasound images with low signal-to-noise-ratio (SNR) values. The method is capable of generating both single and multiple scattering effects, and separately rendering these two illuminations using ray-casting. Volumetric photon mapping is used to simulate indirect illumination. In order to avoid complex storage structures and accelerate the computational speed, a new screen-space destiny estimation is proposed to calculate the radiance of each photon. In addition, to solve the issue of the foetal skin exceeding the dynamic range capability of the display device, high-dynamicrange (HDR) methods are further incorporated in our method. Experiments demonstrate that our method can produce realistic rendering results with enhanced depth information compared to traditional approaches.",
"fno": "2937a444",
"keywords": [
"Photonics",
"Lighting",
"Rendering Computer Graphics",
"Ultrasonic Imaging",
"Scattering",
"Three Dimensional Displays",
"Estimation",
"Volume Rendering",
"3 D Ultrasound",
"Global Illumination",
"High Dynamic Range",
"Photon Mapping"
],
"authors": [
{
"affiliation": null,
"fullName": "Jinta Zheng",
"givenName": "Jinta",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jing Qin",
"givenName": "Jing",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kup-Sze Choi",
"givenName": "Kup-Sze",
"surname": "Choi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ism",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-12-01T00:00:00",
"pubType": "proceedings",
"pages": "444-449",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2937-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2937a438",
"articleId": "12OmNyQph4v",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2937a450",
"articleId": "12OmNvUaNpe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iiki/2016/5952/0/5952a022",
"title": "Distributed Global Illumination Method Based on Photon Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/iiki/2016/5952a022/12OmNBubOQf",
"parentPublication": {
"id": "proceedings/iiki/2016/5952/0",
"title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534852",
"title": "Correlated Photon Mapping for Interactive Global Illumination of Time-Varying Volumetric Data",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534852/13rRUxZ0o1E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/10/08093692",
"title": "Point-Based Rendering for Homogeneous Participating Media with Refractive Boundaries",
"doi": null,
"abstractUrl": "/journal/tg/2018/10/08093692/13rRUy0qnGq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081317",
"title": "Real-Time Volume Rendering in Dynamic Lighting Environments Using Precomputed Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081317/13rRUynHuja",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a242",
"title": "A Photon Tracing Approach to Solve Inverse Rendering Problems",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a242/17D45X0yjW4",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08600345",
"title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09887904",
"title": "Fast and Accurate Illumination Estimation Using LDR Panoramic Images for Realistic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09887904/1GBRnHyZ1bW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10077440",
"title": "NeRC: Rendering Planar Caustics by Learning Implicit Neural Representations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10077440/1LFQ6PMpeik",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
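The ultrasound record above replaces photon mapping's usual spatial-data-structure density estimation with a screen-space estimate of each photon's radiance. As a rough sketch of the general idea, assuming photons have already been projected to pixel coordinates with an associated power (the cone kernel and all names are assumptions, not the paper's estimator):

```python
import numpy as np

def splat_photons(width, height, photon_px, photon_power, radius=5):
    """Accumulate photon power into a screen-space buffer with a
    normalized cone kernel; the buffer approximates indirect radiance."""
    buf = np.zeros((height, width), dtype=np.float64)
    for (x, y), power in zip(photon_px, photon_power):
        x0, x1 = max(0, x - radius), min(width, x + radius + 1)
        y0, y1 = max(0, y - radius), min(height, y + radius + 1)
        ys, xs = np.mgrid[y0:y1, x0:x1]
        w = np.clip(1.0 - np.hypot(xs - x, ys - y) / radius, 0.0, None)
        if w.sum() > 0:
            buf[y0:y1, x0:x1] += power * w / w.sum()
    return buf
```

In a full renderer this indirect-illumination buffer would then be combined with the separately ray-cast single-scattering pass before HDR tone mapping.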
|
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcj9wHjH2",
"doi": "10.1109/VR51125.2022.00097",
"title": "Rectangular Mapping-based Foveated Rendering",
"normalizedTitle": "Rectangular Mapping-based Foveated Rendering",
"abstract": "With the speedy increase of display resolution and the demand for interactive frame rate, rendering acceleration is becoming more critical for a wide range of virtual reality applications. Foveated rendering addresses this challenge by rendering with a non-uniform resolution for the display. Motivated by the non-linear optical lens equation, we present rectangular mapping-based foveated rendering (RMFR), a simple yet effective implementation of foveated rendering framework. RMFR supports varying level of foveation according to the eccentricity and the scene complexity. Compared with traditional foveated rendering methods, rectangular mapping-based foveated rendering provides a superior level of perceived visual quality while consuming minimal rendering cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the speedy increase of display resolution and the demand for interactive frame rate, rendering acceleration is becoming more critical for a wide range of virtual reality applications. Foveated rendering addresses this challenge by rendering with a non-uniform resolution for the display. Motivated by the non-linear optical lens equation, we present rectangular mapping-based foveated rendering (RMFR), a simple yet effective implementation of foveated rendering framework. RMFR supports varying level of foveation according to the eccentricity and the scene complexity. Compared with traditional foveated rendering methods, rectangular mapping-based foveated rendering provides a superior level of perceived visual quality while consuming minimal rendering cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the speedy increase of display resolution and the demand for interactive frame rate, rendering acceleration is becoming more critical for a wide range of virtual reality applications. Foveated rendering addresses this challenge by rendering with a non-uniform resolution for the display. Motivated by the non-linear optical lens equation, we present rectangular mapping-based foveated rendering (RMFR), a simple yet effective implementation of foveated rendering framework. RMFR supports varying level of foveation according to the eccentricity and the scene complexity. Compared with traditional foveated rendering methods, rectangular mapping-based foveated rendering provides a superior level of perceived visual quality while consuming minimal rendering cost.",
"fno": "961700a756",
"keywords": [
"Interactive Systems",
"Natural Scenes",
"Rendering Computer Graphics",
"Virtual Reality",
"Scene Complexity",
"RMFR",
"Nonlinear Optical Lens Equation",
"Virtual Reality",
"Rendering Acceleration",
"Interactive Frame Rate",
"Display Resolution",
"Rectangular Mapping Based Foveated Rendering",
"Visualization",
"Costs",
"Three Dimensional Displays",
"Pipelines",
"Optical Buffering",
"Virtual Reality",
"User Interfaces",
"Computing Methodologies",
"Computer Graphics",
"Rendering",
"Visibility",
"Human Centered Computing",
"Visualization",
"Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Shanghai Jiao Tong University,DALAB",
"fullName": "Jiannan Ye",
"givenName": "Jiannan",
"surname": "Ye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,DALAB",
"fullName": "Anqi Xie",
"givenName": "Anqi",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland,College Park",
"fullName": "Susmija Jabbireddy",
"givenName": "Susmija",
"surname": "Jabbireddy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland,College Park",
"fullName": "Yunchuan Li",
"givenName": "Yunchuan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Jiao Tong University,DALAB",
"fullName": "Xubo Yang",
"givenName": "Xubo",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tencent Games Digital Content Technology Center",
"fullName": "Xiaoxu Meng",
"givenName": "Xiaoxu",
"surname": "Meng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "756-764",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJcj2BNq9y",
"name": "pvr202296170-09756831s1-mm_961700a756.zip",
"size": "49.9 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756831s1-mm_961700a756.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a746",
"articleId": "1CJcc750PQI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a765",
"articleId": "1CJcxOuPom4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446142",
"title": "Phase-Aligned Foveated Rendering for Virtual Reality Headsets",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446142/13bd1sx4ZsZ",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2018/4195/0/08551511",
"title": "Eye Tracking-Based 360 Vr Foveated/Tiled Video Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551511/17D45WK5AoH",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a205",
"title": "Power, Performance, and Image Quality Tradeoffs in Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a205/1MNgQoZswDu",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a471",
"title": "Locomotion-aware Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09007492",
"title": "3D-Kernel Foveated Rendering for Light Fields",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09007492/1hJKlSvNgo8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08999630",
"title": "Toward Standardized Classification of Foveated Displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08999630/1hpPDGcaf9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09005240",
"title": "Eye-dominance-guided Foveated Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a001",
"title": "Foveated Instant Radiosity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
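RMFR's core idea is to render into a reduced buffer whose sampling density falls off with eccentricity and then unwarp it to the display. The paper derives its rectangular mapping from the optical lens equation; the sketch below uses a simple logarithmic warp purely to illustrate a dense fovea and compressed periphery (the function names and the sigma falloff constant are assumptions):

```python
import numpy as np

def eccentricity_warp(u, sigma=2.0):
    """Screen coordinate u in [-1, 1] (gaze at 0) -> compressed buffer
    coordinate: dense sampling near the fovea, sparse in the periphery."""
    return np.sign(u) * np.log1p(sigma * np.abs(u)) / np.log1p(sigma)

def eccentricity_unwarp(v, sigma=2.0):
    """Inverse map from buffer space back to screen space."""
    return np.sign(v) * np.expm1(np.abs(v) * np.log1p(sigma)) / sigma

# Round trip: warping and unwarping recovers the screen coordinate.
u = np.linspace(-1.0, 1.0, 9)
assert np.allclose(eccentricity_unwarp(eccentricity_warp(u)), u)
```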
|
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgzzb0RWg",
"doi": "10.1109/VR55154.2023.00062",
"title": "Locomotion-aware Foveated Rendering",
"normalizedTitle": "Locomotion-aware Foveated Rendering",
"abstract": "Optimizing rendering performance improves the user's immersion in virtual scene exploration. Foveated rendering uses the features of the human visual system (HVS) to improve rendering performance without sacrificing perceptual visual quality. We collect and analyze the viewing motion of different locomotion methods, and describe the effects of these viewing motions on HVS's sensitivity, as well as the advantages of these effects that may bring to foveated rendering. Then we propose the locomotion-aware foveated rendering method (LaFR) to further accelerate foveated rendering by leveraging the advantages. In LaFR, we first introduce the framework of LaFR. Secondly, we propose an eccentricity-based shading rate controller that provides the shading rate control of the given region in foveated rendering. Thirdly, we propose a locomotion-aware log-polar mapping method, which controls the foveal average shading rate, the peripheral shading rate decrease speed, and the overall shading quantity with the locomotion-aware coefficients based on the eccentricity-based shading rate controller. LaFR achieves similar perceptual visual quality as the conventional foveated rendering while achieving up to 1.6× speedup. Compared with the full resolution rendering, LaFR achieves up to 3.8× speedup.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Optimizing rendering performance improves the user's immersion in virtual scene exploration. Foveated rendering uses the features of the human visual system (HVS) to improve rendering performance without sacrificing perceptual visual quality. We collect and analyze the viewing motion of different locomotion methods, and describe the effects of these viewing motions on HVS's sensitivity, as well as the advantages of these effects that may bring to foveated rendering. Then we propose the locomotion-aware foveated rendering method (LaFR) to further accelerate foveated rendering by leveraging the advantages. In LaFR, we first introduce the framework of LaFR. Secondly, we propose an eccentricity-based shading rate controller that provides the shading rate control of the given region in foveated rendering. Thirdly, we propose a locomotion-aware log-polar mapping method, which controls the foveal average shading rate, the peripheral shading rate decrease speed, and the overall shading quantity with the locomotion-aware coefficients based on the eccentricity-based shading rate controller. LaFR achieves similar perceptual visual quality as the conventional foveated rendering while achieving up to 1.6× speedup. Compared with the full resolution rendering, LaFR achieves up to 3.8× speedup.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Optimizing rendering performance improves the user's immersion in virtual scene exploration. Foveated rendering uses the features of the human visual system (HVS) to improve rendering performance without sacrificing perceptual visual quality. We collect and analyze the viewing motion of different locomotion methods, and describe the effects of these viewing motions on HVS's sensitivity, as well as the advantages of these effects that may bring to foveated rendering. Then we propose the locomotion-aware foveated rendering method (LaFR) to further accelerate foveated rendering by leveraging the advantages. In LaFR, we first introduce the framework of LaFR. Secondly, we propose an eccentricity-based shading rate controller that provides the shading rate control of the given region in foveated rendering. Thirdly, we propose a locomotion-aware log-polar mapping method, which controls the foveal average shading rate, the peripheral shading rate decrease speed, and the overall shading quantity with the locomotion-aware coefficients based on the eccentricity-based shading rate controller. LaFR achieves similar perceptual visual quality as the conventional foveated rendering while achieving up to 1.6× speedup. Compared with the full resolution rendering, LaFR achieves up to 3.8× speedup.",
"fno": "481500a471",
"keywords": [
"Visualization",
"Three Dimensional Displays",
"Sensitivity",
"Virtual Reality",
"Visual Systems",
"User Interfaces",
"Rendering Computer Graphics",
"Virtual Reality",
"Foveated Rendering",
"Gaze Contingent Rendering",
"Perception"
],
"authors": [
{
"affiliation": "Beihang University",
"fullName": "Xuehuai Shi",
"givenName": "Xuehuai",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University",
"fullName": "Lili Wang",
"givenName": "Lili",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University",
"fullName": "Jian Wu",
"givenName": "Jian",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MPU",
"fullName": "Wei Ke",
"givenName": "Wei",
"surname": "Ke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MPU",
"fullName": "Chan-Tong Lam",
"givenName": "Chan-Tong",
"surname": "Lam",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "471-481",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1MNgztG6CIM",
"name": "pvr202348150-010108455s1-mm_481500a471.zip",
"size": "38.2 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202348150-010108455s1-mm_481500a471.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "481500a460",
"articleId": "1MNgkshFgXK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a482",
"articleId": "1MNgRYfCXU4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446142",
"title": "Phase-Aligned Foveated Rendering for Virtual Reality Headsets",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446142/13bd1sx4ZsZ",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a756",
"title": "Rectangular Mapping-based Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903564",
"title": "FoVolNet: Fast Volume Rendering using Foveated Deep Neural Networks",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903564/1GZombIreEg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000a466",
"title": "A Hardware-Oriented Fast Foveated Rendering Algorithm for Mobile Real-Time Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000a466/1gjRqzgLxCw",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09007492",
"title": "3D-Kernel Foveated Rendering for Light Fields",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09007492/1hJKlSvNgo8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09005240",
"title": "Eye-dominance-guided Foveated Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090471",
"title": "Efficient Peripheral Flicker Reduction for Foveated Rendering in Mobile VR Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090471/1jIxm9DsWDS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a398",
"title": "Rendering Optimizations for Virtual Reality Using Eye-Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a398/1oZBBw6BBa8",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a001",
"title": "Foveated Instant Radiosity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
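LaFR's eccentricity-based shading rate controller maps eccentricity to a shading rate, with locomotion-aware coefficients modulating how quickly the rate falls off in the periphery. A hedged sketch of such a controller (the thresholds, the linear falloff, and the locomotion_gain parameter are illustrative assumptions, not the paper's formulation):

```python
def shading_period(ecc_deg, fovea_deg=10.0, k=0.15,
                   locomotion_gain=1.0, max_period=4):
    """Number of pixels sharing one shading sample (1 = full rate).
    Full rate inside the foveal radius, coarser with eccentricity;
    locomotion_gain > 1 models viewing motion that tolerates a
    steeper falloff (illustrative assumption)."""
    if ecc_deg <= fovea_deg:
        return 1
    period = 1.0 + k * locomotion_gain * (ecc_deg - fovea_deg)
    return min(max_period, int(round(period)))
```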
|
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysxhw4Bqw",
"doi": "10.1109/ISMAR50242.2020.00017",
"title": "Foveated Instant Radiosity",
"normalizedTitle": "Foveated Instant Radiosity",
"abstract": "Foveated rendering distributes computational resources based on visual acuity, more in the foveal regions of our eyes and less in the periphery. The traditional rasterization method can be adapted into the foveated rendering framework in a quite straightforward way, but it's difficult for estimating global illumination. Instant Radiosity is an efficient global illumination method. It generates Virtual Point Lights (VPLs) on the surface of the virtual scenes from light sources and uses these VPLs to simulate light bounces. However, instant radiosity can not be adapted into the foveated rendering pipeline directly, and is too slow for virtual reality experience. What's more, instant radiosity does not consider temporal coherence, therefore it lacks temporal stability for dynamic scenes. In this paper, we propose a foveated rendering method for instant radiosity with more accurate global illumination effects in the foveal region and less accurate global illumination in the peripheral region. We define a foveated importance for each VPL, and use it to smartly distribute the VPLs to guarantee the rendering precision of the foveal region. Meanwhile, we propose a novel VPL reuse scheme, which updates only a small fraction of VPLs over frames, which ensures temporal coherence and improves time efficiency. Our method supports dynamic scenes and achieves high quality in the foveal regions at interactive frame rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Foveated rendering distributes computational resources based on visual acuity, more in the foveal regions of our eyes and less in the periphery. The traditional rasterization method can be adapted into the foveated rendering framework in a quite straightforward way, but it's difficult for estimating global illumination. Instant Radiosity is an efficient global illumination method. It generates Virtual Point Lights (VPLs) on the surface of the virtual scenes from light sources and uses these VPLs to simulate light bounces. However, instant radiosity can not be adapted into the foveated rendering pipeline directly, and is too slow for virtual reality experience. What's more, instant radiosity does not consider temporal coherence, therefore it lacks temporal stability for dynamic scenes. In this paper, we propose a foveated rendering method for instant radiosity with more accurate global illumination effects in the foveal region and less accurate global illumination in the peripheral region. We define a foveated importance for each VPL, and use it to smartly distribute the VPLs to guarantee the rendering precision of the foveal region. Meanwhile, we propose a novel VPL reuse scheme, which updates only a small fraction of VPLs over frames, which ensures temporal coherence and improves time efficiency. Our method supports dynamic scenes and achieves high quality in the foveal regions at interactive frame rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Foveated rendering distributes computational resources based on visual acuity, more in the foveal regions of our eyes and less in the periphery. The traditional rasterization method can be adapted into the foveated rendering framework in a quite straightforward way, but it's difficult for estimating global illumination. Instant Radiosity is an efficient global illumination method. It generates Virtual Point Lights (VPLs) on the surface of the virtual scenes from light sources and uses these VPLs to simulate light bounces. However, instant radiosity can not be adapted into the foveated rendering pipeline directly, and is too slow for virtual reality experience. What's more, instant radiosity does not consider temporal coherence, therefore it lacks temporal stability for dynamic scenes. In this paper, we propose a foveated rendering method for instant radiosity with more accurate global illumination effects in the foveal region and less accurate global illumination in the peripheral region. We define a foveated importance for each VPL, and use it to smartly distribute the VPLs to guarantee the rendering precision of the foveal region. Meanwhile, we propose a novel VPL reuse scheme, which updates only a small fraction of VPLs over frames, which ensures temporal coherence and improves time efficiency. Our method supports dynamic scenes and achieves high quality in the foveal regions at interactive frame rates.",
"fno": "850800a001",
"keywords": [
"Brightness",
"Lighting",
"Rendering Computer Graphics",
"Virtual Reality",
"Foveal Region",
"Traditional Rasterization Method",
"Foveated Rendering Framework",
"Efficient Global Illumination Method",
"Virtual Point Lights",
"VPL",
"Foveated Rendering Pipeline",
"Temporal Coherence",
"Dynamic Scenes",
"Foveated Rendering Method",
"Accurate Global Illumination Effects",
"Foveated Importance",
"Foveated Instant Radiosity",
"Visualization",
"Three Dimensional Displays",
"Pipelines",
"Lighting",
"Coherence",
"Rendering Computer Graphics",
"Stability Analysis",
"3 D Scene Rendering",
"Real Time Rendering",
"Foveated Rendering",
"Indirect Illumination"
],
"authors": [
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China",
"fullName": "Lili Wang",
"givenName": "Lili",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China",
"fullName": "Runze Li",
"givenName": "Runze",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China",
"fullName": "Xuehuai Shi",
"givenName": "Xuehuai",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California,Santa Barbara,California,U.S.",
"fullName": "Ling-Qi Yan",
"givenName": "Ling-Qi",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China",
"fullName": "Zhichao Li",
"givenName": "Zhichao",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-11",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800z041",
"articleId": "1pysvbIyYJq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a012",
"articleId": "1pysvDRGQq4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2010/9343/0/05643556",
"title": "Differential Instant Radiosity for mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643556/12OmNAkWvti",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/05/07867077",
"title": "Sequential Monte Carlo Instant Radiosity",
"doi": null,
"abstractUrl": "/journal/tg/2017/05/07867077/13rRUwwJWFT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a756",
"title": "Rectangular Mapping-based Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/coins/2022/8356/0/09855009",
"title": "Electronically Foveated Dynamic Vision Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/coins/2022/09855009/1FWm8Ll0qty",
"parentPublication": {
"id": "proceedings/coins/2022/8356/0",
"title": "2022 IEEE International Conference on Omni-layer Intelligent Systems (COINS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873853",
"title": "Foveated Stochastic Lightcuts",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873853/1GjwMIuxYUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a471",
"title": "Locomotion-aware Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000a466",
"title": "A Hardware-Oriented Fast Foveated Rendering Algorithm for Mobile Real-Time Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000a466/1gjRqzgLxCw",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/09005240",
"title": "Eye-dominance-guided Foveated Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
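The instant radiosity record above hinges on two pieces: a per-VPL foveated importance and a reuse scheme that regenerates only a small fraction of VPLs per frame. A minimal sketch under stated assumptions (the angular importance metric, the replacement fraction, and sample_new_vpl are hypothetical placeholders for tracing light paths into the scene):

```python
import numpy as np

rng = np.random.default_rng(0)

def sample_new_vpl():
    """Hypothetical stand-in for tracing a light path to place a VPL."""
    return rng.standard_normal(3)  # placeholder VPL position

def foveated_importance(vpl_positions, eye_pos, gaze_dir, sharpness=4.0):
    """Weight VPLs by angular proximity to the gaze direction."""
    dirs = vpl_positions - eye_pos
    dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
    g = gaze_dir / np.linalg.norm(gaze_dir)
    return np.clip(dirs @ g, 0.0, 1.0) ** sharpness

def update_vpls(vpls, importance, fraction=0.1):
    """Replace only the lowest-importance fraction of VPLs each frame,
    keeping the rest to preserve temporal coherence."""
    for i in np.argsort(importance)[:max(1, int(len(vpls) * fraction))]:
        vpls[i] = sample_new_vpl()
    return vpls
```

Keeping the high-importance VPLs stable across frames is what provides the temporal coherence the abstract emphasizes.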
|
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD8bFOZos",
"doi": "10.1109/ISMAR52148.2021.00058",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"normalizedTitle": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"abstract": "Although ray tracing produces significantly more realistic images than traditional rasterization techniques, it is still considered computationally burdensome when implemented on a head-mounted display (HMD) system that demands both wide field of view and high rendering rate. A further challenge is that to present high-quality images on an HMD screen, a sufficient number of ray samples should be taken per pixel for effective antialiasing to reduce visually annoying artifacts. In this paper, we present a novel foveated real-time rendering framework that realizes classic Whitted-style ray tracing on an HMD system. In particular, our method proposes combining the selective supersampling technique by Jin et al. [8] with the foveated rendering scheme, resulting in perceptually highly efficient pixel sampling suitable for HMD ray tracing. We show that further enhanced by foveated temporal antialiasing, our ray tracer renders nontrivial 3D scenes in real time on commodity GPUs at high sampling rates as effective as up to 36 samples per pixel (spp) in the foveal area, gradually reducing to at least 1 spp in the periphery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although ray tracing produces significantly more realistic images than traditional rasterization techniques, it is still considered computationally burdensome when implemented on a head-mounted display (HMD) system that demands both wide field of view and high rendering rate. A further challenge is that to present high-quality images on an HMD screen, a sufficient number of ray samples should be taken per pixel for effective antialiasing to reduce visually annoying artifacts. In this paper, we present a novel foveated real-time rendering framework that realizes classic Whitted-style ray tracing on an HMD system. In particular, our method proposes combining the selective supersampling technique by Jin et al. [8] with the foveated rendering scheme, resulting in perceptually highly efficient pixel sampling suitable for HMD ray tracing. We show that further enhanced by foveated temporal antialiasing, our ray tracer renders nontrivial 3D scenes in real time on commodity GPUs at high sampling rates as effective as up to 36 samples per pixel (spp) in the foveal area, gradually reducing to at least 1 spp in the periphery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although ray tracing produces significantly more realistic images than traditional rasterization techniques, it is still considered computationally burdensome when implemented on a head-mounted display (HMD) system that demands both wide field of view and high rendering rate. A further challenge is that to present high-quality images on an HMD screen, a sufficient number of ray samples should be taken per pixel for effective antialiasing to reduce visually annoying artifacts. In this paper, we present a novel foveated real-time rendering framework that realizes classic Whitted-style ray tracing on an HMD system. In particular, our method proposes combining the selective supersampling technique by Jin et al. [8] with the foveated rendering scheme, resulting in perceptually highly efficient pixel sampling suitable for HMD ray tracing. We show that further enhanced by foveated temporal antialiasing, our ray tracer renders nontrivial 3D scenes in real time on commodity GPUs at high sampling rates as effective as up to 36 samples per pixel (spp) in the foveal area, gradually reducing to at least 1 spp in the periphery.",
"fno": "015800a413",
"keywords": [
"Antialiasing",
"Computer Graphic Equipment",
"Helmet Mounted Displays",
"Image Resolution",
"Image Sampling",
"Ray Tracing",
"Rendering Computer Graphics",
"Head Mounted Display System",
"High Rendering Rate",
"High Quality Images",
"HMD Screen",
"Ray Samples",
"Effective Antialiasing",
"Visually Annoying Artifacts",
"Foveated Real Time",
"Classic Whitted Style Ray",
"HMD System",
"Selective Supersampling Technique",
"Foveated Rendering Scheme",
"Perceptually Highly Efficient Pixel",
"HMD Ray Tracing",
"Foveated Temporal Antialiasing",
"Ray Tracer Renders",
"High Sampling Rates",
"Selective Foveated Ray Tracing",
"Head Mounted Displays",
"Realistic Images",
"Traditional Rasterization Techniques",
"Head Mounted Displays",
"Three Dimensional Displays",
"Resists",
"Ray Tracing",
"Rendering Computer Graphics",
"Real Time Systems",
"Augmented Reality",
"Computing Methodologies",
"Computer Graphics",
"Rendering",
"Ray Tracing",
"Human Centered Computing",
"Visualization",
"Visualization Technique"
],
"authors": [
{
"affiliation": "Sogang University,Department of Computer Science and Engineering,Korea",
"fullName": "Youngwook Kim",
"givenName": "Youngwook",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Snow Corp.,Korea",
"fullName": "Yunmin Ko",
"givenName": "Yunmin",
"surname": "Ko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sogang University,Department of Computer Science and Engineering,Korea",
"fullName": "Insung Ihm",
"givenName": "Insung",
"surname": "Ihm",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "413-421",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeD7jYvOLK",
"name": "pismar202101580-09583734s1-mm_015800a413.zip",
"size": "175 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583734s1-mm_015800a413.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a403",
"articleId": "1yeD8DDATSw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a422",
"articleId": "1yeD2Kh0vxS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2008/2741/0/04634613",
"title": "Ray-specialized acceleration structures for ray tracing",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634613/12OmNBt3qlA",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a505",
"title": "A Hardware Acceleration Engine for Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a505/12OmNrYlmAx",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etvis/2016/4731/0/07851170",
"title": "An analysis of eye-tracking data in foveated ray tracing",
"doi": null,
"abstractUrl": "/proceedings-article/etvis/2016/07851170/12OmNvT2pjL",
"parentPublication": {
"id": "proceedings/etvis/2016/4731/0",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2011/4648/0/4648a011",
"title": "GPU-Based Data Structure for a Parallel Ray Tracing Illumination Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2011/4648a011/12OmNvwC5ve",
"parentPublication": {
"id": "proceedings/sbgames/2011/4648/0",
"title": "2011 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a087",
"title": "SIMD Friendly Ray Tracing on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a087/12OmNxFaLiE",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870027",
"title": "A Hardware Acceleration Method for Volumetric Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870027/12OmNxHJ9p1",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2005/9330/0/01500343",
"title": "Ray tracing on the desktop: when and how?",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2005/01500343/12OmNyjccyJ",
"parentPublication": {
"id": "proceedings/cgi/2005/9330/0",
"title": "Computer Graphics International 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2001/02/mcg2001020022",
"title": "A Benchmark for Animated Ray Tracing",
"doi": null,
"abstractUrl": "/magazine/cg/2001/02/mcg2001020022/13rRUyp7u1i",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micro/2022/6272/0/627200a263",
"title": "Vulkan-Sim: A GPU Architecture Simulator for Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/micro/2022/627200a263/1HMSwhI3lO8",
"parentPublication": {
"id": "proceedings/micro/2022/6272/0",
"title": "2022 55th IEEE/ACM International Symposium on Microarchitecture (MICRO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523836",
"title": "Foveated Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
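The HMD ray tracer above varies the effective supersampling rate from 36 spp in the foveal area down to at least 1 spp in the periphery. A minimal sketch of such an eccentricity-to-spp schedule (the foveal radius, falloff span, and linear decay are assumptions; the paper's selective supersampling additionally decides per pixel where extra rays pay off):

```python
def samples_per_pixel(ecc_deg, fovea_deg=5.0, falloff_deg=30.0,
                      max_spp=36, min_spp=1):
    """Effective sampling rate: max_spp inside the fovea, decaying
    linearly to min_spp at falloff_deg eccentricity."""
    if ecc_deg <= fovea_deg:
        return max_spp
    t = min(1.0, (ecc_deg - fovea_deg) / (falloff_deg - fovea_deg))
    return max(min_spp, round(max_spp + t * (min_spp - max_spp)))
```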
|
{
"proceeding": {
"id": "12OmNvs4vpO",
"title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)",
"acronym": "iccms",
"groupId": "1002645",
"volume": "2",
"displayVolume": "2",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBhpS5P",
"doi": "10.1109/ICCMS.2010.292",
"title": "SAR Speckle Reduction Based on Nonlocal Means Method",
"normalizedTitle": "SAR Speckle Reduction Based on Nonlocal Means Method",
"abstract": "Synthetic aperture radar (SAR) images are inherently affected by speckle noise, which is due to the coherent nature of the scattering phenomenon. This paper presents a novel SAR speckle reduction method base on nonlocal means (NLM) filter. NLM is applied to remove additive noise after taking the logarithm of the original speckle noise. The proposed method can preserve edges and protect more fine details. Results on real speckle SAR images are given. We have also compared our method with some related methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Synthetic aperture radar (SAR) images are inherently affected by speckle noise, which is due to the coherent nature of the scattering phenomenon. This paper presents a novel SAR speckle reduction method base on nonlocal means (NLM) filter. NLM is applied to remove additive noise after taking the logarithm of the original speckle noise. The proposed method can preserve edges and protect more fine details. Results on real speckle SAR images are given. We have also compared our method with some related methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Synthetic aperture radar (SAR) images are inherently affected by speckle noise, which is due to the coherent nature of the scattering phenomenon. This paper presents a novel SAR speckle reduction method base on nonlocal means (NLM) filter. NLM is applied to remove additive noise after taking the logarithm of the original speckle noise. The proposed method can preserve edges and protect more fine details. Results on real speckle SAR images are given. We have also compared our method with some related methods.",
"fno": "05421106",
"keywords": [
"Filtering Theory",
"Image Denoising",
"Radar Imaging",
"Speckle",
"Synthetic Aperture Radar",
"SAR",
"Speckle Reduction",
"Nonlocal Means Method",
"Synthetic Aperture Radar Image",
"Speckle Noise",
"Nonlocal Means Filter",
"Additive Noise",
"Speckle",
"Filters",
"Anisotropic Magnetoresistance",
"Synthetic Aperture Radar",
"Radar Scattering",
"Additive Noise",
"Image Restoration",
"Computer Science",
"Noise Reduction",
"Image Edge Detection",
"SAR Image",
"Speckle Noise",
"Nonlocal Means"
],
"authors": [
{
"affiliation": null,
"fullName": "Bibo Lu",
"givenName": "Bibo",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Linlin Zhang",
"givenName": "Linlin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Fuqiang Xing",
"givenName": "Fuqiang",
"surname": "Xing",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-01-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-5642-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
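The SAR abstract above relies on the standard log-transform trick: taking the logarithm turns multiplicative speckle into approximately additive noise, which non-local means can then average out using patch similarity. A compact, unoptimized sketch of that pipeline (patch/search sizes and the filtering parameter h are illustrative):

```python
import numpy as np

def nlm_despeckle(img, patch=3, search=7, h=0.1, eps=1e-6):
    """Log-domain non-local means; O(N * search^2), for illustration only."""
    log_img = np.log(img + eps)              # multiplicative -> additive
    p, s = patch // 2, search // 2
    padded = np.pad(log_img, p + s, mode="reflect")
    out = np.zeros_like(log_img)
    H, W = log_img.shape
    for y in range(H):
        for x in range(W):
            yc, xc = y + p + s, x + p + s    # center in padded coords
            ref = padded[yc - p:yc + p + 1, xc - p:xc + p + 1]
            weights = acc = 0.0
            for dy in range(-s, s + 1):
                for dx in range(-s, s + 1):
                    cand = padded[yc + dy - p:yc + dy + p + 1,
                                  xc + dx - p:xc + dx + p + 1]
                    w = np.exp(-np.mean((ref - cand) ** 2) / (h * h))
                    weights += w
                    acc += w * padded[yc + dy, xc + dx]
            out[y, x] = acc / weights        # weighted average in log domain
    return np.exp(out) - eps                 # back to intensity domain
```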
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05421105",
"articleId": "12OmNBv2Ck8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05421103",
"articleId": "12OmNxy4N5o",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ncm/2009/3769/0/3769b335",
"title": "Speckle Filtering by Generalized Gamma Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b335/12OmNAT0mMH",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/1/00982773",
"title": "A speckle reduction algorithm for SAR images",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00982773/12OmNAlvI4j",
"parentPublication": {
"id": "proceedings/icii/2001/7010/1",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a700",
"title": "Speckle Reduction with Multiresolution Bilateral Filtering for SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a700/12OmNBO3JYb",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscsct/2008/3498/1/3498a752",
"title": "Speckle Reduction of Polarimetric SAR Image Based on ICA-SCS Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498a752/12OmNrJAebX",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/1/00413278",
"title": "Wavelet based speckle reduction with application to SAR based ATD/R",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413278/12OmNxEBzea",
"parentPublication": {
"id": "proceedings/icip/1994/6952/3",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2014/6239/0/6239a143",
"title": "SAR Image Despeckling Based on the Nonlocally Centralized Sparse Representation Model",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2014/6239a143/12OmNz61dqQ",
"parentPublication": {
"id": "proceedings/cit/2014/6239/0",
"title": "2014 IEEE International Conference on Computer and Information Technology (CIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/2/00754657",
"title": "Optimal Speckle Reduction In Polarimetric Sar Imagery*",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754657/12OmNzYwcah",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/2",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726762",
"title": "Combined curvelet transform and multispinning algorithm for despeckling of SAR images",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726762/12OmNzwpU4y",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2022/6382/0/09959170",
"title": "SAR Image Denoising in High Dynamic Range with Speckle and Thermal Noise Refinement Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2022/09959170/1Iz5f7tXFgA",
"parentPublication": {
"id": "proceedings/avss/2022/6382/0",
"title": "2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
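The record above describes a homomorphic nonlocal-means (NLM) scheme: the logarithm turns multiplicative speckle into approximately additive noise, NLM removes it, and exponentiation returns to the intensity domain. A minimal sketch of that pipeline follows; the use of scikit-image's `denoise_nl_means`, the unit-mean gamma speckle model, and all parameter values are illustrative assumptions, not the authors' implementation.

```python
# Homomorphic NLM despeckling sketch (illustrative only; parameters assumed).
import numpy as np
from skimage.restoration import denoise_nl_means, estimate_sigma

def despeckle_nlm(img):
    """Log-transform -> NLM -> exp, for a positive-valued speckled image."""
    log_img = np.log(img.astype(np.float64) + 1e-6)   # multiplicative -> additive
    sigma = float(np.mean(estimate_sigma(log_img)))   # rough additive-noise level
    den = denoise_nl_means(log_img, h=1.15 * sigma, sigma=sigma,
                           patch_size=5, patch_distance=6, fast_mode=True)
    return np.exp(den) - 1e-6                         # back to the intensity domain

# Synthetic check: unit-mean gamma speckle on a constant region
rng = np.random.default_rng(0)
clean = np.full((64, 64), 100.0)
speckled = clean * rng.gamma(shape=4.0, scale=0.25, size=clean.shape)
print(despeckle_nlm(speckled).std() < speckled.std())  # expect True
```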
{
"proceeding": {
"id": "12OmNs0C9QC",
"title": "2016 International Symposium on Computer, Consumer and Control (IS3C)",
"acronym": "is3c",
"groupId": "1801670",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBr4etJ",
"doi": "10.1109/IS3C.2016.211",
"title": "A Blind Estimation for Speckle Noise Based on Gaussian-Hermite Moments",
"normalizedTitle": "A Blind Estimation for Speckle Noise Based on Gaussian-Hermite Moments",
"abstract": "As a kind of multiplicative noise, Speckle often degrades image quality. To estimate speckle noise helps to image restoration and consequent analysis. This paper suggests a Gaussian-Hermite moments based method to evaluate the variance of speckle. In the method, the characteristics of Gaussian-Hermite moments are discussed first. Then speckle noise with different variances are respectively added to a certain image where all the greyscales of pixels are the same, and the distribution of feature vectors based on Gaussian-Hermite moments is employed to analyze the noise intensity. Next, a conception of noise characteristic value is used to stand for noise intensity and herefrom construct a function mapping of noise variances and noise characteristic values, with which an estimate function is finally established via a polynomial curve fitting. Experimental results indicate that the estimation function can rapidly and correctly provide with the intensity of speckle without any prior knowledge. Potential applications include noise assessment, image quality analysis, and guidance for noise reduction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As a kind of multiplicative noise, Speckle often degrades image quality. To estimate speckle noise helps to image restoration and consequent analysis. This paper suggests a Gaussian-Hermite moments based method to evaluate the variance of speckle. In the method, the characteristics of Gaussian-Hermite moments are discussed first. Then speckle noise with different variances are respectively added to a certain image where all the greyscales of pixels are the same, and the distribution of feature vectors based on Gaussian-Hermite moments is employed to analyze the noise intensity. Next, a conception of noise characteristic value is used to stand for noise intensity and herefrom construct a function mapping of noise variances and noise characteristic values, with which an estimate function is finally established via a polynomial curve fitting. Experimental results indicate that the estimation function can rapidly and correctly provide with the intensity of speckle without any prior knowledge. Potential applications include noise assessment, image quality analysis, and guidance for noise reduction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As a kind of multiplicative noise, Speckle often degrades image quality. To estimate speckle noise helps to image restoration and consequent analysis. This paper suggests a Gaussian-Hermite moments based method to evaluate the variance of speckle. In the method, the characteristics of Gaussian-Hermite moments are discussed first. Then speckle noise with different variances are respectively added to a certain image where all the greyscales of pixels are the same, and the distribution of feature vectors based on Gaussian-Hermite moments is employed to analyze the noise intensity. Next, a conception of noise characteristic value is used to stand for noise intensity and herefrom construct a function mapping of noise variances and noise characteristic values, with which an estimate function is finally established via a polynomial curve fitting. Experimental results indicate that the estimation function can rapidly and correctly provide with the intensity of speckle without any prior knowledge. Potential applications include noise assessment, image quality analysis, and guidance for noise reduction.",
"fno": "3071a829",
"keywords": [
"Speckle",
"Estimation",
"Benchmark Testing",
"Two Dimensional Displays",
"Image Quality",
"Synthetic Aperture Radar",
"Feature Extraction",
"Function Mapping",
"Noise Estimation",
"Speckle Noise",
"Gaussian Hermite Moments"
],
"authors": [
{
"affiliation": null,
"fullName": "Ma Miao",
"givenName": "Ma",
"surname": "Miao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zheng Xue",
"givenName": "Zheng",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pei Zhao",
"givenName": "Pei",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "is3c",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "829-832",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3071-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3071a825",
"articleId": "12OmNAoUTx8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3071a833",
"articleId": "12OmNBtl1qD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ncm/2009/3769/0/3769b335",
"title": "Speckle Filtering by Generalized Gamma Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b335/12OmNAT0mMH",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/1/00982773",
"title": "A speckle reduction algorithm for SAR images",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00982773/12OmNAlvI4j",
"parentPublication": {
"id": "proceedings/icii/2001/7010/1",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a700",
"title": "Speckle Reduction with Multiresolution Bilateral Filtering for SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a700/12OmNBO3JYb",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/5642/2/05421106",
"title": "SAR Speckle Reduction Based on Nonlocal Means Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/05421106/12OmNBhpS5P",
"parentPublication": {
"id": "proceedings/iccms/2010/5642/2",
"title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031b985",
"title": "Speckle Reduction for SAR Images Based on Adaptive Gaussian Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031b985/12OmNC8MsEQ",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmete/2016/3411/0/07938926",
"title": "Using High Order Total Variation for Denoising Speckle, Gaussian, Salt & Pepper",
"doi": null,
"abstractUrl": "/proceedings-article/icmete/2016/07938926/12OmNqI04Zs",
"parentPublication": {
"id": "proceedings/icmete/2016/3411/0",
"title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2007/2875/2/28750450",
"title": "Some Aspects of Gaussian-Hermite Moments in Image Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2007/28750450/12OmNzDehfb",
"parentPublication": {
"id": "proceedings/icnc/2007/2875/2",
"title": "Third International Conference on Natural Computation (ICNC 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/2/00754657",
"title": "Optimal Speckle Reduction In Polarimetric Sar Imagery*",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754657/12OmNzYwcah",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/2",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413017",
"title": "DSPNet: Deep Learning-Enabled Blind Reduction of Speckle Noise",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413017/1tmi7NCvm2A",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
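The abstract above outlines a calibration procedure: add speckle of known variance to a flat greyscale image, summarize the result with a Gaussian-Hermite-moment feature (the "noise characteristic value"), and fit a polynomial mapping from feature to variance. The sketch below follows that recipe under stated assumptions; the moment orders, the characteristic-value definition, the multiplicative-Gaussian speckle model, and the fit degree are all illustrative choices, not the paper's.

```python
# Blind speckle-variance estimation via Gaussian-Hermite (GH) moments:
# a calibration sketch. Feature definition and fit degree are assumptions.
import numpy as np
from numpy.polynomial.hermite import hermval

def gh_basis(n, x, sigma=1.0):
    """1D Gaussian-Hermite basis: H_n(x/sigma) * exp(-x^2 / (2 sigma^2))."""
    c = np.zeros(n + 1); c[n] = 1.0
    return hermval(x / sigma, c) * np.exp(-x**2 / (2 * sigma**2))

def gh_moment(img, p, q, sigma=1.0):
    """2D GH moment over pixel coordinates normalized to [-1, 1]."""
    h, w = img.shape
    y = np.linspace(-1, 1, h); x = np.linspace(-1, 1, w)
    return gh_basis(p, y, sigma) @ img @ gh_basis(q, x, sigma)

def characteristic_value(img):
    """One possible 'noise characteristic value': energy of low-order GH moments."""
    feats = [gh_moment(img, p, q) for p in range(3) for q in range(3) if p + q > 0]
    return float(np.sqrt(np.mean(np.square(feats))))

# Calibration: speckle of known variance added to a flat (constant-grey) image.
rng = np.random.default_rng(1)
flat = np.full((64, 64), 0.5)
variances = np.linspace(0.01, 0.2, 10)
cvals = [characteristic_value(flat * (1 + rng.normal(0, np.sqrt(v), flat.shape)))
         for v in variances]
coeffs = np.polyfit(cvals, variances, deg=3)   # estimate function: cval -> variance
est = np.polyval(coeffs, characteristic_value(
    flat * (1 + rng.normal(0, np.sqrt(0.1), flat.shape))))
print(f"estimated variance ~= {est:.3f} (true 0.1)")
```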
{
"proceeding": {
"id": "12OmNzcxYUU",
"title": "2007 International Machine Vision and Image Processing Conference",
"acronym": "imvip",
"groupId": "1001328",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC4eSz7",
"doi": "10.1109/IMVIP.2007.24",
"title": "Speckle reduction using the discrete Fourier filtering technique",
"normalizedTitle": "Speckle reduction using the discrete Fourier filtering technique",
"abstract": "We present a digital signal processing technique that reduces the speckle content in reconstructed digital holograms. The method is based on sequential sampling of the discrete Fourier transform of the reconstructed image field. The resulting images show a reduction in speckle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a digital signal processing technique that reduces the speckle content in reconstructed digital holograms. The method is based on sequential sampling of the discrete Fourier transform of the reconstructed image field. The resulting images show a reduction in speckle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a digital signal processing technique that reduces the speckle content in reconstructed digital holograms. The method is based on sequential sampling of the discrete Fourier transform of the reconstructed image field. The resulting images show a reduction in speckle.",
"fno": "28870201",
"keywords": [
"Discrete Fourier Transforms",
"Filtering Theory",
"Holography",
"Image Reconstruction",
"Image Sampling",
"Speckle",
"Speckle Reduction",
"Discrete Fourier Filtering",
"Digital Signal Processing",
"Reconstructed Digital Hologram",
"Sequential Sampling",
"Discrete Fourier Transform",
"Image Reconstruction",
"Speckle",
"Filtering",
"Image Reconstruction",
"Holography",
"Discrete Fourier Transforms",
"Optical Filters",
"Apertures",
"Optical Recording",
"Digital Filters",
"Machine Vision"
],
"authors": [
{
"affiliation": "Nat. Univ. of Ireland Maynooth, Maynooth",
"fullName": "J. Maycock",
"givenName": "J.",
"surname": "Maycock",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nat. Univ. of Ireland Maynooth, Maynooth",
"fullName": "B.M. Hennelly",
"givenName": "B.M.",
"surname": "Hennelly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nat. Univ. of Ireland Maynooth, Maynooth",
"fullName": "J.B. McDonald",
"givenName": "J.B.",
"surname": "McDonald",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Y. Frauel",
"givenName": "Y.",
"surname": "Frauel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "A. Castro",
"givenName": "A.",
"surname": "Castro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "B. Javidi",
"givenName": "B.",
"surname": "Javidi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "T.J. Naughton",
"givenName": "T.J.",
"surname": "Naughton",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "imvip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-09-01T00:00:00",
"pubType": "proceedings",
"pages": "201-201",
"year": "2007",
"issn": null,
"isbn": "0-7695-2887-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "28870098",
"articleId": "12OmNrIae75",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "28870106",
"articleId": "12OmNvjQ8Sd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mvhi/2010/4009/0/4009a700",
"title": "Speckle Reduction with Multiresolution Bilateral Filtering for SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a700/12OmNBO3JYb",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/5642/2/05421106",
"title": "SAR Speckle Reduction Based on Nonlocal Means Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/05421106/12OmNBhpS5P",
"parentPublication": {
"id": "proceedings/iccms/2010/5642/2",
"title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cine/2015/7548/0/7549a148",
"title": "A Comparative Study on Approaches to Speckle Noise Reduction in Images",
"doi": null,
"abstractUrl": "/proceedings-article/cine/2015/7549a148/12OmNCuDzqv",
"parentPublication": {
"id": "proceedings/cine/2015/7548/0",
"title": "2015 International Conference on Computational Intelligence & Networks (CINE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoac/2012/5583/0/06416803",
"title": "Application of Fast Fourier Transform (FFT) in Laser Speckle Image Pattern Correlation technique for the metrological measurement",
"doi": null,
"abstractUrl": "/proceedings-article/icoac/2012/06416803/12OmNwIHoAi",
"parentPublication": {
"id": "proceedings/icoac/2012/5583/0",
"title": "2012 Fourth International Conference on Advanced Computing (ICoAC 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100358",
"title": "Ultrasound speckle reduction by directional median filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100358/12OmNxGj9RJ",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761408",
"title": "Edge-preserving unscented Kalman filter for speckle reduction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761408/12OmNzFv4hS",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/2/00754657",
"title": "Optimal Speckle Reduction In Polarimetric Sar Imagery*",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754657/12OmNzYwcah",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/2",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1975/04/01672827",
"title": "Thermoplastic Holographic Recording of Binary Patterns in PLZT Line Composer",
"doi": null,
"abstractUrl": "/journal/tc/1975/04/01672827/13rRUx0geoJ",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956194",
"title": "Speckle Image Restoration without Clean Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956194/1IHqaphQgZG",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a705",
"title": "DCGH: Dynamic Computer Generated Holography for Speckle-Free, High Fidelity 3D Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a705/1tuB1hj1VhS",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
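The technique above reduces hologram speckle by sequentially sampling the discrete Fourier transform of the reconstructed image field: disjoint spectral regions yield independent speckle realizations whose intensities can be averaged incoherently. A minimal sketch, assuming simple vertical strips as the sampling windows (the actual window geometry and count are not specified here):

```python
# Speckle reduction by sequential sampling of the DFT of a complex image
# field: mask disjoint spectral regions, reconstruct each, average intensities.
import numpy as np

def dft_sequential_despeckle(field, n_blocks=4):
    """field: complex reconstructed image field; returns averaged intensity."""
    F = np.fft.fftshift(np.fft.fft2(field))
    h, w = F.shape
    acc = np.zeros((h, w))
    step = w // n_blocks
    for i in range(n_blocks):                        # vertical strips of the spectrum
        mask = np.zeros_like(F)
        mask[:, i * step:(i + 1) * step] = F[:, i * step:(i + 1) * step]
        sub = np.fft.ifft2(np.fft.ifftshift(mask))   # one speckle realization
        acc += np.abs(sub) ** 2
    return acc / n_blocks                            # incoherent average

# Toy usage: random-phase object as a stand-in for a digital hologram field
rng = np.random.default_rng(2)
obj = np.ones((128, 128)) * np.exp(1j * rng.uniform(0, 2 * np.pi, (128, 128)))
smooth = dft_sequential_despeckle(obj, n_blocks=8)
print(smooth.std() / smooth.mean())  # speckle contrast, reduced vs. a single block
```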
{
"proceeding": {
"id": "12OmNBrDqDF",
"title": "BioMedical Engineering and Informatics, International Conference on",
"acronym": "bmei",
"groupId": "1001754",
"volume": "2",
"displayVolume": "2",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqHqSnj",
"doi": "10.1109/BMEI.2008.299",
"title": "Speckle Noise Reduction of Ultrasound Images Using M-band Wavelet Transform and Wiener Filter in a Homomorphic Framework",
"normalizedTitle": "Speckle Noise Reduction of Ultrasound Images Using M-band Wavelet Transform and Wiener Filter in a Homomorphic Framework",
"abstract": "In this paper we make use of M-band discrete wavelet transform (MDWT) and Wiener filter for speckle noise suppression of ultrasound images. Assuming multiplicative model for speckle noise, by applying logarithmic transform to noisy observation we convert multiplicative noise to additive one. Then, speckle noise can be reduced by taking the M-band wavelet transform of log-transformed observation and employing an adaptive thresholding technique. In order to improve the performance of this denosing approach, we propose a preprocessing stage which exploits Wiener filtering. Through this stage log-transformed observation is separated into two images and then each of these two images is denoised individually in M-band wavelet transform domain. The summation of these two images constructs the despeckled image. We have tested our method on realistic ultrasound images and compared the results with results of other speckle suppression methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we make use of M-band discrete wavelet transform (MDWT) and Wiener filter for speckle noise suppression of ultrasound images. Assuming multiplicative model for speckle noise, by applying logarithmic transform to noisy observation we convert multiplicative noise to additive one. Then, speckle noise can be reduced by taking the M-band wavelet transform of log-transformed observation and employing an adaptive thresholding technique. In order to improve the performance of this denosing approach, we propose a preprocessing stage which exploits Wiener filtering. Through this stage log-transformed observation is separated into two images and then each of these two images is denoised individually in M-band wavelet transform domain. The summation of these two images constructs the despeckled image. We have tested our method on realistic ultrasound images and compared the results with results of other speckle suppression methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we make use of M-band discrete wavelet transform (MDWT) and Wiener filter for speckle noise suppression of ultrasound images. Assuming multiplicative model for speckle noise, by applying logarithmic transform to noisy observation we convert multiplicative noise to additive one. Then, speckle noise can be reduced by taking the M-band wavelet transform of log-transformed observation and employing an adaptive thresholding technique. In order to improve the performance of this denosing approach, we propose a preprocessing stage which exploits Wiener filtering. Through this stage log-transformed observation is separated into two images and then each of these two images is denoised individually in M-band wavelet transform domain. The summation of these two images constructs the despeckled image. We have tested our method on realistic ultrasound images and compared the results with results of other speckle suppression methods.",
"fno": "3118b510",
"keywords": [
"Image Denoising",
"Ultrasound Speckle",
"M Band Wavelet",
"Homomorphic Framework"
],
"authors": [
{
"affiliation": null,
"fullName": "Arash Vosoughi",
"givenName": "Arash",
"surname": "Vosoughi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mohammad Bagher Shamsollahi",
"givenName": "Mohammad Bagher",
"surname": "Shamsollahi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bmei",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-05-01T00:00:00",
"pubType": "proceedings",
"pages": "510-515",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3118-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3118b506",
"articleId": "12OmNwdtwjR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3118b516",
"articleId": "12OmNxwncJG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2013/0400/0/06572723",
"title": "Speckle noise modeling in the dual-tree complex wavelet domain",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572723/12OmNAObbET",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a627",
"title": "Adaptive Ultrasound Image Matching for Automatic Liver Disease Diagnosis Using Generalized Hough Transform",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a627/12OmNAtK4qL",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a700",
"title": "Speckle Reduction with Multiresolution Bilateral Filtering for SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a700/12OmNBO3JYb",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2000/0862/0/08620245",
"title": "Classification and Estimation of Ultrasound Speckle Noise with Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2000/08620245/12OmNvsm6AZ",
"parentPublication": {
"id": "proceedings/bibe/2000/0862/0",
"title": "13th IEEE International Conference on BioInformatics and BioEngineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100358",
"title": "Ultrasound speckle reduction by directional median filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100358/12OmNxGj9RJ",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/6/3507f228",
"title": "Comparative Analysis of Spatial filters for Speckle Reduction in Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f228/12OmNy2agYp",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042240",
"title": "Prostate Tissue Characterization via Ultrasound Speckle Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042240/12OmNyUWQRF",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a113",
"title": "General Tendencies in Segmentation of Medical Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a113/12OmNyv7mee",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a122",
"title": "Speckle Noise Suppression Techniques for Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a122/12OmNz5apEs",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
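A minimal sketch of the homomorphic pipeline the abstract above describes: log-transform, a Wiener preprocessing stage that splits the observation into two images, wavelet thresholding of each, summation, and exponentiation. It substitutes an ordinary 2-band DWT (via pywt) for the paper's M-band transform, and the soft threshold at one noise standard deviation is an illustrative stand-in for the paper's adaptive thresholding.

```python
# Homomorphic Wiener + wavelet-threshold despeckling sketch (2-band DWT
# stands in for the paper's M-band transform; thresholds are illustrative).
import numpy as np
import pywt
from scipy.signal import wiener

def wavelet_threshold(img, wavelet="db4", level=2):
    coeffs = pywt.wavedec2(img, wavelet, level=level)
    out = [coeffs[0]]
    for detail in coeffs[1:]:
        out.append(tuple(pywt.threshold(d, np.std(d), mode="soft") for d in detail))
    return pywt.waverec2(out, wavelet)

def despeckle_homomorphic(img):
    log_img = np.log(img + 1e-6)
    smooth = wiener(log_img, mysize=5)     # Wiener preprocessing stage
    residual = log_img - smooth            # split into two images
    den = wavelet_threshold(smooth) + wavelet_threshold(residual)
    return np.exp(den) - 1e-6

rng = np.random.default_rng(3)
clean = np.tile(np.linspace(50, 150, 64), (64, 1))
noisy = clean * rng.gamma(4.0, 0.25, clean.shape)   # unit-mean gamma speckle
print(despeckle_homomorphic(noisy).shape)
```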
{
"proceeding": {
"id": "12OmNwJPMXt",
"title": "Optoelectronics and Image Processing, International Conference on",
"acronym": "icoip",
"groupId": "1800228",
"volume": "1",
"displayVolume": "1",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqJq4BI",
"doi": "10.1109/ICOIP.2010.309",
"title": "A Speckle Reduction and Characteristic Enhancement Algorithm to Ultrasonic Image Based on Wavelet Technology",
"normalizedTitle": "A Speckle Reduction and Characteristic Enhancement Algorithm to Ultrasonic Image Based on Wavelet Technology",
"abstract": "The inherent characteristic of ultrasonic and the application environment make ultrasonic image have some defects: image with speckle noise, edge blur and low contrast. In order to reduce the speckle noise and enhance the image characteristics, a speckle reduction and characteristic enhancement anisotropic diffusion algorithm is proposed based on the wavelet technology in this paper. The algorithm and parameters choice and algorithm realization steps had been analyzed and illuminated particularly. A compare research experiment for real ultrasonic image has been done using the algorithm and other traditional methods. The experimental result indicates that the proposed algorithm has strong speckle reduction and enhancement image ability. The purpose of removing speckle noise and enhancing image characteristics at same time has been reached.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The inherent characteristic of ultrasonic and the application environment make ultrasonic image have some defects: image with speckle noise, edge blur and low contrast. In order to reduce the speckle noise and enhance the image characteristics, a speckle reduction and characteristic enhancement anisotropic diffusion algorithm is proposed based on the wavelet technology in this paper. The algorithm and parameters choice and algorithm realization steps had been analyzed and illuminated particularly. A compare research experiment for real ultrasonic image has been done using the algorithm and other traditional methods. The experimental result indicates that the proposed algorithm has strong speckle reduction and enhancement image ability. The purpose of removing speckle noise and enhancing image characteristics at same time has been reached.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The inherent characteristic of ultrasonic and the application environment make ultrasonic image have some defects: image with speckle noise, edge blur and low contrast. In order to reduce the speckle noise and enhance the image characteristics, a speckle reduction and characteristic enhancement anisotropic diffusion algorithm is proposed based on the wavelet technology in this paper. The algorithm and parameters choice and algorithm realization steps had been analyzed and illuminated particularly. A compare research experiment for real ultrasonic image has been done using the algorithm and other traditional methods. The experimental result indicates that the proposed algorithm has strong speckle reduction and enhancement image ability. The purpose of removing speckle noise and enhancing image characteristics at same time has been reached.",
"fno": "4252a135",
"keywords": [
"Ultrasonic Image",
"Anisotropic Diffusion",
"Wavelet",
"Speckle Noise",
"Characteristic Enhancement"
],
"authors": [
{
"affiliation": null,
"fullName": "Yue-qin Li",
"givenName": "Yue-qin",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jin-ping Li",
"givenName": "Jin-ping",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lei Han",
"givenName": "Lei",
"surname": "Han",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icoip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "135-138",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4252-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4252a131",
"articleId": "12OmNwqft0F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4252a139",
"articleId": "12OmNqNosa4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/alpit/2008/3273/0/3273a198",
"title": "Speckle Reducing Anisotropic Diffusion Based on Directions of Gradient",
"doi": null,
"abstractUrl": "/proceedings-article/alpit/2008/3273a198/12OmNA0MZ73",
"parentPublication": {
"id": "proceedings/alpit/2008/3273/0",
"title": "Advanced Language Processing and Web Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcecs/2008/3555/0/3555a258",
"title": "Multi-scale Based Adaptive SRAD for Ultrasound Images Enhancement",
"doi": null,
"abstractUrl": "/proceedings-article/wcecs/2008/3555a258/12OmNApLGxu",
"parentPublication": {
"id": "proceedings/wcecs/2008/3555/0",
"title": "World Congress on Engineering and Computer Science, Advances in Electrical and Electronics Engineering - IAENG Special Edition of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/citworkshops/2008/3242/0/3242a396",
"title": "Anisotropic Diffusion Transform Based on Directions of Edges",
"doi": null,
"abstractUrl": "/proceedings-article/citworkshops/2008/3242a396/12OmNAs2tql",
"parentPublication": {
"id": "proceedings/citworkshops/2008/3242/0",
"title": "Computer and Information Technology, IEEE 8th International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00151032",
"title": "Speckle reduction in ultrasonic imaging for medical applications",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00151032/12OmNvT2ped",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1995/7310/1/73100358",
"title": "Ultrasound speckle reduction by directional median filtering",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73100358/12OmNxGj9RJ",
"parentPublication": {
"id": "proceedings/icip/1995/7310/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/6/3507f228",
"title": "Comparative Analysis of Spatial filters for Speckle Reduction in Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f228/12OmNy2agYp",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2004/8689/0/01433682",
"title": "A novel multiscale nonlinear diffusion method for ultrasonic speckle reduction",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2004/01433682/12OmNyFU79D",
"parentPublication": {
"id": "proceedings/isspit/2004/8689/0",
"title": "Proceedings of the Fourth IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2010/4281/0/4281a703",
"title": "Low-contrast Edge Enhancing Anisotropic Diffusion for Speckle Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a703/12OmNzhELor",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956194",
"title": "Speckle Image Restoration without Clean Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956194/1IHqaphQgZG",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
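The algorithm above builds on anisotropic diffusion, which smooths homogeneous regions while an edge-stopping conduction term suppresses diffusion across strong gradients. Below is a minimal Perona-Malik-style diffusion loop showing that core mechanism; the paper's wavelet coupling is omitted, and kappa, the time step, and the iteration count are illustrative assumptions.

```python
# Minimal anisotropic (Perona-Malik style) diffusion loop -- the diffusion
# core that speckle-reducing enhancement schemes build on. Parameters assumed.
import numpy as np

def anisotropic_diffusion(img, n_iter=20, kappa=30.0, dt=0.2):
    u = img.astype(np.float64).copy()
    for _ in range(n_iter):
        # differences to the four neighbours (periodic borders via np.roll,
        # which is fine for a sketch)
        dn = np.roll(u, 1, axis=0) - u
        ds = np.roll(u, -1, axis=0) - u
        de = np.roll(u, -1, axis=1) - u
        dw = np.roll(u, 1, axis=1) - u
        # edge-stopping conduction: small across strong edges
        g = lambda d: np.exp(-(d / kappa) ** 2)
        u += dt * (g(dn) * dn + g(ds) * ds + g(de) * de + g(dw) * dw)
    return u

rng = np.random.default_rng(4)
img = np.zeros((64, 64)); img[:, 32:] = 100.0         # step edge
noisy = img + img * rng.normal(0, 0.2, img.shape)     # multiplicative-style noise
print(anisotropic_diffusion(noisy).std(axis=0).max()) # noise reduced, edge kept
```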
{
"proceeding": {
"id": "12OmNx5YvqR",
"title": "13th IEEE International Conference on BioInformatics and BioEngineering",
"acronym": "bibe",
"groupId": "1000075",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvsm6AZ",
"doi": "10.1109/BIBE.2000.889614",
"title": "Classification and Estimation of Ultrasound Speckle Noise with Neural Networks",
"normalizedTitle": "Classification and Estimation of Ultrasound Speckle Noise with Neural Networks",
"abstract": "Presents a neural-based approach to classifying and estimating the statistical parameters of speckle noise found in biomedical ultrasound images. Speckle noise, a very complex phenomenon, has been modeled in a variety of different ways: and there is currently no clear consensus as to its precise statistical characteristics. In this study, different neural network architectures are used to classify ultrasound images contaminated with three types of noise, based upon three one-parameter statistical distributions. At the same time: the parameter is estimated. It is expected that accurate characterization of ultrasound speckle noise will benefit existing post-processing methods, and may lead to new refinements in these techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents a neural-based approach to classifying and estimating the statistical parameters of speckle noise found in biomedical ultrasound images. Speckle noise, a very complex phenomenon, has been modeled in a variety of different ways: and there is currently no clear consensus as to its precise statistical characteristics. In this study, different neural network architectures are used to classify ultrasound images contaminated with three types of noise, based upon three one-parameter statistical distributions. At the same time: the parameter is estimated. It is expected that accurate characterization of ultrasound speckle noise will benefit existing post-processing methods, and may lead to new refinements in these techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents a neural-based approach to classifying and estimating the statistical parameters of speckle noise found in biomedical ultrasound images. Speckle noise, a very complex phenomenon, has been modeled in a variety of different ways: and there is currently no clear consensus as to its precise statistical characteristics. In this study, different neural network architectures are used to classify ultrasound images contaminated with three types of noise, based upon three one-parameter statistical distributions. At the same time: the parameter is estimated. It is expected that accurate characterization of ultrasound speckle noise will benefit existing post-processing methods, and may lead to new refinements in these techniques.",
"fno": "08620245",
"keywords": [
"Speckle Noise Parameter Estimation Neural Nets Medical Image Processing Biomedical Ultrasonics Image Classification Neural Net Architecture Statistical Analysis Ultrasound Speckle Noise Estimation Ultrasound Speckle Noise Classification Statistical Parameters Estimation Biomedical Ultrasound Images Precise Statistical Characteristics One Parameter Statistical Distributions Post Processing Methods Medical Diagnostic Imaging"
],
"authors": [
{
"affiliation": "Comput. Sci. & Eng. Program, Louisville Univ., KY, USA",
"fullName": "M.P. Wachowiak",
"givenName": "M.P.",
"surname": "Wachowiak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Sci. & Eng. Program, Louisville Univ., KY, USA",
"fullName": "A.S. Elmaghraby",
"givenName": "A.S.",
"surname": "Elmaghraby",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Sci. & Eng. Program, Louisville Univ., KY, USA",
"fullName": "R. Smolikova",
"givenName": "R.",
"surname": "Smolikova",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Sci. & Eng. Program, Louisville Univ., KY, USA",
"fullName": "J.M. Zurada",
"givenName": "J.M.",
"surname": "Zurada",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibe",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-11-01T00:00:00",
"pubType": "proceedings",
"pages": "245",
"year": "2000",
"issn": null,
"isbn": "0-7695-0862-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08620236",
"articleId": "12OmNzd7bk7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08620253",
"articleId": "12OmNvk7K24",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
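A toy version of the study above, under stated assumptions: flat patches are contaminated with three one-parameter multiplicative noise models (Rayleigh, Gamma, and log-normal are assumed here; the paper's exact distributions are not given), histogram features are extracted, and a small MLP classifies the noise type. Parameter estimation could be added analogously with a regressor.

```python
# Toy speckle-noise-type classifier: synthesize patches under three
# one-parameter noise models (assumed), then train a small neural network.
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(5)

def noisy_patch(kind, param, size=(32, 32)):
    base = np.full(size, 1.0)
    if kind == 0: return base * rng.rayleigh(param, size)           # Rayleigh
    if kind == 1: return base * rng.gamma(param, 1 / param, size)   # Gamma (mean 1)
    return base * rng.lognormal(0.0, param, size)                   # log-normal

def features(patch):
    hist, _ = np.histogram(patch, bins=16, range=(0, 4), density=True)
    return hist

X, y = [], []
for kind in range(3):
    for _ in range(300):
        param = rng.uniform(0.3, 1.0)        # the single noise parameter
        X.append(features(noisy_patch(kind, param)))
        y.append(kind)
X, y = np.array(X), np.array(y)
Xtr, Xte, ytr, yte = train_test_split(X, y, random_state=0)
clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=500,
                    random_state=0).fit(Xtr, ytr)
print(f"noise-type classification accuracy: {clf.score(Xte, yte):.2f}")
```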
{
"proceeding": {
"id": "12OmNwCJOWD",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"acronym": "icassp",
"groupId": "1000002",
"volume": "0",
"displayVolume": "0",
"year": "1991",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwtn3pn",
"doi": "10.1109/ICASSP.1991.150105",
"title": "Restoration of speckle-degraded images using bispectra",
"normalizedTitle": "Restoration of speckle-degraded images using bispectra",
"abstract": "Coherent speckle noise is modeled as a multiplicative noise process. Using a logarithmic transformation, this speckle noise is converted to a signal-independent additive process which is close to Gaussian when an integrating aperture is used. Bispectral reconstruction of speckle-degraded images is performed on such logarithmically transformed images when independent multiple snapshots are available.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "Coherent speckle noise is modeled as a multiplicative noise process. Using a logarithmic transformation, this speckle noise is converted to a signal-independent additive process which is close to Gaussian when an integrating aperture is used. Bispectral reconstruction of speckle-degraded images is performed on such logarithmically transformed images when independent multiple snapshots are available.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Coherent speckle noise is modeled as a multiplicative noise process. Using a logarithmic transformation, this speckle noise is converted to a signal-independent additive process which is close to Gaussian when an integrating aperture is used. Bispectral reconstruction of speckle-degraded images is performed on such logarithmically transformed images when independent multiple snapshots are available.",
"fno": "00150105",
"keywords": [
"Noise",
"Picture Processing",
"Spectral Analysis",
"Speckle Degraded Image Restoration",
"Coherent Speckle Noise",
"Bispectral Reconstruction",
"Multiplicative Noise Process",
"Logarithmic Transformation",
"Signal Independent Additive Process",
"Integrating Aperture",
"Logarithmically Transformed Images",
"Independent Multiple Snapshots",
"Image Restoration",
"Speckle",
"Apertures",
"Additive Noise",
"Image Converters",
"Probability Density Function",
"Signal Restoration",
"Gaussian Noise",
"Signal Processing",
"Image Reconstruction"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng., Rochester Inst. of Technol., NY, USA",
"fullName": "M.R. Raghuveer",
"givenName": "M.R.",
"surname": "Raghuveer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Rochester Inst. of Technol., NY, USA",
"fullName": "S. Wear",
"givenName": "S.",
"surname": "Wear",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Rochester Inst. of Technol., NY, USA",
"fullName": "J. Song",
"givenName": "J.",
"surname": "Song",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1991-01-01T00:00:00",
"pubType": "proceedings",
"pages": "3077,3078,3079",
"year": "1991",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00150104",
"articleId": "12OmNvy25cp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00030001",
"articleId": "12OmNx9nGL6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2013/0400/0/06572723",
"title": "Speckle noise modeling in the dual-tree complex wavelet domain",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572723/12OmNAObbET",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572554",
"title": "Speckle noise reduction of ultrasound images using Extra-Energy Reduction function",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572554/12OmNASILT9",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/1/00982773",
"title": "A speckle reduction algorithm for SAR images",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00982773/12OmNAlvI4j",
"parentPublication": {
"id": "proceedings/icii/2001/7010/1",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/5642/2/05421106",
"title": "SAR Speckle Reduction Based on Nonlocal Means Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/05421106/12OmNBhpS5P",
"parentPublication": {
"id": "proceedings/iccms/2010/5642/2",
"title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cine/2015/7548/0/7549a148",
"title": "A Comparative Study on Approaches to Speckle Noise Reduction in Images",
"doi": null,
"abstractUrl": "/proceedings-article/cine/2015/7549a148/12OmNCuDzqv",
"parentPublication": {
"id": "proceedings/cine/2015/7548/0",
"title": "2015 International Conference on Computational Intelligence & Networks (CINE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a122",
"title": "Speckle Noise Suppression Techniques for Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a122/12OmNz5apEs",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/2/01326384",
"title": "ICA method for speckle signals [blind source separation application]",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326384/12OmNzwZ6ks",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/2",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/5555/01/09881880",
"title": "Progressive Feature Fusion Attention Dense Network for Speckle Noise Removal in OCT Images",
"doi": null,
"abstractUrl": "/journal/tb/5555/01/09881880/1Gv8QwuLcGY",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956194",
"title": "Speckle Image Restoration without Clean Data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956194/1IHqaphQgZG",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
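The abstract above combines a logarithmic transform (multiplicative speckle becomes near-Gaussian additive noise) with bispectrum averaging over independent snapshots. The sketch below shows those two steps in 1D for brevity; full bispectral image reconstruction, i.e. recovering Fourier magnitude and phase from the averaged bispectrum, is beyond this sketch, and the signal, speckle model, and snapshot count are illustrative.

```python
# Log-transform + multi-snapshot bispectrum averaging, in 1D for brevity.
import numpy as np

def bispectrum(x):
    """B(f1, f2) = X(f1) X(f2) conj(X(f1 + f2)) for a 1D signal."""
    X = np.fft.fft(x)
    n = len(X)
    f = np.arange(n)
    return X[:, None] * X[None, :] * np.conj(X[(f[:, None] + f[None, :]) % n])

rng = np.random.default_rng(6)
n, snapshots = 64, 50
signal = np.sin(2 * np.pi * 3 * np.arange(n) / n) + 2.0   # positive-valued signal
acc = np.zeros((n, n), dtype=complex)
for _ in range(snapshots):
    speckled = signal * rng.gamma(4.0, 0.25, n)   # multiplicative speckle
    acc += bispectrum(np.log(speckled))           # additive noise after log
avg_bispec = acc / snapshots                      # averaged bispectrum estimate
print(avg_bispec.shape, np.abs(avg_bispec).max() > 0)
```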
{
"proceeding": {
"id": "12OmNC1oT5Q",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"acronym": "icicse",
"groupId": "1001756",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz5apEs",
"doi": "10.1109/ICICSE.2009.26",
"title": "Speckle Noise Suppression Techniques for Ultrasound Images",
"normalizedTitle": "Speckle Noise Suppression Techniques for Ultrasound Images",
"abstract": "Speckle is a multiplicative noise that degrades the visual evaluation in ultrasound imaging. In medical ultrasound image processing, speckle noise suppression has become a very essential exercise for diagnose. The recent advancements in ultrasound devices necessitate the need of more robust despeckling techniques for enhancing ultrasound medical imaging in routine clinical practice. Many denoising techniques have been proposed for effective suppression of speckle noise. This paper compiles the performance of various techniques in medical B-mode ultrasound images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Speckle is a multiplicative noise that degrades the visual evaluation in ultrasound imaging. In medical ultrasound image processing, speckle noise suppression has become a very essential exercise for diagnose. The recent advancements in ultrasound devices necessitate the need of more robust despeckling techniques for enhancing ultrasound medical imaging in routine clinical practice. Many denoising techniques have been proposed for effective suppression of speckle noise. This paper compiles the performance of various techniques in medical B-mode ultrasound images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Speckle is a multiplicative noise that degrades the visual evaluation in ultrasound imaging. In medical ultrasound image processing, speckle noise suppression has become a very essential exercise for diagnose. The recent advancements in ultrasound devices necessitate the need of more robust despeckling techniques for enhancing ultrasound medical imaging in routine clinical practice. Many denoising techniques have been proposed for effective suppression of speckle noise. This paper compiles the performance of various techniques in medical B-mode ultrasound images.",
"fno": "4027a122",
"keywords": [
"Speckle Noise",
"Ultrasound Images",
"Denoising Techniques"
],
"authors": [
{
"affiliation": null,
"fullName": "Changming Zhu",
"givenName": "Changming",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jun Ni",
"givenName": "Jun",
"surname": "Ni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yanbo Li",
"givenName": "Yanbo",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guochang Gu",
"givenName": "Guochang",
"surname": "Gu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icicse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-12-01T00:00:00",
"pubType": "proceedings",
"pages": "122-125",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-4027-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4027a118",
"articleId": "12OmNzxyiNS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4027a126",
"articleId": "12OmNASILFd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2013/0400/0/06572723",
"title": "Speckle noise modeling in the dual-tree complex wavelet domain",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572723/12OmNAObbET",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572554",
"title": "Speckle noise reduction of ultrasound images using Extra-Energy Reduction function",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572554/12OmNASILT9",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b510",
"title": "Speckle Noise Reduction of Ultrasound Images Using M-band Wavelet Transform and Wiener Filter in a Homomorphic Framework",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b510/12OmNqHqSnj",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbmi/2011/4623/0/4623a120",
"title": "Quantitative Study on Despeckle Methods of Medical Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/icbmi/2011/4623a120/12OmNrIJqrc",
"parentPublication": {
"id": "proceedings/icbmi/2011/4623/0",
"title": "Intelligent Computation and Bio-Medical Instrumentation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2000/0862/0/08620245",
"title": "Classification and Estimation of Ultrasound Speckle Noise with Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2000/08620245/12OmNvsm6AZ",
"parentPublication": {
"id": "proceedings/bibe/2000/0862/0",
"title": "13th IEEE International Conference on BioInformatics and BioEngineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/6/3507f228",
"title": "Comparative Analysis of Spatial filters for Speckle Reduction in Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f228/12OmNy2agYp",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572601",
"title": "Speckle noise reduction and segmentation of kidney regions from ultrasound image",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572601/12OmNyjccAX",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a113",
"title": "General Tendencies in Segmentation of Medical Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a113/12OmNyv7mee",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a281",
"title": "Measuring and Mitigating Speckle Noise in Dual-Axis Confocal Microscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a281/19RSqjMC14k",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
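The record above models speckle as multiplicative noise and surveys despeckling filters for B-mode ultrasound. As a minimal illustration of that model and of one classic baseline such surveys typically include (a Lee-style local linear MMSE filter), the NumPy sketch below may help; the stand-in image, window size, and noise level are assumptions for illustration, not values from the paper.

```python
import numpy as np
from scipy.ndimage import uniform_filter

def add_speckle(img, sigma=0.2, seed=None):
    """Multiplicative speckle model: g = f * n, with n ~ N(1, sigma^2)."""
    rng = np.random.default_rng(seed)
    return img * rng.normal(1.0, sigma, img.shape)

def lee_filter(g, win=7, sigma=0.2):
    """Lee-style despeckling: local linear MMSE estimate under
    multiplicative noise, built from window mean and variance."""
    mean = uniform_filter(g, win)
    sq_mean = uniform_filter(g * g, win)
    overall_var = np.maximum(sq_mean - mean ** 2, 0.0)
    noise_var = (sigma * mean) ** 2                      # noise variance scales with local mean
    signal_var = np.maximum(overall_var - noise_var, 0.0)
    k = signal_var / np.maximum(signal_var + noise_var, 1e-12)
    return mean + k * (g - mean)                         # k -> 0 smooths, k -> 1 preserves edges

phantom = np.random.default_rng(0).random((128, 128))    # stand-in for a B-mode image
noisy = add_speckle(phantom, sigma=0.2, seed=1)
despeckled = lee_filter(noisy, win=7, sigma=0.2)
```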
|
{
"proceeding": {
"id": "12OmNrkjVba",
"title": "2009 WRI Global Congress on Intelligent Systems",
"acronym": "gcis",
"groupId": "1002842",
"volume": "4",
"displayVolume": "4",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzICETH",
"doi": "10.1109/GCIS.2009.219",
"title": "Speckle Noise Filtering for Sea SAR Image",
"normalizedTitle": "Speckle Noise Filtering for Sea SAR Image",
"abstract": "The aliasing of the speckle noise and echo information of sea SAR blurs the structure character of sea SAR image. It is difficult that to enhance structure character of sea SAR image. Aiming at this difficulty, we propose a amalgamation frame based on EMD and exponent. The frame can effectively filter the speckle noise and enhance the structure character which finally can be seen by people’s eyes. Using the different frequency character of each level, the content of each level noise and non-noise are analyzed and exponent of each level is adjusted. And the speckle noise of simple SAR image in different scale is filtered. At last, we obtain the new denoised SAR image. The experiment result indicates that speckle noise of our result is substantially decreased and the structure character and texture of the result is clearer. And ours eyes can distinguish the character of sea SAR image. So that it proves that our filtering algorithm is effective to filter the speckle noise of sea SAR image.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The aliasing of the speckle noise and echo information of sea SAR blurs the structure character of sea SAR image. It is difficult that to enhance structure character of sea SAR image. Aiming at this difficulty, we propose a amalgamation frame based on EMD and exponent. The frame can effectively filter the speckle noise and enhance the structure character which finally can be seen by people’s eyes. Using the different frequency character of each level, the content of each level noise and non-noise are analyzed and exponent of each level is adjusted. And the speckle noise of simple SAR image in different scale is filtered. At last, we obtain the new denoised SAR image. The experiment result indicates that speckle noise of our result is substantially decreased and the structure character and texture of the result is clearer. And ours eyes can distinguish the character of sea SAR image. So that it proves that our filtering algorithm is effective to filter the speckle noise of sea SAR image.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The aliasing of the speckle noise and echo information of sea SAR blurs the structure character of sea SAR image. It is difficult that to enhance structure character of sea SAR image. Aiming at this difficulty, we propose a amalgamation frame based on EMD and exponent. The frame can effectively filter the speckle noise and enhance the structure character which finally can be seen by people’s eyes. Using the different frequency character of each level, the content of each level noise and non-noise are analyzed and exponent of each level is adjusted. And the speckle noise of simple SAR image in different scale is filtered. At last, we obtain the new denoised SAR image. The experiment result indicates that speckle noise of our result is substantially decreased and the structure character and texture of the result is clearer. And ours eyes can distinguish the character of sea SAR image. So that it proves that our filtering algorithm is effective to filter the speckle noise of sea SAR image.",
"fno": "3571d523",
"keywords": [
"Synthetic Aperture Radar",
"Image Denoising",
"Speckle Noise",
"Holder Exponent"
],
"authors": [
{
"affiliation": null,
"fullName": "Yongxin Jiang",
"givenName": "Yongxin",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaotong Wang",
"givenName": "Xiaotong",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaogang Xu",
"givenName": "Xiaogang",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiyong Ye",
"givenName": "Xiyong",
"surname": "Ye",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gcis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-05-01T00:00:00",
"pubType": "proceedings",
"pages": "523-527",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3571-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3571d518",
"articleId": "12OmNwF0BPd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3571d528",
"articleId": "12OmNCmpcFY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ncm/2009/3769/0/3769b335",
"title": "Speckle Filtering by Generalized Gamma Distribution",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b335/12OmNAT0mMH",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a700",
"title": "Speckle Reduction with Multiresolution Bilateral Filtering for SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a700/12OmNBO3JYb",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/5642/2/05421106",
"title": "SAR Speckle Reduction Based on Nonlocal Means Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/05421106/12OmNBhpS5P",
"parentPublication": {
"id": "proceedings/iccms/2010/5642/2",
"title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cccrv/2004/2127/0/01301480",
"title": "Sea ice boundary detection in SAR satellite images using conflicting strength",
"doi": null,
"abstractUrl": "/proceedings-article/cccrv/2004/01301480/12OmNvlxJw5",
"parentPublication": {
"id": "proceedings/cccrv/2004/2127/0",
"title": "First Canadian Conference on Computer and Robot Vision, 2004. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2009/3651/0/3651a101",
"title": "JEDI: Adaptive Stochastic Estimation for Joint Enhancement and Despeckling of Images for SAR",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2009/3651a101/12OmNwcCIRr",
"parentPublication": {
"id": "proceedings/crv/2009/3651/0",
"title": "2009 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/psivt/2010/4285/0/4285a288",
"title": "SAR Image Speckle Noise Suppression Based on DFB Hidden Markov Models Using Immune Clonal Selection Thresholding",
"doi": null,
"abstractUrl": "/proceedings-article/psivt/2010/4285a288/12OmNx5Yvjl",
"parentPublication": {
"id": "proceedings/psivt/2010/4285/0",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicse/2009/4027/0/4027a122",
"title": "Speckle Noise Suppression Techniques for Ultrasound Images",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2009/4027a122/12OmNz5apEs",
"parentPublication": {
"id": "proceedings/icicse/2009/4027/0",
"title": "2009 Fourth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1988/9999/2/00754657",
"title": "Optimal Speckle Reduction In Polarimetric Sar Imagery*",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1988/00754657/12OmNzYwcah",
"parentPublication": {
"id": "proceedings/acssc/1988/9999/2",
"title": "Twenty-Second Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726762",
"title": "Combined curvelet transform and multispinning algorithm for despeckling of SAR images",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726762/12OmNzwpU4y",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2022/6382/0/09959170",
"title": "SAR Image Denoising in High Dynamic Range with Speckle and Thermal Noise Refinement Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2022/09959170/1Iz5f7tXFgA",
"parentPublication": {
"id": "proceedings/avss/2022/6382/0",
"title": "2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
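The EMD-plus-Hölder-exponent framework in the record above decomposes the image into levels, re-weights each level, and reconstructs. The sketch below imitates only that structure, under loudly stated substitutions: a difference-of-Gaussians pyramid stands in for EMD modes, fixed per-level gains stand in for the exponent-driven adjustment, and a log transform turns the multiplicative speckle into additive noise. All names and constants are illustrative assumptions, not the paper's algorithm.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def multiscale_despeckle(img, n_levels=4, gains=(0.2, 0.5, 0.8, 1.0)):
    """Decompose, re-weight each level, reconstruct. Difference-of-Gaussians
    levels stand in for EMD modes; `gains` stands in for the per-level
    Hölder-exponent adjustment (small gain = strong attenuation)."""
    log_img = np.log1p(img)             # multiplicative speckle -> additive
    details, base = [], log_img
    for i in range(n_levels):
        smooth = gaussian_filter(base, sigma=2.0 ** i)
        details.append(base - smooth)   # band-pass detail, finest first
        base = smooth
    out = base                           # low-pass residual
    for detail, gain in zip(details, gains):
        out = out + gain * detail        # fine, noise-dominated levels get low gain
    return np.expm1(out)
```

The design point carried over from the abstract is that each scale is treated separately: noise concentrates in the finest levels, so those receive the smallest gains while coarse structure passes through unattenuated.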
|
{
"proceeding": {
"id": "1IHotVZum6Q",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "9956007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHqaphQgZG",
"doi": "10.1109/ICPR56361.2022.9956194",
"title": "Speckle Image Restoration without Clean Data",
"normalizedTitle": "Speckle Image Restoration without Clean Data",
"abstract": "Speckle noise is an inherent disturbance in coherent imaging systems such as digital holography, synthetic aperture radar, optical coherence tomography, or ultrasound systems. These systems usually produce only single observation per view angle of the same interest object, imposing the difficulty to leverage the statistic among observations. We propose a novel image restoration algorithm that can perform speckle noise removal without clean data and does not require multiple noisy observations in the same view angle. Our proposed method can also be applied to the situation without knowing the noise distribution as prior. We demonstrate our method is especially well-suited for spectral images by first validating on the synthetic dataset, and also applied on real-world digital holography samples. The results are superior in both quantitative measurement and visual inspection compared to several widely applied baselines. Our method even shows promising results across different speckle noise strengths, without the clean data needed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Speckle noise is an inherent disturbance in coherent imaging systems such as digital holography, synthetic aperture radar, optical coherence tomography, or ultrasound systems. These systems usually produce only single observation per view angle of the same interest object, imposing the difficulty to leverage the statistic among observations. We propose a novel image restoration algorithm that can perform speckle noise removal without clean data and does not require multiple noisy observations in the same view angle. Our proposed method can also be applied to the situation without knowing the noise distribution as prior. We demonstrate our method is especially well-suited for spectral images by first validating on the synthetic dataset, and also applied on real-world digital holography samples. The results are superior in both quantitative measurement and visual inspection compared to several widely applied baselines. Our method even shows promising results across different speckle noise strengths, without the clean data needed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Speckle noise is an inherent disturbance in coherent imaging systems such as digital holography, synthetic aperture radar, optical coherence tomography, or ultrasound systems. These systems usually produce only single observation per view angle of the same interest object, imposing the difficulty to leverage the statistic among observations. We propose a novel image restoration algorithm that can perform speckle noise removal without clean data and does not require multiple noisy observations in the same view angle. Our proposed method can also be applied to the situation without knowing the noise distribution as prior. We demonstrate our method is especially well-suited for spectral images by first validating on the synthetic dataset, and also applied on real-world digital holography samples. The results are superior in both quantitative measurement and visual inspection compared to several widely applied baselines. Our method even shows promising results across different speckle noise strengths, without the clean data needed.",
"fno": "09956194",
"keywords": [
"Biomedical Optical Imaging",
"Holography",
"Image Denoising",
"Image Restoration",
"Optical Tomography",
"Radar Imaging",
"Speckle",
"Synthetic Aperture Radar",
"Clean Data",
"Coherent Imaging Systems",
"Different Speckle Noise Strengths",
"Image Restoration Algorithm",
"Inherent Disturbance",
"Interest Object",
"Multiple Noisy Observations",
"Noise Distribution",
"Optical Coherence Tomography",
"Real World Digital Holography Samples",
"Single Observation",
"Speckle Image Restoration",
"Speckle Noise Removal",
"Spectral Images",
"Synthetic Aperture Radar",
"Synthetic Dataset",
"Ultrasound Systems",
"View Angle",
"Widely Applied Baselines",
"Visualization",
"Ultrasonic Imaging",
"Optical Coherence Tomography",
"Speckle",
"Holography",
"Radar Imaging",
"Image Restoration"
],
"authors": [
{
"affiliation": "NVIDIA AI Technology Center",
"fullName": "Tsung-Ming Tai",
"givenName": "Tsung-Ming",
"surname": "Tai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan Normal University,Department of Computer Science and Information Engineering",
"fullName": "Yun-Jie Jhang",
"givenName": "Yun-Jie",
"surname": "Jhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan Normal University,Department of Computer Science and Information Engineering",
"fullName": "Wen-Jyi Hwang",
"givenName": "Wen-Jyi",
"surname": "Hwang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan Normal University,Institute of Electro-Optical Science and Technology",
"fullName": "Chau-Jern Cheng",
"givenName": "Chau-Jern",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "61-67",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9062-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09956294",
"articleId": "1IHq7umgrE4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09956521",
"articleId": "1IHqxVrfspG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2013/0400/0/06572554",
"title": "Speckle noise reduction of ultrasound images using Extra-Energy Reduction function",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572554/12OmNASILT9",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a135",
"title": "A Speckle Reduction and Characteristic Enhancement Algorithm to Ultrasonic Image Based on Wavelet Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a135/12OmNqJq4BI",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150105",
"title": "Restoration of speckle-degraded images using bispectra",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150105/12OmNwtn3pn",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a493",
"title": "A Non-local Low-Rank Framework for Ultrasound Speckle Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a493/12OmNyL0TiU",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042240",
"title": "Prostate Tissue Characterization via Ultrasound Speckle Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042240/12OmNyUWQRF",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572601",
"title": "Speckle noise reduction and segmentation of kidney regions from ultrasound image",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572601/12OmNyjccAX",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gcis/2009/3571/4/3571d523",
"title": "Speckle Noise Filtering for Sea SAR Image",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571d523/12OmNzICETH",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/4",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413017",
"title": "DSPNet: Deep Learning-Enabled Blind Reduction of Speckle Noise",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413017/1tmi7NCvm2A",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412518",
"title": "Ultrasound Image Restoration Using Weighted Nuclear Norm Minimization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412518/1tmiqjLFHY4",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523842",
"title": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523842/1wpqr1B6wA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
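The record above restores speckled images from single noisy observations, with no clean targets. The abstract does not specify the authors' algorithm, so the PyTorch sketch below shows one generic, standard approach to the same setting: a blind-spot (Noise2Void-style) training step that hides a few pixels and predicts them from context. The network size, masking fraction, and pixel-replacement rule are all assumptions, not the paper's method.

```python
import torch
import torch.nn as nn

# A small fully convolutional net; depth and width are arbitrary here.
net = nn.Sequential(
    nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
    nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
    nn.Conv2d(32, 1, 3, padding=1),
)
opt = torch.optim.Adam(net.parameters(), lr=1e-3)

def train_step(noisy, mask_frac=0.02):
    """One blind-spot step on a batch of noisy images shaped (N, 1, H, W):
    hide a random subset of pixels, predict them from their neighborhood."""
    mask = (torch.rand_like(noisy) < mask_frac).float()
    # Fill masked pixels with a shifted copy so the net cannot simply copy input.
    corrupted = noisy * (1 - mask) + torch.roll(noisy, shifts=1, dims=-1) * mask
    pred = net(corrupted)
    loss = (((pred - noisy) * mask) ** 2).sum() / mask.sum().clamp(min=1.0)
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()

# Usage: loop train_step over batches of single noisy observations;
# at inference, apply net directly to the noisy image.
```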
|
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwkR5tU",
"doi": "10.1109/VR.2017.7892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"normalizedTitle": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"abstract": "We propose a facial expression mapping technology between virtual avatars and Head-Mounted Display (HMD) users. HMD allow people to enjoy an immersive Virtual Reality (VR) experience. A virtual avatar can be a representative of the user in the virtual environment. However, the synchronization of the the virtual avatar's expressions with those of the HMD user is limited. The major problem of wearing an HMD is that a large portion of the user's face is occluded, making facial recognition difficult in an HMD-based virtual environment. To overcome this problem, we propose a facial expression mapping technology using retro-reflective photoelectric sensors. The sensors attached inside the HMD measures the distance between the sensors and the user's face. The distance values of five basic facial expressions (Neutral, Happy, Angry, Surprised, and Sad) are used for training the neural network to estimate the facial expression of a user. We achieved an overall accuracy of 88% in recognizing the facial expressions. Our system can also reproduce facial expression change in real-time through an existing avatar using regression. Consequently, our system enables estimation and reconstruction of facial expressions that correspond to the user's emotional changes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a facial expression mapping technology between virtual avatars and Head-Mounted Display (HMD) users. HMD allow people to enjoy an immersive Virtual Reality (VR) experience. A virtual avatar can be a representative of the user in the virtual environment. However, the synchronization of the the virtual avatar's expressions with those of the HMD user is limited. The major problem of wearing an HMD is that a large portion of the user's face is occluded, making facial recognition difficult in an HMD-based virtual environment. To overcome this problem, we propose a facial expression mapping technology using retro-reflective photoelectric sensors. The sensors attached inside the HMD measures the distance between the sensors and the user's face. The distance values of five basic facial expressions (Neutral, Happy, Angry, Surprised, and Sad) are used for training the neural network to estimate the facial expression of a user. We achieved an overall accuracy of 88% in recognizing the facial expressions. Our system can also reproduce facial expression change in real-time through an existing avatar using regression. Consequently, our system enables estimation and reconstruction of facial expressions that correspond to the user's emotional changes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a facial expression mapping technology between virtual avatars and Head-Mounted Display (HMD) users. HMD allow people to enjoy an immersive Virtual Reality (VR) experience. A virtual avatar can be a representative of the user in the virtual environment. However, the synchronization of the the virtual avatar's expressions with those of the HMD user is limited. The major problem of wearing an HMD is that a large portion of the user's face is occluded, making facial recognition difficult in an HMD-based virtual environment. To overcome this problem, we propose a facial expression mapping technology using retro-reflective photoelectric sensors. The sensors attached inside the HMD measures the distance between the sensors and the user's face. The distance values of five basic facial expressions (Neutral, Happy, Angry, Surprised, and Sad) are used for training the neural network to estimate the facial expression of a user. We achieved an overall accuracy of 88% in recognizing the facial expressions. Our system can also reproduce facial expression change in real-time through an existing avatar using regression. Consequently, our system enables estimation and reconstruction of facial expressions that correspond to the user's emotional changes.",
"fno": "07892245",
"keywords": [
"Avatars",
"Resists",
"Face Recognition",
"Face",
"Neural Networks",
"Optical Sensors",
"H 5 M Information Interfaces And Presentation E G HCI Miscellaneous"
],
"authors": [
{
"affiliation": "Keio University",
"fullName": "Katsuhiro Suzuki",
"givenName": "Katsuhiro",
"surname": "Suzuki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University",
"fullName": "Fumihiko Nakamura",
"givenName": "Fumihiko",
"surname": "Nakamura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University",
"fullName": "Jiu Otsuka",
"givenName": "Jiu",
"surname": "Otsuka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University",
"fullName": "Katsutoshi Masai",
"givenName": "Katsutoshi",
"surname": "Masai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University",
"fullName": "Yuta Itoh",
"givenName": "Yuta",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University",
"fullName": "Yuta Sugiura",
"givenName": "Yuta",
"surname": "Sugiura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Keio University",
"fullName": "Maki Sugimoto",
"givenName": "Maki",
"surname": "Sugimoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "177-185",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892244",
"articleId": "12OmNwlHSSf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892246",
"articleId": "12OmNvmG7ZP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2018/2335/0/233501a286",
"title": "Spotting the Details: The Various Facets of Facial Expressions",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a286/12OmNzzP5H0",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b626",
"title": "Eyemotion: Classifying Facial Expressions in VR Using Eye-Tracking Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b626/18j8FIomLfi",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09786815",
"title": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09786815/1DSumaVNxG8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797925",
"title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797925/1cJ0J09XMdy",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c100",
"title": "Landmark-Guided Deformation Transfer of Template Facial Expressions for Automatic Generation of Avatar Blendshapes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c100/1i5mNnnOzlu",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090630",
"title": "Embodied Realistic Avatar System with Body Motions and Facial Expressions for Communication in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090630/1jIxtbZL30Y",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09157962",
"title": "Facial Expression Retargeting From Human to Avatar Made Easy",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09157962/1m1eKuAoOoE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a101",
"title": "Digital Full-Face Mask Display with Expression Recognition using Embedded Photo Reflective Sensor Arrays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a101/1pystZgPICk",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a082",
"title": "Unmasking Communication Partners: A Low-Cost AI Solution for Digitally Removing Head-Mounted Displays in VR-Based Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a082/1qpzzJaiYqk",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
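The record above trains a neural network on photo-reflective sensor distances to classify five basic expressions. A minimal scikit-learn sketch of that pipeline follows; the sensor count, the synthetic "distance" data, and the network size are assumptions for illustration (the paper reports 88% accuracy on real sensor data, which this toy setup does not claim to reproduce).

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

LABELS = ["Neutral", "Happy", "Angry", "Surprised", "Sad"]
N_SENSORS = 16  # assumed number of photo-reflective sensors inside the HMD

# Synthetic stand-in data: one distance prototype per expression plus sensor noise.
rng = np.random.default_rng(0)
prototypes = rng.uniform(5.0, 30.0, size=(len(LABELS), N_SENSORS))  # mm, fabricated
y = rng.integers(0, len(LABELS), size=2000)
X = prototypes[y] + rng.normal(0.0, 1.5, size=(2000, N_SENSORS))

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=500, random_state=0)
clf.fit(X_tr, y_tr)
print("held-out accuracy:", clf.score(X_te, y_te))
```

The same feature vectors could feed a regressor instead of a classifier to drive continuous avatar blendshape weights, which mirrors the regression-based real-time mapping the abstract mentions.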
|
{
"proceeding": {
"id": "12OmNs4S8vE",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzdoMAW",
"doi": "10.1109/3DUI.2008.4476604",
"title": "Poster: Sliding Viewport for Head Mounted Displays in Interactive Environments",
"normalizedTitle": "Poster: Sliding Viewport for Head Mounted Displays in Interactive Environments",
"abstract": "The ability to manipulate objects is fundamental to most virtual reality (VR) applications. A multitude of metaphors have been developed to facilitate object selection and manipulation, including the virtual hand metaphor, which remains by far the most popular technique in this category. The utility of the virtual hand depends on the user's ability to see it. Unfortunately, ensuring this is not always easy, especially in immersive systems which employ head mounted displays (HMD) with limited field of view (FOV), typically ranging from 40 to 60 degrees diagonally.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The ability to manipulate objects is fundamental to most virtual reality (VR) applications. A multitude of metaphors have been developed to facilitate object selection and manipulation, including the virtual hand metaphor, which remains by far the most popular technique in this category. The utility of the virtual hand depends on the user's ability to see it. Unfortunately, ensuring this is not always easy, especially in immersive systems which employ head mounted displays (HMD) with limited field of view (FOV), typically ranging from 40 to 60 degrees diagonally.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The ability to manipulate objects is fundamental to most virtual reality (VR) applications. A multitude of metaphors have been developed to facilitate object selection and manipulation, including the virtual hand metaphor, which remains by far the most popular technique in this category. The utility of the virtual hand depends on the user's ability to see it. Unfortunately, ensuring this is not always easy, especially in immersive systems which employ head mounted displays (HMD) with limited field of view (FOV), typically ranging from 40 to 60 degrees diagonally.",
"fno": "04476604",
"keywords": [
"Sliding Viewport",
"Head Mounted Display",
"Interactive Environment",
"Virtual Reality Application",
"Object Selection",
"Object Manipulation",
"Virtual Hand Metaphor",
"Immersive System"
],
"authors": [
{
"affiliation": "Univ. of Hawaii, Honolulu",
"fullName": "A. Sherstyuk",
"givenName": "A.",
"surname": "Sherstyuk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Hawaii, Honolulu",
"fullName": "D. Vincent",
"givenName": "D.",
"surname": "Vincent",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "C. Jay",
"givenName": "C.",
"surname": "Jay",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "135-136",
"year": "2008",
"issn": null,
"isbn": "978-1-4244-2047-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04476602",
"articleId": "12OmNxdVgVU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04476620",
"articleId": "12OmNAolGUm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vrais/1995/7084/0/70840056",
"title": "Quantification of adaptation to virtual-eye location in see-thru head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840056/12OmNyRPgJk",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446345",
"title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09850416",
"title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09850416/1Fz4SPLVTMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a505",
"title": "A Stroop-based Long-term Cognitive Training Game for the Elderly in Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a505/1J7W7OAeKFa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090681",
"title": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090681/1jIxoZtoPlK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a301",
"title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1AIBM1Q",
"doi": "10.1109/VR.2018.8446139",
"title": "The Effect of Immersive Displays on Situation Awareness in Virtual Environments for Aerial Firefighting Air Attack Supervisor Training",
"normalizedTitle": "The Effect of Immersive Displays on Situation Awareness in Virtual Environments for Aerial Firefighting Air Attack Supervisor Training",
"abstract": "Situation Awareness (SA) is an essential skill in Air Attack Supervision (AAS) for aerial based wildfire firefighting. The display types used for Virtual Reality Training Systems (VRTS) afford different visual SA depending on the Field of View (FoV) as well as the sense of presence users can obtain in the virtual environment. We conducted a study with 36 participants to evaluate SA acquisition in three display types: a high-definition TV (HDTV), an Oculus Rift Head-Mounted Display (HMD) and a 270° cylindrical simulation projection display called the SimPit. We found a significant difference between the HMD and the HDTV, as well as with the SimPit and the HDTV for the three levels of SA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Situation Awareness (SA) is an essential skill in Air Attack Supervision (AAS) for aerial based wildfire firefighting. The display types used for Virtual Reality Training Systems (VRTS) afford different visual SA depending on the Field of View (FoV) as well as the sense of presence users can obtain in the virtual environment. We conducted a study with 36 participants to evaluate SA acquisition in three display types: a high-definition TV (HDTV), an Oculus Rift Head-Mounted Display (HMD) and a 270° cylindrical simulation projection display called the SimPit. We found a significant difference between the HMD and the HDTV, as well as with the SimPit and the HDTV for the three levels of SA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Situation Awareness (SA) is an essential skill in Air Attack Supervision (AAS) for aerial based wildfire firefighting. The display types used for Virtual Reality Training Systems (VRTS) afford different visual SA depending on the Field of View (FoV) as well as the sense of presence users can obtain in the virtual environment. We conducted a study with 36 participants to evaluate SA acquisition in three display types: a high-definition TV (HDTV), an Oculus Rift Head-Mounted Display (HMD) and a 270° cylindrical simulation projection display called the SimPit. We found a significant difference between the HMD and the HDTV, as well as with the SimPit and the HDTV for the three levels of SA.",
"fno": "08446139",
"keywords": [
"Digital Simulation",
"Helmet Mounted Displays",
"Virtual Reality",
"Wildfires",
"HDTV",
"Immersive Displays",
"Situation Awareness",
"Aerial Based Wildfire Firefighting",
"Display Types",
"Virtual Environment",
"High Definition TV",
"270 X 00 B 0 Cylindrical Simulation Projection Display",
"Virtual Environments",
"Situation Awareness",
"Aerial Firefighting Air Attack Supervisor Training",
"Air Attack Supervision",
"Virtual Reality Training Systems",
"Oculus Rift Head Mounted Display",
"Resists",
"HDTV",
"Training",
"Task Analysis",
"Virtual Environments",
"Visualization",
"H 5 2 User Interfaces User Interfaces Graphical User Interfaces GUI",
"H 5 M Information Interfaces And Presentation"
],
"authors": [
{
"affiliation": "University of Canterbury, Human Interface Technology Laboratory New Zealand, New Zealand",
"fullName": "Rory M.S. Clifford",
"givenName": "Rory M.S.",
"surname": "Clifford",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury, Human Interface Technology Laboratory New Zealand, New Zealand",
"fullName": "Humayun Khan",
"givenName": "Humayun",
"surname": "Khan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury, Human Interface Technology Laboratory New Zealand, New Zealand",
"fullName": "Simon Hoermann",
"givenName": "Simon",
"surname": "Hoermann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury, Human Interface Technology Laboratory New Zealand, New Zealand",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Canterbury, Human Interface Technology Laboratory New Zealand, New Zealand",
"fullName": "Robert W. Lindeman",
"givenName": "Robert W.",
"surname": "Lindeman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-2",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446535",
"articleId": "13bd1eSlyu3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446545",
"articleId": "13bd1fdV4l3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wevr/2017/3881/0/07957709",
"title": "Immersive eating: evaluating the use of head-mounted displays for mixed reality meal sessions",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957709/12OmNwK7o9G",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892302",
"title": "Estimating the motion-to-photon latency in head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892302/12OmNznkKb4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446189",
"title": "Towards Revisiting Passability Judgments in Real and Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446189/13bd1fdV4lC",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/var4good/2018/5977/0/08576892",
"title": "Development of a Multi-Sensory Virtual Reality Training Simulator for Airborne Firefighters Supervising Aerial Wildfire Suppression",
"doi": null,
"abstractUrl": "/proceedings-article/var4good/2018/08576892/17D45Xtvp8L",
"parentPublication": {
"id": "proceedings/var4good/2018/5977/0",
"title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699319",
"title": "Effect of Navigation Speed and VR Devices on Cybersickness",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699319/19F1OrW6KxW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798029",
"title": "Studying the Mental Effort in Virtual Versus Real Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798029/1cJ0I9M7tVm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798174",
"title": "Comparison in Depth Perception between Virtual Reality and Augmented Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798174/1cJ11OY78k0",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08928535",
"title": "VR Disability Simulation Reduces Implicit Bias Towards Persons With Disabilities",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08928535/1fEi0BsQqBy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090440",
"title": "The Effects of Multi-sensory Aerial Firefighting Training in Virtual Reality on Situational Awareness, Workload, and Presence",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090440/1jIxmMZKrSw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2020/6497/0/649700a157",
"title": "Human Factors Assessment in VR-based Firefighting Training in Maritime: A Pilot Study",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2020/649700a157/1olHyrrUZuU",
"parentPublication": {
"id": "proceedings/cw/2020/6497/0",
"title": "2020 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0TRvTuOk",
"doi": "10.1109/VR.2019.8798320",
"title": "Evaluating Dynamic Characteristics of Head Mounted Display in Parallel Movement with Simultaneous Subjective Observation Method",
"normalizedTitle": "Evaluating Dynamic Characteristics of Head Mounted Display in Parallel Movement with Simultaneous Subjective Observation Method",
"abstract": "In the Head Mounted Display(HMD) system, there is an item called dynamic characteristics such as latency takes for the motion to be reflected on display and motion wave distortion caused by motion estimation. It is said that the difference between the user's sense of motion and visual information due to this deterioration of dynamic characteristics is the factor of virtual reality sickness. Therefore, the dynamic characteristics are an important item of HMD. The final purpose of this research is to establish a method for measuring the dynamic characteristics of HMD. In this paper, we examined whether our proposed simultaneous subjective observation method is valid for the parallel movement and measured dynamic characteristics under various conditions. In conclusion, the method had enough precision to measure the dynamic characteristics of an HMD in the parallel movement and could be applied to various usage situation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the Head Mounted Display(HMD) system, there is an item called dynamic characteristics such as latency takes for the motion to be reflected on display and motion wave distortion caused by motion estimation. It is said that the difference between the user's sense of motion and visual information due to this deterioration of dynamic characteristics is the factor of virtual reality sickness. Therefore, the dynamic characteristics are an important item of HMD. The final purpose of this research is to establish a method for measuring the dynamic characteristics of HMD. In this paper, we examined whether our proposed simultaneous subjective observation method is valid for the parallel movement and measured dynamic characteristics under various conditions. In conclusion, the method had enough precision to measure the dynamic characteristics of an HMD in the parallel movement and could be applied to various usage situation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the Head Mounted Display(HMD) system, there is an item called dynamic characteristics such as latency takes for the motion to be reflected on display and motion wave distortion caused by motion estimation. It is said that the difference between the user's sense of motion and visual information due to this deterioration of dynamic characteristics is the factor of virtual reality sickness. Therefore, the dynamic characteristics are an important item of HMD. The final purpose of this research is to establish a method for measuring the dynamic characteristics of HMD. In this paper, we examined whether our proposed simultaneous subjective observation method is valid for the parallel movement and measured dynamic characteristics under various conditions. In conclusion, the method had enough precision to measure the dynamic characteristics of an HMD in the parallel movement and could be applied to various usage situation.",
"fno": "08798320",
"keywords": [
"Helmet Mounted Displays",
"Motion Estimation",
"Three Dimensional Displays",
"Virtual Reality",
"Parallel Movement",
"Simultaneous Subjective Observation Method",
"Head Mounted Display",
"HMD System",
"Resists",
"Cameras",
"Tracking",
"Virtual Reality",
"Actuators",
"Dynamics",
"Visualization",
"Dynamic Characteristics",
"Latency",
"Simultaneous Subjective Observation Method"
],
"authors": [
{
"affiliation": "Graduate School of Nature Science and Technology, Gifu University",
"fullName": "Eisaku Miyamoto",
"givenName": "Eisaku",
"surname": "Miyamoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Engineering, Gifu University",
"fullName": "Ryugo Kijima",
"givenName": "Ryugo",
"surname": "Kijima",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1084-1085",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798009",
"articleId": "1cJ1a4b7cfS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797726",
"articleId": "1cJ0VwwRIA0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a004",
"title": "Development of Racing Game Using Motion Seat",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a004/12OmNx965zU",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892255",
"title": "Robust optical see-through head-mounted display calibration: Taking anisotropic nature of user interaction errors into account",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892255/12OmNxvO04e",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892302",
"title": "Estimating the motion-to-photon latency in head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892302/12OmNznkKb4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446524",
"title": "HangerOVER: Development of HMO-Embedded Haptic Display Using the Hanger Reflex and VR Application",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446524/13bd1fdV4l2",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a660",
"title": "Towards Conducting Effective Locomotion Through Hardware Transformation in Head-Mounted-Device - A Review Study",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a660/1CJcC7q0PRu",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797925",
"title": "Mask-off: Synthesizing Face Images in the Presence of Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797925/1cJ0J09XMdy",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a058",
"title": "New System to Measure Motion Motion-to-Photon Latency of Virtual Reality Head Mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a058/1gyskZKBOtq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089661",
"title": "Simultaneous Run-Time Measurement of Motion-to-Photon Latency and Latency Jitter",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089661/1jIxfrGAC8o",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a439",
"title": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger’s Comfort Improvement",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a439/1yeQPu8aFlm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0UskDCRa",
"doi": "10.1109/VR.2019.8797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"normalizedTitle": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"abstract": "Volumetric capture allows the creation of near-video-quality content that can be explored with six degrees of freedom. Due to limitations in these experiences, such as the content being fixed at the point of filming, an understanding of eye-gaze awareness is critical. A repeated measures experiment was conducted that explored users' ability to evaluate where a volumetrically captured avatar (VCA) was looking. Wearing one of two head-mounted displays (HMDs), 36 participants rotated a VCA to look at a target. The HMD resolution, target position, and VCA's eye-gaze direction were varied. Results did not show a difference in accuracy between HMD resolutions, while the task became significantly harder for target locations further away from the user. In contrast to real-world studies, participants consistently misjudged eye-gaze direction based on target location, but not based on the avatar's head turn direction. Implications are discussed, as results for VCAs viewed in HMDs appear to differ from face-to-face scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volumetric capture allows the creation of near-video-quality content that can be explored with six degrees of freedom. Due to limitations in these experiences, such as the content being fixed at the point of filming, an understanding of eye-gaze awareness is critical. A repeated measures experiment was conducted that explored users' ability to evaluate where a volumetrically captured avatar (VCA) was looking. Wearing one of two head-mounted displays (HMDs), 36 participants rotated a VCA to look at a target. The HMD resolution, target position, and VCA's eye-gaze direction were varied. Results did not show a difference in accuracy between HMD resolutions, while the task became significantly harder for target locations further away from the user. In contrast to real-world studies, participants consistently misjudged eye-gaze direction based on target location, but not based on the avatar's head turn direction. Implications are discussed, as results for VCAs viewed in HMDs appear to differ from face-to-face scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volumetric capture allows the creation of near-video-quality content that can be explored with six degrees of freedom. Due to limitations in these experiences, such as the content being fixed at the point of filming, an understanding of eye-gaze awareness is critical. A repeated measures experiment was conducted that explored users' ability to evaluate where a volumetrically captured avatar (VCA) was looking. Wearing one of two head-mounted displays (HMDs), 36 participants rotated a VCA to look at a target. The HMD resolution, target position, and VCA's eye-gaze direction were varied. Results did not show a difference in accuracy between HMD resolutions, while the task became significantly harder for target locations further away from the user. In contrast to real-world studies, participants consistently misjudged eye-gaze direction based on target location, but not based on the avatar's head turn direction. Implications are discussed, as results for VCAs viewed in HMDs appear to differ from face-to-face scenarios.",
"fno": "08797852",
"keywords": [
"Avatars",
"Helmet Mounted Displays",
"Image Capture",
"Image Resolution",
"Video Signal Processing",
"HMD Resolution",
"Target Location",
"Head Mounted Displays",
"Near Video Quality Content",
"Eye Gaze Awareness",
"Volumetrically Captured Avatar",
"Six Degrees Of Freedom",
"Volumetric Characters Eye Gaze Direction",
"Face To Face Scenarios",
"VCA",
"Avatars",
"Receivers",
"Three Dimensional Displays",
"Task Analysis",
"Cameras",
"Solid Modeling",
"Resists",
"User Study",
"Virtual Reality",
"Gaze Perception",
"1 1 1 Human Centered Computing X 2014 Empirical Studies In HCI",
"1 1 3 2 Human Centered Computing Interaction Devices X 2014 Displays And Imagers",
"1 1 4 Human Centered Computing X 2014 HCI Theory Concepts And Models"
],
"authors": [
{
"affiliation": "University College London, United Kingdom",
"fullName": "Andrew MacQuarrie",
"givenName": "Andrew",
"surname": "MacQuarrie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University College London, United Kingdom",
"fullName": "Anthony Steed",
"givenName": "Anthony",
"surname": "Steed",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "645-654",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797992",
"articleId": "1cJ0SIvkZnG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798239",
"articleId": "1cJ0KYcUvHq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a026",
"title": "Estimating Gaze Depth Using Multi-Layer Perceptron",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a026/12OmNAkWvFD",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223443",
"title": "Non-obscuring binocular eye tracking for wide field-of-view head-mounted-displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223443/12OmNqzu6MP",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926684",
"title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbesc/2016/2653/0/2653a024",
"title": "Towards a Low-Cost Augmented Reality Head-Mounted Display with Real-Time Eye Center Location Capability",
"doi": null,
"abstractUrl": "/proceedings-article/sbesc/2016/2653a024/12OmNzxgHyv",
"parentPublication": {
"id": "proceedings/sbesc/2016/2653/0",
"title": "2016 VI Brazilian Symposium on Computing Systems Engineering (SBESC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446494",
"title": "Real-Time 3D Face Reconstruction and Gaze Tracking for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446494/13bd1eSlytf",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07012105",
"title": "Corneal-Imaging Calibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07012105/13rRUxE04tC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09786815",
"title": "Analyzing the Effect of Diverse Gaze and Head Direction on Facial Expression Recognition with Photo-Reflective Sensors Embedded in a Head-Mounted Display",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09786815/1DSumaVNxG8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1fHkkWQ0aEE",
"title": "2019 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHkmnjJYru",
"doi": "10.1109/CW.2019.00009",
"title": "Development of Easy Attachable Biological Information Measurement Device for Various Head Mounted Displays",
"normalizedTitle": "Development of Easy Attachable Biological Information Measurement Device for Various Head Mounted Displays",
"abstract": "It is important to measure the user's biological information when experiencing virtual reality (VR) content. By measuring such biological information during a VR stimulation, the body's response to the stimulation can be verified. In addition, it is possible to change the stimulation interactively by estimating the feeling from the measured biological information. However, the user load required to mount the sensor for biological information sensing under the existing VR content experience is significant, and the noise due to body movement is also a problem. In this paper, a biometric device that can be mounted on a head mounted display (HMD) was developed. Because an HMD is attached strongly to the face, it is thought to be robust to body movement and thus the mounting load of the sensor can be ignored. The developed device can simply be mounted on an HMD. A pulse waveform can be acquired from the optical pulse wave sensor arranged on the nose side of the HMD, and the respiration waveform can be acquired from a thermopile arranged in the nostril area of the HMD. We condacted the experiment to verified that a pulse wave and the respiration can be measured with sufficient accuracy for a calculation of the tension and excitement of the user. As a result of the experiment, it was confirmed that the pulse wave can be measured with an error of less than 1% in nine out of 14 users and that the respiration can be measured with an error of 0.6% if user does not move. The respiration was measured with high accuracy regardless of the type of HMD used.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is important to measure the user's biological information when experiencing virtual reality (VR) content. By measuring such biological information during a VR stimulation, the body's response to the stimulation can be verified. In addition, it is possible to change the stimulation interactively by estimating the feeling from the measured biological information. However, the user load required to mount the sensor for biological information sensing under the existing VR content experience is significant, and the noise due to body movement is also a problem. In this paper, a biometric device that can be mounted on a head mounted display (HMD) was developed. Because an HMD is attached strongly to the face, it is thought to be robust to body movement and thus the mounting load of the sensor can be ignored. The developed device can simply be mounted on an HMD. A pulse waveform can be acquired from the optical pulse wave sensor arranged on the nose side of the HMD, and the respiration waveform can be acquired from a thermopile arranged in the nostril area of the HMD. We condacted the experiment to verified that a pulse wave and the respiration can be measured with sufficient accuracy for a calculation of the tension and excitement of the user. As a result of the experiment, it was confirmed that the pulse wave can be measured with an error of less than 1% in nine out of 14 users and that the respiration can be measured with an error of 0.6% if user does not move. The respiration was measured with high accuracy regardless of the type of HMD used.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is important to measure the user's biological information when experiencing virtual reality (VR) content. By measuring such biological information during a VR stimulation, the body's response to the stimulation can be verified. In addition, it is possible to change the stimulation interactively by estimating the feeling from the measured biological information. However, the user load required to mount the sensor for biological information sensing under the existing VR content experience is significant, and the noise due to body movement is also a problem. In this paper, a biometric device that can be mounted on a head mounted display (HMD) was developed. Because an HMD is attached strongly to the face, it is thought to be robust to body movement and thus the mounting load of the sensor can be ignored. The developed device can simply be mounted on an HMD. A pulse waveform can be acquired from the optical pulse wave sensor arranged on the nose side of the HMD, and the respiration waveform can be acquired from a thermopile arranged in the nostril area of the HMD. We condacted the experiment to verified that a pulse wave and the respiration can be measured with sufficient accuracy for a calculation of the tension and excitement of the user. As a result of the experiment, it was confirmed that the pulse wave can be measured with an error of less than 1% in nine out of 14 users and that the respiration can be measured with an error of 0.6% if user does not move. The respiration was measured with high accuracy regardless of the type of HMD used.",
"fno": "229700a001",
"keywords": [
"Biomechanics",
"Helmet Mounted Displays",
"Optical Sensors",
"Pneumodynamics",
"Thermopiles",
"Virtual Reality",
"Body Movement",
"Biometric Device",
"Optical Pulse Wave Sensor",
"Respiration Waveform",
"Easy Attachable Biological Information Measurement Device",
"Head Mounted Displays",
"User Load",
"Biological Information Sensing",
"Virtual Reality Stimulation",
"Thermopile Arrangement",
"Nostril Area",
"Resists",
"Pulse Measurements",
"Temperature Measurement",
"Nose",
"Face",
"Robustness",
"Head Mounted Display",
"Blood Volume Pulse",
"Respiration",
"Biological Information Measurement"
],
"authors": [
{
"affiliation": "The University of Tokyo",
"fullName": "Masahiro Inazawa",
"givenName": "Masahiro",
"surname": "Inazawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Yuki Ban",
"givenName": "Yuki",
"surname": "Ban",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-2297-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "229700z020",
"articleId": "1fHkl6d33UI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "229700a009",
"articleId": "1fHkm3f2V4A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wevr/2017/3881/0/07957709",
"title": "Immersive eating: evaluating the use of head-mounted displays for mixed reality meal sessions",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957709/12OmNwK7o9G",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2015/8562/0/07439544",
"title": "Chaotic synchronization of pulse waves and respiration",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2015/07439544/12OmNyQGSfF",
"parentPublication": {
"id": "proceedings/iciibms/2015/8562/0",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciibms/2015/8562/0/07439545",
"title": "The impact of trait anxiety under a painful stimulus on the chaotic synchronization of respiration and pulse waves",
"doi": null,
"abstractUrl": "/proceedings-article/iciibms/2015/07439545/12OmNyuPLp9",
"parentPublication": {
"id": "proceedings/iciibms/2015/8562/0",
"title": "2015 International Conference on Intelligent Informatics and Biomedical Sciences (ICIIBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892302",
"title": "Estimating the motion-to-photon latency in head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892302/12OmNznkKb4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446583",
"title": "Evaluation of Optical See-Through Head-Mounted Displays in Training for Critical Care and Trauma",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446583/13bd1ftOBDp",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a025",
"title": "Color Preference Differences between Head Mounted Displays and PC Screens",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a025/17D45XoXP5M",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a470",
"title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798320",
"title": "Evaluating Dynamic Characteristics of Head Mounted Display in Parallel Movement with Simultaneous Subjective Observation Method",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798320/1cJ0TRvTuOk",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxaeEdNkc",
"doi": "10.1109/VR46266.2020.00108",
"title": "Angular Dependence of the Spatial Resolution in Virtual Reality Displays",
"normalizedTitle": "Angular Dependence of the Spatial Resolution in Virtual Reality Displays",
"abstract": "We compare two methods for characterizing the angular dependence of the spatial resolution in virtual reality head-mounted displays (HMDs) by measuring the line spread response (LSR) across the field of view (FOV) of the device. While slanted-edge is the standard method for determining the resolution of cameras, the standard approach for display devices is to used a line or edge aligned to the display pixel array. However, applying the LSR to head-mounted displays (HMDs) presents additional challenges due to the neareye optics. The LSRs of the HTC Vive and HTC Vive Pro were measured using a line of single white pixels by setting the red, green, and blue subpixels at maximum driving level. The white line was swept along a single direction over a 30° range in the FOV and the spatial resolution was measured using two approaches: wide-field and angle-scanning. In the wide-field method, the 30° FOV is imaged onto a stationary camera. In the second method, the camera is rotated across the FOV such that the white line remains static on the camera with the rotation axis located behind the lens to mimic the human visual system. The results show that the wide-field method overestimates the spatial resolution of the HMD by approximately 40% for angles larger than 10°. Consistent results obtained for the Vive and the Vive Pro indicate that the cause of the resolution limitation depends on the location in the FOV. The limitation in the center of the FOV is the pixel density, whereas, the off-axis spatial resolution is limited by optical components. Achieving high resolution VR HMDs requires system-wide design and technology improvement.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We compare two methods for characterizing the angular dependence of the spatial resolution in virtual reality head-mounted displays (HMDs) by measuring the line spread response (LSR) across the field of view (FOV) of the device. While slanted-edge is the standard method for determining the resolution of cameras, the standard approach for display devices is to used a line or edge aligned to the display pixel array. However, applying the LSR to head-mounted displays (HMDs) presents additional challenges due to the neareye optics. The LSRs of the HTC Vive and HTC Vive Pro were measured using a line of single white pixels by setting the red, green, and blue subpixels at maximum driving level. The white line was swept along a single direction over a 30° range in the FOV and the spatial resolution was measured using two approaches: wide-field and angle-scanning. In the wide-field method, the 30° FOV is imaged onto a stationary camera. In the second method, the camera is rotated across the FOV such that the white line remains static on the camera with the rotation axis located behind the lens to mimic the human visual system. The results show that the wide-field method overestimates the spatial resolution of the HMD by approximately 40% for angles larger than 10°. Consistent results obtained for the Vive and the Vive Pro indicate that the cause of the resolution limitation depends on the location in the FOV. The limitation in the center of the FOV is the pixel density, whereas, the off-axis spatial resolution is limited by optical components. Achieving high resolution VR HMDs requires system-wide design and technology improvement.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We compare two methods for characterizing the angular dependence of the spatial resolution in virtual reality head-mounted displays (HMDs) by measuring the line spread response (LSR) across the field of view (FOV) of the device. While slanted-edge is the standard method for determining the resolution of cameras, the standard approach for display devices is to used a line or edge aligned to the display pixel array. However, applying the LSR to head-mounted displays (HMDs) presents additional challenges due to the neareye optics. The LSRs of the HTC Vive and HTC Vive Pro were measured using a line of single white pixels by setting the red, green, and blue subpixels at maximum driving level. The white line was swept along a single direction over a 30° range in the FOV and the spatial resolution was measured using two approaches: wide-field and angle-scanning. In the wide-field method, the 30° FOV is imaged onto a stationary camera. In the second method, the camera is rotated across the FOV such that the white line remains static on the camera with the rotation axis located behind the lens to mimic the human visual system. The results show that the wide-field method overestimates the spatial resolution of the HMD by approximately 40% for angles larger than 10°. Consistent results obtained for the Vive and the Vive Pro indicate that the cause of the resolution limitation depends on the location in the FOV. The limitation in the center of the FOV is the pixel density, whereas, the off-axis spatial resolution is limited by optical components. Achieving high resolution VR HMDs requires system-wide design and technology improvement.",
"fno": "09089608",
"keywords": [
"Cameras",
"Spatial Resolution",
"Resists",
"Apertures",
"Distortion Measurement",
"Optical Imaging",
"Lenses",
"Human Centered Computing",
"Virtual Reality",
"Human Centered Computing",
"Visualization",
"Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Food and Drug Administration,Center for Devices and Radiological Health,Silver Spring,MD",
"fullName": "Ryan Beams",
"givenName": "Ryan",
"surname": "Beams",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Food and Drug Administration,Center for Devices and Radiological Health,Silver Spring,MD",
"fullName": "Brendan Collins",
"givenName": "Brendan",
"surname": "Collins",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Food and Drug Administration,Center for Devices and Radiological Health,Silver Spring,MD",
"fullName": "Andrea S. Kim",
"givenName": "Andrea S.",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Food and Drug Administration,Center for Devices and Radiological Health,Silver Spring,MD",
"fullName": "Aldo Badano",
"givenName": "Aldo",
"surname": "Badano",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "836-841",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089460",
"articleId": "1jIxdmr8R32",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089636",
"articleId": "1jIx9StwsnK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549353",
"title": "A robust camera-based method for optical distortion calibration of head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549353/12OmNwvVrHy",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d737",
"title": "Variable Aperture Light Field Photography: Overcoming the Diffraction-Limited Spatio-Angular Resolution Tradeoff",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d737/12OmNyGbIgT",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a009",
"title": "Improving 360-Degree Video Field-of-View Prediction and Edge Caching",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a009/1A3j6YEkCxa",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09850416",
"title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09850416/1Fz4SPLVTMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798369",
"title": "Brain Activity in Virtual Reality: Assessing Signal Quality of High-Resolution EEG While Using Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798369/1cJ18Pncw9y",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a259",
"title": "OSTNet: Calibration Method for Optical See-Through Head-Mounted Displays via Non-Parametric Distortion Map Generation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a259/1gysj1o4L16",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998293",
"title": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998293/1hrXiCmKkak",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2020/1485/0/09105945",
"title": "The impact of screen resolution of HMD on perceptual quality of immersive videos",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2020/09105945/1kwqM4xaLNC",
"parentPublication": {
"id": "proceedings/icmew/2020/1485/0",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523894",
"title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrNh0vs",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"acronym": "icat",
"groupId": "1001485",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyen1lo",
"doi": "10.1109/ICAT.2013.6728904",
"title": "A proposal of two-handed multi-finger haptic interface with rotary frame",
"normalizedTitle": "A proposal of two-handed multi-finger haptic interface with rotary frame",
"abstract": "We propose a method to optimally control a rotary frame of two-handed multi-finger wire driven haptic device SPIDAR-10 which makes technical operation possible. SPIDAR-10 enable users to manipulate virtual objects in a VR world with ten fingers of both hands. Twenty wires are tensioned for five fingers of one hand in each cylindrical frame of right and left hand. Therefore, the interference of the wires in the frame causes a problem when performing a complex operation by using both hands. If the interference occurs, the operator may feel not only uncomfortable but also affect a bad influence on the accuracy of the position and orientation estimation of fingers. So, it is necessary to reduce the interference of the wires.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a method to optimally control a rotary frame of two-handed multi-finger wire driven haptic device SPIDAR-10 which makes technical operation possible. SPIDAR-10 enable users to manipulate virtual objects in a VR world with ten fingers of both hands. Twenty wires are tensioned for five fingers of one hand in each cylindrical frame of right and left hand. Therefore, the interference of the wires in the frame causes a problem when performing a complex operation by using both hands. If the interference occurs, the operator may feel not only uncomfortable but also affect a bad influence on the accuracy of the position and orientation estimation of fingers. So, it is necessary to reduce the interference of the wires.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a method to optimally control a rotary frame of two-handed multi-finger wire driven haptic device SPIDAR-10 which makes technical operation possible. SPIDAR-10 enable users to manipulate virtual objects in a VR world with ten fingers of both hands. Twenty wires are tensioned for five fingers of one hand in each cylindrical frame of right and left hand. Therefore, the interference of the wires in the frame causes a problem when performing a complex operation by using both hands. If the interference occurs, the operator may feel not only uncomfortable but also affect a bad influence on the accuracy of the position and orientation estimation of fingers. So, it is necessary to reduce the interference of the wires.",
"fno": "06728904",
"keywords": [
"Thumb",
"Wires",
"Haptic Interfaces",
"Force",
"Interference",
"DC Motors",
"Rotating Frame",
"SPIDAR",
"Haptic Devices",
"Multi Fingered Hands"
],
"authors": [
{
"affiliation": null,
"fullName": "Naoki Maruyama",
"givenName": "Naoki",
"surname": "Maruyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lanhai Liu",
"givenName": null,
"surname": "Lanhai Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Katsuhito Akahane",
"givenName": "Katsuhito",
"surname": "Akahane",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Makoto Sato",
"givenName": "Makoto",
"surname": "Sato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "40-45",
"year": "2013",
"issn": null,
"isbn": "978-4-904490-11-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06728903",
"articleId": "12OmNrHjqIc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06728905",
"articleId": "12OmNxWcHfg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2013/5048/0/5048a786",
"title": "Reach Out and Touch Somebody's Virtual Hand: Affectively Connected through Mediated Touch",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a786/12OmNAq3hLn",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728908",
"title": "Development of string-based multi-finger haptic interface SPIDAR-MF",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728908/12OmNAsBFG5",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444670",
"title": "Perceptual thresholds for single vs. Multi-Finger Haptic interaction",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444670/12OmNqH9hix",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169803",
"title": "Haptic glove for finger rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169803/12OmNvSKO44",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2006/0226/0/02260076",
"title": "Future Haptic Science Encyclopedia: An Experimental Implementation of Networked Multi-Threaded Haptic Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260076/12OmNvrvjaq",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2007/3005/0/04390932",
"title": "Virtual Reality Systems Modelling Haptic Two-Finger Contact with Deformable Physical Surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2007/04390932/12OmNzJbR1B",
"parentPublication": {
"id": "proceedings/cw/2007/3005/0",
"title": "2007 International Conference on Cyberworlds (CW'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/04/07892978",
"title": "Evaluation of Wearable Haptic Systems for the Fingers in Augmented Reality Applications",
"doi": null,
"abstractUrl": "/journal/th/2017/04/07892978/13rRUwInv4D",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/01/tth2011010014",
"title": "Five-Fingered Haptic Interface Robot: HIRO III",
"doi": null,
"abstractUrl": "/journal/th/2011/01/tth2011010014/13rRUwghd9f",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07401066",
"title": "Anticipatory Vibrotactile Cueing Facilitates Grip Force Adjustment during Perturbative Loading",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07401066/13rRUyueghh",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797718",
"title": "Haptic Prop: A Tangible Prop for Semi-passive Haptic Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797718/1cJ0Lqfe0gw",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1AITn9W",
"doi": "10.1109/VR.2018.8446217",
"title": "Effects of Image Size and Structural Complexity on Time and Precision of Hand Movements in Head Mounted Virtual Reality",
"normalizedTitle": "Effects of Image Size and Structural Complexity on Time and Precision of Hand Movements in Head Mounted Virtual Reality",
"abstract": "The effective design of virtual reality (VR) simulators requires a deeper understanding of VR mediated human actions such as hand movements, with specifically tailored experiments testing how different design parameters affect performance. The present experiment investigates the time and precision of hand (index finger) movements under varying conditions of structural complexity and image size in VR without tactile feed-back from object to hand/finger. 18 right-handed subjects followed a complex and a simple physiological structure of small, medium and large size in VR, with the index finger of one of their two hands, from right to left, and from left to right. The results show that subjects performed best with small-size-simple structures and large-size-complex structures in VR. Movement execution was generally faster and more precise on simple structures. Performance was less precise when the dominant hand was used to follow the complex structures and small object size in VR. It is concluded that both size and structural complexity critically influence task execution in VR when no tactile feed-back from object to finger is generated. Individual learning curves should be monitored from the beginning of the training as suggested by the individual speed-precision analyses.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The effective design of virtual reality (VR) simulators requires a deeper understanding of VR mediated human actions such as hand movements, with specifically tailored experiments testing how different design parameters affect performance. The present experiment investigates the time and precision of hand (index finger) movements under varying conditions of structural complexity and image size in VR without tactile feed-back from object to hand/finger. 18 right-handed subjects followed a complex and a simple physiological structure of small, medium and large size in VR, with the index finger of one of their two hands, from right to left, and from left to right. The results show that subjects performed best with small-size-simple structures and large-size-complex structures in VR. Movement execution was generally faster and more precise on simple structures. Performance was less precise when the dominant hand was used to follow the complex structures and small object size in VR. It is concluded that both size and structural complexity critically influence task execution in VR when no tactile feed-back from object to finger is generated. Individual learning curves should be monitored from the beginning of the training as suggested by the individual speed-precision analyses.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The effective design of virtual reality (VR) simulators requires a deeper understanding of VR mediated human actions such as hand movements, with specifically tailored experiments testing how different design parameters affect performance. The present experiment investigates the time and precision of hand (index finger) movements under varying conditions of structural complexity and image size in VR without tactile feed-back from object to hand/finger. 18 right-handed subjects followed a complex and a simple physiological structure of small, medium and large size in VR, with the index finger of one of their two hands, from right to left, and from left to right. The results show that subjects performed best with small-size-simple structures and large-size-complex structures in VR. Movement execution was generally faster and more precise on simple structures. Performance was less precise when the dominant hand was used to follow the complex structures and small object size in VR. It is concluded that both size and structural complexity critically influence task execution in VR when no tactile feed-back from object to finger is generated. Individual learning curves should be monitored from the beginning of the training as suggested by the individual speed-precision analyses.",
"fno": "08446217",
"keywords": [
"Biomechanics",
"Feedback",
"Helmet Mounted Displays",
"Image Processing",
"Physiological Models",
"Virtual Reality",
"Structural Complexity",
"Image Size",
"Hand Movements",
"Virtual Reality Simulators",
"VR Mediated Human Actions",
"Index Finger",
"Tactile Feedback",
"Physiological Structure",
"Complex Structures",
"Speed Precision Analyses",
"Head Mounted Virtual Reality",
"Learning Curves",
"Indexes",
"Complexity Theory",
"Three Dimensional Displays",
"Head",
"Software",
"Image Color Analysis",
"Virtual Reality",
"Computing Methodologies Computer Graphics Graphic Systems And Interfaces Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Devices",
"Human Centered Computing Interaction Design",
"Software And Its Engineering Software Organization And Properties Virtual Worlds Software Virtual Worlds Training Simulations"
],
"authors": [
{
"affiliation": "University of Strasbourg CNRS, ICube Laboratory, UMR 7357 Strasbourg, France",
"fullName": "Anil Ufuk Batmaz",
"givenName": "Anil Ufuk",
"surname": "Batmaz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Strasbourg CNRS, ICube Laboratory, UMR 7357 Strasbourg, France",
"fullName": "Michel de Mathelin",
"givenName": "Michel",
"surname": "de Mathelin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Strasbourg CNRS, ICube Laboratory, UMR 7357 Strasbourg, France",
"fullName": "Birgitta Dresp-Langley",
"givenName": "Birgitta",
"surname": "Dresp-Langley",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "167-174",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446059",
"articleId": "13bd1eSlysI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08448284",
"articleId": "13bd1eY1x40",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836518",
"title": "AR Tabletop Interface using a Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836518/12OmNyoiYW4",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476604",
"title": "Poster: Sliding Viewport for Head Mounted Displays in Interactive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476604/12OmNzdoMAW",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699201",
"title": "SWAG Demo: Smart Watch Assisted Gesture Interaction for Mixed Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699201/19F1VvOVhew",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a131",
"title": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a131/1JrRdnGe43C",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090681",
"title": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090681/1jIxoZtoPlK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a109",
"title": "Generative RGB-D Face Completion for Head-Mounted Display Removal",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a109/1tnXncnHsIg",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523895",
"title": "HPUI: Hand Proximate User Interfaces for One-Handed Interactions on Head Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523895/1wpqwrI9ISA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1VvOVhew",
"doi": "10.1109/ISMAR-Adjunct.2018.00130",
"title": "SWAG Demo: Smart Watch Assisted Gesture Interaction for Mixed Reality Head-Mounted Displays",
"normalizedTitle": "SWAG Demo: Smart Watch Assisted Gesture Interaction for Mixed Reality Head-Mounted Displays",
"abstract": "In this demonstration, we will show a prototype system with sensor fusion approach to robustly track 6 degrees of freedom of hand movement and support intuitive hand gesture interaction and 3D object manipulation for Mixed Reality head-mounted displays. Robust tracking of hand and finger with egocentric camera remains a challenging problem, especially with self-occlusion - for example, when user tries to grab a virtual object in midair by closing the palm. Our approach leverages the use of a common smart watch worn on the wrist to provide a more reliable palm and wrist orientation data, while fusing the data with camera to achieve robust hand motion and orientation for interaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this demonstration, we will show a prototype system with sensor fusion approach to robustly track 6 degrees of freedom of hand movement and support intuitive hand gesture interaction and 3D object manipulation for Mixed Reality head-mounted displays. Robust tracking of hand and finger with egocentric camera remains a challenging problem, especially with self-occlusion - for example, when user tries to grab a virtual object in midair by closing the palm. Our approach leverages the use of a common smart watch worn on the wrist to provide a more reliable palm and wrist orientation data, while fusing the data with camera to achieve robust hand motion and orientation for interaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this demonstration, we will show a prototype system with sensor fusion approach to robustly track 6 degrees of freedom of hand movement and support intuitive hand gesture interaction and 3D object manipulation for Mixed Reality head-mounted displays. Robust tracking of hand and finger with egocentric camera remains a challenging problem, especially with self-occlusion - for example, when user tries to grab a virtual object in midair by closing the palm. Our approach leverages the use of a common smart watch worn on the wrist to provide a more reliable palm and wrist orientation data, while fusing the data with camera to achieve robust hand motion and orientation for interaction.",
"fno": "08699201",
"keywords": [
"Augmented Reality",
"Gesture Recognition",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Image Motion Analysis",
"Object Tracking",
"Sensor Fusion",
"Wearable Computers",
"SWAG Demo",
"Smart Watch Assisted Gesture Interaction",
"Sensor Fusion Approach",
"3 D Object Manipulation",
"Robust Tracking",
"Robust Hand Motion",
"Finger Tracking",
"Intuitive Hand Gesture Interaction",
"Mixed Reality Head Mounted Displays",
"Robust Hand Movement Tracking",
"Egocentric Camera",
"Self Occlusion",
"Palm Orientation Data",
"Wrist Orientation Data",
"Augmented Reality",
"Wearable Computing",
"3 D User Interfaces",
"Hand Interaction",
"Virtual 3 D Object Manipulation",
"H 5 2 Information Interfaces And Presentation User Interfaces X 2014 Input Interaction Styles"
],
"authors": [
{
"affiliation": "KAIST, UVR Lab, Republic of Korea",
"fullName": "Hyung-il Kim",
"givenName": "Hyung-il",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST, UVR Lab, Republic of Korea",
"fullName": "Juyoung Lee",
"givenName": "Juyoung",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of St Andrews, SACHI, Scotland, UK",
"fullName": "Hui-Shyong Yeo",
"givenName": "Hui-Shyong",
"surname": "Yeo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of St Andrews, SACHI, Scotland, UK",
"fullName": "Aaron Quigley",
"givenName": "Aaron",
"surname": "Quigley",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST, UVR Lab, Republic of Korea",
"fullName": "Woontack Woo",
"givenName": "Woontack",
"surname": "Woo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "428-429",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699312",
"articleId": "19F1Q6xx4BO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699222",
"articleId": "19F1PQOMxWg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2013/2322/0/2322a100",
"title": "Wrist Recognition and the Center of the Palm Estimation Based on Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2013/2322a100/12OmNAZOK21",
"parentPublication": {
"id": "proceedings/icvrv/2013/2322/0",
"title": "2013 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a093",
"title": "Gesture Recognition Based on Pixel Classification and Contour Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a093/12OmNBOllrq",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2007/1452/0/04373770",
"title": "The Gesture Watch: A Wireless Contact-free Gesture based Wrist Interface",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2007/04373770/12OmNC8uRvf",
"parentPublication": {
"id": "proceedings/iswc/2007/1452/0",
"title": "2007 11th IEEE International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590075",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643595",
"title": "Video stabilization to a global 3D frame of reference by fusing orientation sensor and image alignment data",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643595/12OmNyKrH3E",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2006/2688/0/26880052",
"title": "Near- and Far- Infrared Imaging for Vein Pattern Biometrics",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2006/26880052/12OmNyvY9AG",
"parentPublication": {
"id": "proceedings/avss/2006/2688/0",
"title": "2006 IEEE International Conference on Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504716",
"title": "Monochrome glove: A robust real-time hand gesture recognition method by using a fabric glove with design of structured markers",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504716/12OmNz2kqqa",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460065",
"title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090681",
"title": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090681/1jIxoZtoPlK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a332",
"title": "ARPads: Mid-air Indirect Input for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a332/1pysxWDVgS4",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcBfmyX5K",
"doi": "10.1109/VRW55335.2022.00142",
"title": "Multi-Touch Smartphone-Based Progressive Refinement VR Selection",
"normalizedTitle": "Multi-Touch Smartphone-Based Progressive Refinement VR Selection",
"abstract": "We developed a progressive refinement technique for VR object selection using a smartphone as a controller. Our technique combines progressive refinement with the marking menu-based CountMarks, which uses multi-finger touch gestures to “short-circuit” multi-item marking menus. Users can indicate a specific item in a sub-menu by pressing a specific number of fingers on the screen while swiping in the desired menu's direction. This reduces the number of steps in progressive refinement selection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We developed a progressive refinement technique for VR object selection using a smartphone as a controller. Our technique combines progressive refinement with the marking menu-based CountMarks, which uses multi-finger touch gestures to “short-circuit” multi-item marking menus. Users can indicate a specific item in a sub-menu by pressing a specific number of fingers on the screen while swiping in the desired menu's direction. This reduces the number of steps in progressive refinement selection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We developed a progressive refinement technique for VR object selection using a smartphone as a controller. Our technique combines progressive refinement with the marking menu-based CountMarks, which uses multi-finger touch gestures to “short-circuit” multi-item marking menus. Users can indicate a specific item in a sub-menu by pressing a specific number of fingers on the screen while swiping in the desired menu's direction. This reduces the number of steps in progressive refinement selection.",
"fno": "840200a582",
"keywords": [
"Gesture Recognition",
"Smart Phones",
"Touch Sensitive Screens",
"User Interfaces",
"Virtual Reality",
"Marking Menu Based Count Marks",
"Multitouch Smartphone Based Progressive Refinement VR Selection",
"VR Object Selection",
"Multifinger Touch Gestures",
"Short Circuit Multiitem Marking Menus",
"Three Dimensional Displays",
"Navigation",
"Conferences",
"Fingers",
"Virtual Reality",
"Pressing",
"User Interfaces",
"Selection",
"Progressive Refinement",
"Touchscreen",
"Human Centered Computing X 2192 Virtual Reality"
],
"authors": [
{
"affiliation": "Carleton University,Ottawa,Canada",
"fullName": "Elaheh Samimi",
"givenName": "Elaheh",
"surname": "Samimi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carleton University,Ottawa,Canada",
"fullName": "Robert J. Teather",
"givenName": "Robert J.",
"surname": "Teather",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "582-583",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a580",
"articleId": "1CJenv6SXTO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a584",
"articleId": "1CJd297BiDu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2008/2047/0/04476592",
"title": "Tech-note: rapMenu: Remote Menu Selection Using Freehand Gestural Input",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476592/12OmNAS9zL1",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2011/0063/0/05759219",
"title": "Rapid and accurate 3D selection by progressive refinement",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2011/05759219/12OmNB1wkHF",
"parentPublication": {
"id": "proceedings/3dui/2011/0063/0",
"title": "2011 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2000/0506/0/05060461",
"title": "Mining Recurrent Items in Multimedia with Progressive Resolution Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2000/05060461/12OmNvUaNoQ",
"parentPublication": {
"id": "proceedings/icde/2000/0506/0",
"title": "Proceedings of 16th International Conference on Data Engineering (Cat. No.00CB37073)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131737",
"title": "Handymenu: Integrating menu selection into a multifunction smartphone-based VR controller",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131737/12OmNx19jZG",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504707",
"title": "Depth-based 3D gesture multi-level radial menu for virtual object manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504707/12OmNx3HI96",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1994/6660/0/00363635",
"title": "Reality bites-progressive querying and result visualization in logical and VR spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1994/00363635/12OmNy3RRAi",
"parentPublication": {
"id": "proceedings/vl/1994/6660/0",
"title": "Proceedings of 1994 IEEE Symposium on Visual Languages",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/prs/1993/4920/0/00586089",
"title": "Progressive refinement radiosity on ring-connected multicomputers",
"doi": null,
"abstractUrl": "/proceedings-article/prs/1993/00586089/12OmNzmclCE",
"parentPublication": {
"id": "proceedings/prs/1993/4920/0",
"title": "Proceedings of 1993 IEEE Parallel Rendering Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a832",
"title": "GazeDock: Gaze-Only Menu Selection in Virtual Reality using Auto-Triggering Peripheral Menu",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a832/1CJbR6qnKdW",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a964",
"title": "Mid-air Haptic Texture Exploration in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a964/1CJeOwwf1Nm",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a304",
"title": "IMPReSS: Improved Multi-Touch Progressive Refinement Selection Strategy",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a304/1CJetSxfyi4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJeVfhmmkg",
"doi": "10.1109/VRW55335.2022.00174",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"normalizedTitle": "A Pinch-based Text Entry Method for Head-mounted Displays",
"abstract": "Pinch gestures have been used for text entry in Head-mounted dis-plays (HMDs), enabling a comfortable and eyes-free text entry. However, the number of pinch gestures is limited, making it difficult to input all characters. In addition, the common pinch-based meth-ods with a QWERTY keyboard require accurate control of the hand position and angle, which could be affected by natural tremors and the Heisenberg effect. So, we propose a new text entry method for HMDs, which combines hand positions and pinch gestures with a condensed key-based keyboard, enabling one-handed text entry for HMDs. With this method, users move their hands with a naturally comfortable posture between three large different spaces in the air to choose one key set and then execute one of the pinch gestures to choose one character, where hand jitter does not affect the selection, helping to improve the input speed. The results of a primary study show that the mean input speed of the proposed method is 7.60 words-per-minute (WPM).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pinch gestures have been used for text entry in Head-mounted dis-plays (HMDs), enabling a comfortable and eyes-free text entry. However, the number of pinch gestures is limited, making it difficult to input all characters. In addition, the common pinch-based meth-ods with a QWERTY keyboard require accurate control of the hand position and angle, which could be affected by natural tremors and the Heisenberg effect. So, we propose a new text entry method for HMDs, which combines hand positions and pinch gestures with a condensed key-based keyboard, enabling one-handed text entry for HMDs. With this method, users move their hands with a naturally comfortable posture between three large different spaces in the air to choose one key set and then execute one of the pinch gestures to choose one character, where hand jitter does not affect the selection, helping to improve the input speed. The results of a primary study show that the mean input speed of the proposed method is 7.60 words-per-minute (WPM).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pinch gestures have been used for text entry in Head-mounted dis-plays (HMDs), enabling a comfortable and eyes-free text entry. However, the number of pinch gestures is limited, making it difficult to input all characters. In addition, the common pinch-based meth-ods with a QWERTY keyboard require accurate control of the hand position and angle, which could be affected by natural tremors and the Heisenberg effect. So, we propose a new text entry method for HMDs, which combines hand positions and pinch gestures with a condensed key-based keyboard, enabling one-handed text entry for HMDs. With this method, users move their hands with a naturally comfortable posture between three large different spaces in the air to choose one key set and then execute one of the pinch gestures to choose one character, where hand jitter does not affect the selection, helping to improve the input speed. The results of a primary study show that the mean input speed of the proposed method is 7.60 words-per-minute (WPM).",
"fno": "840200a646",
"keywords": [
"Gesture Recognition",
"Handicapped Aids",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Keyboards",
"Mobile Computing",
"Text Analysis",
"Touch Sensitive Screens",
"Pinch Based Text Entry Method",
"Head Mounted Displays",
"Pinch Gestures",
"Head Mounted Dis Plays",
"HM Ds",
"Comfortable Eyes Free Text Entry",
"Common Pinch Based Meth Ods",
"Hand Position",
"Hand Positions",
"Condensed Key Based Keyboard",
"Three Dimensional Displays",
"Head Mounted Displays",
"Conferences",
"Keyboards",
"Virtual Reality",
"User Interfaces",
"Jitter",
"Human Centered Computing X 2014 Human Computer Interaction X 2014 Interaction Paradigms X 2014 Virtual Reality",
"Human Centered Computing X 2014 Human Computer Interaction X 2014 Interaction Techniques X 2014 Text Input",
"Human Centered Computing X 2014 Human Computer Interaction X 2014 Interaction Devices X 2014 Haptic Devices"
],
"authors": [
{
"affiliation": "Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing Institute of Technology",
"fullName": "Haiyan Jiang",
"givenName": "Haiyan",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing Institute of Technology",
"fullName": "Dongdong Weng",
"givenName": "Dongdong",
"surname": "Weng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing Institute of Technology",
"fullName": "Xiaonuo Dongye",
"givenName": "Xiaonuo",
"surname": "Dongye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Engineering Research Center of Mixed Reality and Advanced Display, Beijing Institute of Technology",
"fullName": "Yue Liu",
"givenName": "Yue",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "646-647",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJeV9FDJZe",
"name": "pvrw202284020-09757494s1-mm_840200a646.zip",
"size": "2.77 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757494s1-mm_840200a646.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a644",
"articleId": "1CJfpXY3WgM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a648",
"articleId": "1CJdmfeEjRu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpads/2014/7615/0/07097812",
"title": "Virtual keyboard for head mounted display-based wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097812/12OmNqzu6VX",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642443",
"title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a470",
"title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a131",
"title": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a131/1JrRdnGe43C",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08723303",
"title": "Errata to “RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions” [May 19 1991-2001]",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2019/0987/0/08943748",
"title": "Pointing and Selection Methods for Text Entry in Augmented Reality Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2019/08943748/1grOMBKDBPa",
"parentPublication": {
"id": "proceedings/ismar/2019/0987/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089533",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrRaeV82L6",
"doi": "10.1109/ISMAR55827.2022.00021",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"normalizedTitle": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"abstract": "Hand-based interaction, such as using a handheld controller or making hand gestures, has been widely adopted as the primary method for interacting with both virtual reality (VR) and augmented reality (AR) head-mounted displays (HMDs). In contrast, hands-free interaction avoids the need for users’ hands and although it can afford additional benefits, there has been limited research in exploring and evaluating hands-free techniques for these HMDs. As VR HMDs become ubiquitous, people will need to do text editing, which requires selecting text segments. Similar to hands-free interaction, text selection is underexplored. This research focuses on both, text selection via hands-free interaction. Our exploration involves a user study with 24 participants to investigate the performance, user experience, and workload of three hands-free selection mechanisms (Dwell, Blink, Voice) to complement head-based pointing. Results indicate that Blink outperforms Dwell and Voice in completion time. Users’ subjective feedback also shows that Blink is the preferred technique for text selection. This work is the first to explore handsfree interaction for text selection in VR HMDs. Our results provide a solid platform for further research in this important area.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hand-based interaction, such as using a handheld controller or making hand gestures, has been widely adopted as the primary method for interacting with both virtual reality (VR) and augmented reality (AR) head-mounted displays (HMDs). In contrast, hands-free interaction avoids the need for users’ hands and although it can afford additional benefits, there has been limited research in exploring and evaluating hands-free techniques for these HMDs. As VR HMDs become ubiquitous, people will need to do text editing, which requires selecting text segments. Similar to hands-free interaction, text selection is underexplored. This research focuses on both, text selection via hands-free interaction. Our exploration involves a user study with 24 participants to investigate the performance, user experience, and workload of three hands-free selection mechanisms (Dwell, Blink, Voice) to complement head-based pointing. Results indicate that Blink outperforms Dwell and Voice in completion time. Users’ subjective feedback also shows that Blink is the preferred technique for text selection. This work is the first to explore handsfree interaction for text selection in VR HMDs. Our results provide a solid platform for further research in this important area.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hand-based interaction, such as using a handheld controller or making hand gestures, has been widely adopted as the primary method for interacting with both virtual reality (VR) and augmented reality (AR) head-mounted displays (HMDs). In contrast, hands-free interaction avoids the need for users’ hands and although it can afford additional benefits, there has been limited research in exploring and evaluating hands-free techniques for these HMDs. As VR HMDs become ubiquitous, people will need to do text editing, which requires selecting text segments. Similar to hands-free interaction, text selection is underexplored. This research focuses on both, text selection via hands-free interaction. Our exploration involves a user study with 24 participants to investigate the performance, user experience, and workload of three hands-free selection mechanisms (Dwell, Blink, Voice) to complement head-based pointing. Results indicate that Blink outperforms Dwell and Voice in completion time. Users’ subjective feedback also shows that Blink is the preferred technique for text selection. This work is the first to explore handsfree interaction for text selection in VR HMDs. Our results provide a solid platform for further research in this important area.",
"fno": "532500a074",
"keywords": [
"Augmented Reality",
"Gesture Recognition",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Text Analysis",
"Virtual Reality",
"Exploring Evaluating Hands Free Techniques",
"Hand Based Interaction",
"Handheld Controller",
"Hands Free Interaction",
"Hands Free Selection Mechanisms",
"Hands Free Text Selection",
"Users",
"Virtual Reality Head Mounted Displays",
"VR HM Ds",
"Head Mounted Displays",
"Solids",
"User Experience",
"Task Analysis",
"Augmented Reality",
"Text Selection",
"Virtual Reality",
"User Study",
"Hands Free Interaction",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Interaction Techniques",
"Interaction Design",
"Empirical Studies In Interaction Design"
],
"authors": [
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Suzhou,China",
"fullName": "Xuanru Meng",
"givenName": "Xuanru",
"surname": "Meng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Birmingham City University,DMT Lab,Birmingham,UK",
"fullName": "Wenge Xu",
"givenName": "Wenge",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Suzhou,China",
"fullName": "Hai-Ning Liang",
"givenName": "Hai-Ning",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "74-81",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "532500a064",
"articleId": "1JrRc4SdYgU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a082",
"articleId": "1JrQQ8dsLKM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2009/3791/0/3791a043",
"title": "Enhancing Presence in Head-Mounted Display Environments by Visual Body Feedback Using Head-Mounted Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a043/12OmNxveNRr",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a503",
"title": "Optimal Font Size for Head-Mounted-Displays in Outdoor Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a503/12OmNzd7bBd",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642443",
"title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a746",
"title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a131",
"title": "Evaluation of Text Selection Techniques in Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a131/1JrRdnGe43C",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08723303",
"title": "Errata to “RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions” [May 19 1991-2001]",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a344",
"title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a344/1pysyrYBX5C",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIx7JtSOTC",
"doi": "10.1109/VR46266.2020.00092",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"normalizedTitle": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"abstract": "Text entry in virtual reality (VR) is currently a common activity and a challenging problem. In this paper, we introduce HiPad, leveraging a circular touchpad with a circular virtual keyboard, to support the one-hand text entry in mobile head-mounted displays (HMDs). The design of HiPad’s layout is based on a circle and a square with rounded corners, where the outer circle is subdivided into six keys’ regions containing letters. This technique input text by a common hand-held controller with a circular touchpad for HMDs and disambiguates the word based on the sequence of keys pressed by the user. In our first study, three potential layouts are considered and evaluated, leading to the design containing six keys. By analyzing the touch behavior of users, we optimize the 6-keys layout and conduct the second study, showing that the optimized layout has better performance. Then the third study is conducted to evaluate the performance of 6-keys HiPad with VE-layout and TP-layout and to study the learning curves. The results show that novices can achieve 13.57 Words per Minute (WPM) with VE-layout and 11.60 WPM with TP-layout and the speeds increase by 74.42% for VE-layout users and by 81.53% for TP-layout users through a short 60-phrase training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text entry in virtual reality (VR) is currently a common activity and a challenging problem. In this paper, we introduce HiPad, leveraging a circular touchpad with a circular virtual keyboard, to support the one-hand text entry in mobile head-mounted displays (HMDs). The design of HiPad’s layout is based on a circle and a square with rounded corners, where the outer circle is subdivided into six keys’ regions containing letters. This technique input text by a common hand-held controller with a circular touchpad for HMDs and disambiguates the word based on the sequence of keys pressed by the user. In our first study, three potential layouts are considered and evaluated, leading to the design containing six keys. By analyzing the touch behavior of users, we optimize the 6-keys layout and conduct the second study, showing that the optimized layout has better performance. Then the third study is conducted to evaluate the performance of 6-keys HiPad with VE-layout and TP-layout and to study the learning curves. The results show that novices can achieve 13.57 Words per Minute (WPM) with VE-layout and 11.60 WPM with TP-layout and the speeds increase by 74.42% for VE-layout users and by 81.53% for TP-layout users through a short 60-phrase training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text entry in virtual reality (VR) is currently a common activity and a challenging problem. In this paper, we introduce HiPad, leveraging a circular touchpad with a circular virtual keyboard, to support the one-hand text entry in mobile head-mounted displays (HMDs). The design of HiPad’s layout is based on a circle and a square with rounded corners, where the outer circle is subdivided into six keys’ regions containing letters. This technique input text by a common hand-held controller with a circular touchpad for HMDs and disambiguates the word based on the sequence of keys pressed by the user. In our first study, three potential layouts are considered and evaluated, leading to the design containing six keys. By analyzing the touch behavior of users, we optimize the 6-keys layout and conduct the second study, showing that the optimized layout has better performance. Then the third study is conducted to evaluate the performance of 6-keys HiPad with VE-layout and TP-layout and to study the learning curves. The results show that novices can achieve 13.57 Words per Minute (WPM) with VE-layout and 11.60 WPM with TP-layout and the speeds increase by 74.42% for VE-layout users and by 81.53% for TP-layout users through a short 60-phrase training.",
"fno": "09089533",
"keywords": [
"Layout",
"Keyboards",
"Training",
"Virtual Reality",
"Visualization",
"Sensors",
"Conferences",
"Human Centered Computing",
"Human Computer Interaction",
"Interaction Paradigms",
"Virtual Reality",
"Human Centered Computing",
"Interaction Techniques",
"Text Input"
],
"authors": [
{
"affiliation": "Beijing Institute of Technique",
"fullName": "Haiyan Jiang",
"givenName": "Haiyan",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technique,AICFVE of Beijing Film Academy",
"fullName": "Dongdong Weng",
"givenName": "Dongdong",
"surname": "Weng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "692-703",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089551",
"articleId": "1jIx95ncylO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089450",
"articleId": "1jIxe2m67ZK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpads/2014/7615/0/07097812",
"title": "Virtual keyboard for head mounted display-based wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097812/12OmNqzu6VX",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2017/3881/0/07957709",
"title": "Immersive eating: evaluating the use of head-mounted displays for mixed reality meal sessions",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957709/12OmNwK7o9G",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011070888",
"title": "Natural Perspective Projections for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011070888/13rRUwInvJd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456570",
"title": "PizzaText: Text Entry for Virtual Reality Systems Using Dual Thumbsticks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456570/14M3DYGRu3o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2017/0443/0/08103481",
"title": "Text entry using five to seven physical keys",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2017/08103481/17D45XDIXRA",
"parentPublication": {
"id": "proceedings/vlhcc/2017/0443/0",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642443",
"title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08723303",
"title": "Errata to “RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions” [May 19 1991-2001]",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2019/9148/0/08767420",
"title": "HIBEY: Hide the Keyboard in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2019/08767420/1bQzm74HXBm",
"parentPublication": {
"id": "proceedings/percom/2019/9148/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxoZtoPlK",
"doi": "10.1109/VRW50115.2020.00253",
"title": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted Displays",
"normalizedTitle": "Accuracy of Commodity Finger Tracking Systems for Virtual Reality Head-Mounted Displays",
"abstract": "Representing users’ hands and fingers in virtual reality is crucial for many tasks. Recently, virtual reality head-mounted displays, capable of camera-based inside-out tracking and finger and hand tracking, are becoming popular and complement add-on solutions, such as Leap Motion.However, interacting with physical objects requires an accurate grounded positioning of the virtual reality coordinate system relative to relevant objects, and a good spatial positioning of the user’s fingers and hands.In order to get a better understanding of the capabilities of Virtual Reality headset finger tracking solutions for interacting with physical objects, we ran a controlled experiment (n =24) comparing two commodity hand and finger tracking systems (HTC Vive and Leap Motion) and report on the accuracy of commodity hand tracking systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Representing users’ hands and fingers in virtual reality is crucial for many tasks. Recently, virtual reality head-mounted displays, capable of camera-based inside-out tracking and finger and hand tracking, are becoming popular and complement add-on solutions, such as Leap Motion.However, interacting with physical objects requires an accurate grounded positioning of the virtual reality coordinate system relative to relevant objects, and a good spatial positioning of the user’s fingers and hands.In order to get a better understanding of the capabilities of Virtual Reality headset finger tracking solutions for interacting with physical objects, we ran a controlled experiment (n =24) comparing two commodity hand and finger tracking systems (HTC Vive and Leap Motion) and report on the accuracy of commodity hand tracking systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Representing users’ hands and fingers in virtual reality is crucial for many tasks. Recently, virtual reality head-mounted displays, capable of camera-based inside-out tracking and finger and hand tracking, are becoming popular and complement add-on solutions, such as Leap Motion.However, interacting with physical objects requires an accurate grounded positioning of the virtual reality coordinate system relative to relevant objects, and a good spatial positioning of the user’s fingers and hands.In order to get a better understanding of the capabilities of Virtual Reality headset finger tracking solutions for interacting with physical objects, we ran a controlled experiment (n =24) comparing two commodity hand and finger tracking systems (HTC Vive and Leap Motion) and report on the accuracy of commodity hand tracking systems.",
"fno": "09090681",
"keywords": [
"Object Tracking",
"Virtual Reality",
"Task Analysis",
"Fingers",
"Euclidean Distance"
],
"authors": [
{
"affiliation": "Coburg University",
"fullName": "Daniel Schneider",
"givenName": "Daniel",
"surname": "Schneider",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University",
"fullName": "Alexander Otte",
"givenName": "Alexander",
"surname": "Otte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University",
"fullName": "Axel Simon Kublin",
"givenName": "Axel Simon",
"surname": "Kublin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University",
"fullName": "Alexander Martschenko",
"givenName": "Alexander",
"surname": "Martschenko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Cambridge",
"fullName": "Per Ola Kristensson",
"givenName": "Per Ola",
"surname": "Kristensson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Eyal Ofek",
"givenName": "Eyal",
"surname": "Ofek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research",
"fullName": "Michel Pahud",
"givenName": "Michel",
"surname": "Pahud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University",
"fullName": "Jens Grubert",
"givenName": "Jens",
"surname": "Grubert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "804-805",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090471",
"articleId": "1jIxm9DsWDS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090493",
"articleId": "1jIxsdXB2mY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2007/1153/0/04341745",
"title": "Geometric Properties and Computation of Three-Finger Caging Grasps of Convex Polygons",
"doi": null,
"abstractUrl": "/proceedings-article/case/2007/04341745/12OmNAJDBwC",
"parentPublication": {
"id": "proceedings/case/2007/1153/0",
"title": "3rd Annual IEEE Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504739",
"title": "Evaluation of hand and stylus based calibration for optical see-through head-mounted displays using leap motion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504739/12OmNxE2mZD",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/umedia/2014/4266/0/06916339",
"title": "Real-Time Finger Tracking for Virtual Instruments",
"doi": null,
"abstractUrl": "/proceedings-article/umedia/2014/06916339/12OmNxRF702",
"parentPublication": {
"id": "proceedings/umedia/2014/4266/0",
"title": "2014 7th International Conference on Ubi-Media Computing and Workshops (UMEDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446173",
"title": "Real-Time Marker-Based Finger Tracking with Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446173/13bd1eW2l9y",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263123",
"title": "MRTouch: Adding Touch Input to Head-Mounted Mixed Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263123/13rRUyft7D9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699201",
"title": "SWAG Demo: Smart Watch Assisted Gesture Interaction for Mixed Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699201/19F1VvOVhew",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscer/2022/8478/0/847800a262",
"title": "Piano Beginner: A Glove-based Finger Training VR Application",
"doi": null,
"abstractUrl": "/proceedings-article/iscer/2022/847800a262/1Hbby1S94oE",
"parentPublication": {
"id": "proceedings/iscer/2022/8478/0",
"title": "2022 International Symposium on Control Engineering and Robotics (ISCER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2020/7002/0/700200a552",
"title": "airFinger: Micro Finger Gesture Recognition via NIR Light Sensing for Smart Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2020/700200a552/1rsiORLSrio",
"parentPublication": {
"id": "proceedings/icdcs/2020/7002/0",
"title": "2020 IEEE 40th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413060",
"title": "Finger Vein Recognition and Intra-Subject Similarity Evaluation of Finger Veins using the CNN Triplet Loss",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413060/1tmjYmjqO0o",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523895",
"title": "HPUI: Hand Proximate User Interfaces for One-Handed Interactions on Head Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523895/1wpqwrI9ISA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuBtNYt0LC",
"doi": "10.1109/VR50410.2021.00076",
"title": "TapID: Rapid Touch Interaction in Virtual Reality using Wearable Sensing",
"normalizedTitle": "TapID: Rapid Touch Interaction in Virtual Reality using Wearable Sensing",
"abstract": "Current Virtual Reality systems typically use cameras to capture user input from controllers or free-hand mid-air interaction. In this paper, we argue that this is a key impediment to productivity scenarios in VR, which require continued interaction over prolonged periods of time-a requirement that controller or free-hand input in mid-air does not satisfy. To address this challenge, we bring rapid touch interaction on surfaces to Virtual Reality-the input modality that users have grown used to on phones and tablets for continued use. We present TapID, a wrist-based inertial sensing system that complements headset-tracked hand poses to trigger input in VR. TapID embeds a pair of inertial sensors in a flexible strap, one at either side of the wrist; from the combination of registered signals, TapID reliably detects surface touch events and, more importantly, identifies the finger used for touch. We evaluated TapID in a series of user studies on event-detection accuracy (F1 = 0.997) and hand-agnostic finger-identification accuracy (within-user: F1 = 0.93; across users: F1 = 0.91 after 10 refinement taps and F1 = 0.87 without refinement) in a seated table scenario. We conclude with a series of applications that complement hand tracking with touch input and that are uniquely enabled by TapID, including UI control, rapid keyboard typing and piano playing, as well as surface gestures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Current Virtual Reality systems typically use cameras to capture user input from controllers or free-hand mid-air interaction. In this paper, we argue that this is a key impediment to productivity scenarios in VR, which require continued interaction over prolonged periods of time-a requirement that controller or free-hand input in mid-air does not satisfy. To address this challenge, we bring rapid touch interaction on surfaces to Virtual Reality-the input modality that users have grown used to on phones and tablets for continued use. We present TapID, a wrist-based inertial sensing system that complements headset-tracked hand poses to trigger input in VR. TapID embeds a pair of inertial sensors in a flexible strap, one at either side of the wrist; from the combination of registered signals, TapID reliably detects surface touch events and, more importantly, identifies the finger used for touch. We evaluated TapID in a series of user studies on event-detection accuracy (F1 = 0.997) and hand-agnostic finger-identification accuracy (within-user: F1 = 0.93; across users: F1 = 0.91 after 10 refinement taps and F1 = 0.87 without refinement) in a seated table scenario. We conclude with a series of applications that complement hand tracking with touch input and that are uniquely enabled by TapID, including UI control, rapid keyboard typing and piano playing, as well as surface gestures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Current Virtual Reality systems typically use cameras to capture user input from controllers or free-hand mid-air interaction. In this paper, we argue that this is a key impediment to productivity scenarios in VR, which require continued interaction over prolonged periods of time-a requirement that controller or free-hand input in mid-air does not satisfy. To address this challenge, we bring rapid touch interaction on surfaces to Virtual Reality-the input modality that users have grown used to on phones and tablets for continued use. We present TapID, a wrist-based inertial sensing system that complements headset-tracked hand poses to trigger input in VR. TapID embeds a pair of inertial sensors in a flexible strap, one at either side of the wrist; from the combination of registered signals, TapID reliably detects surface touch events and, more importantly, identifies the finger used for touch. We evaluated TapID in a series of user studies on event-detection accuracy (F1 = 0.997) and hand-agnostic finger-identification accuracy (within-user: F1 = 0.93; across users: F1 = 0.91 after 10 refinement taps and F1 = 0.87 without refinement) in a seated table scenario. We conclude with a series of applications that complement hand tracking with touch input and that are uniquely enabled by TapID, including UI control, rapid keyboard typing and piano playing, as well as surface gestures.",
"fno": "255600a519",
"keywords": [
"Human Computer Interaction",
"Object Detection",
"User Interfaces",
"Virtual Reality",
"Wearable Sensors",
"UI Control",
"Rapid Keyboard Typing",
"Piano Playing",
"Rapid Touch Interaction",
"Wearable Sensing",
"Virtual Reality Systems",
"VR",
"Controller",
"Wrist Based Inertial Sensing System",
"Inertial Sensors",
"Surface Touch",
"Event Detection Accuracy",
"Hand Agnostic Finger Identification Accuracy",
"Tap ID",
"Cameras",
"Free Hand Mid Air Interaction",
"Headset Tracked Hand Poses",
"Flexible Strap",
"Surface Touch Event Detection",
"Complement Hand Tracking",
"Surface Gestures",
"Wrist",
"Productivity",
"Three Dimensional Displays",
"Wearable Computers",
"Keyboards",
"Virtual Reality",
"User Interfaces",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing Human Computer Interaction HCI Interaction Techniques Gestural Input"
],
"authors": [
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Manuel Meier",
"givenName": "Manuel",
"surname": "Meier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Paul Streli",
"givenName": "Paul",
"surname": "Streli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Andreas Fender",
"givenName": "Andreas",
"surname": "Fender",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zürich,Department of Computer Science,Switzerland",
"fullName": "Christian Holz",
"givenName": "Christian",
"surname": "Holz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "519-528",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a511",
"articleId": "1tuAKv1IB0s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a529",
"articleId": "1tuAMAuN6kU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bsn/2011/4431/0/4431a200",
"title": "Wireless Hand Gesture Capture through Wearable Passive Tag Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/bsn/2011/4431a200/12OmNBAIAOf",
"parentPublication": {
"id": "proceedings/bsn/2011/4431/0",
"title": "Wearable and Implantable Body Sensor Networks, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177390",
"title": "VTouch: Vision-enhanced interaction for large touch displays",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177390/12OmNrY3Lvv",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2007/1452/0/04373793",
"title": "Learning an Orchestra Conductor's Technique Using a Wearable Sensor Platform",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2007/04373793/12OmNscOU92",
"parentPublication": {
"id": "proceedings/iswc/2007/1452/0",
"title": "2007 11th IEEE International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/1999/0428/0/04280199",
"title": "The WristCam as Input Device",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/1999/04280199/12OmNwCJOYy",
"parentPublication": {
"id": "proceedings/iswc/1999/0428/0",
"title": "Digest of Papers. Third International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798863",
"title": "Poster: Wearable input device for smart glasses based on a wristband-type motion-aware touch panel",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798863/12OmNy5hRlX",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2014/4686/0/4686a705",
"title": "ZEBRA: Zero-Effort Bilateral Recurring Authentication",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2014/4686a705/12OmNzsJ7w6",
"parentPublication": {
"id": "proceedings/sp/2014/4686/0",
"title": "2014 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccps/2022/0967/0/096700a300",
"title": "Making Vibration-based On-body Interaction Robust",
"doi": null,
"abstractUrl": "/proceedings-article/iccps/2022/096700a300/1Et6j937eKc",
"parentPublication": {
"id": "proceedings/iccps/2022/0967/0",
"title": "2022 ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcoss/2020/4351/0/09183601",
"title": "HAWAD: Hand Washing Detection using Wrist Wearable Inertial Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/dcoss/2020/09183601/1mLMqYc3q80",
"parentPublication": {
"id": "proceedings/dcoss/2020/4351/0",
"title": "2020 16th International Conference on Distributed Computing in Sensor Systems (DCOSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/01/09400761",
"title": "MagAuth: Secure and Usable Two-Factor Authentication With Magnetic Wrist Wearables",
"doi": null,
"abstractUrl": "/journal/tm/2023/01/09400761/1sK2h8w8fAc",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a761",
"title": "Demonstrating the Use of Rapid Touch Interaction in Virtual Reality for Prolonged Interaction in Productivity Scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a761/1tnX9xsCTVC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyQYtf2",
"title": "2017 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAJ4peW",
"doi": "10.1109/3DV.2017.00027",
"title": "GSLAM: Initialization-Robust Monocular Visual SLAM via Global Structure-from-Motion",
"normalizedTitle": "GSLAM: Initialization-Robust Monocular Visual SLAM via Global Structure-from-Motion",
"abstract": "Many monocular visual SLAM algorithms are derived from incremental structure-from-motion (SfM) methods. This work proposes a novel monocular SLAM method which integrates recent advances made in global SfM. In particular, we present two main contributions to visual SLAM. First, we solve the visual odometry problem by a novel rank-1 matrix factorization technique which is more robust to the errors in map initialization. Second, we adopt a recent global SfM method for the pose-graph optimization, which leads to a multi-stage linear formulation and enables L1 optimization for better robustness to false loops. The combination of these two approaches generates more robust reconstruction and is significantly faster (4X) than recent state-of-the-art SLAM systems. We also present a new dataset recorded with ground truth camera motion in a Vicon motion capture room, and compare our method to prior systems on it and established benchmark datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many monocular visual SLAM algorithms are derived from incremental structure-from-motion (SfM) methods. This work proposes a novel monocular SLAM method which integrates recent advances made in global SfM. In particular, we present two main contributions to visual SLAM. First, we solve the visual odometry problem by a novel rank-1 matrix factorization technique which is more robust to the errors in map initialization. Second, we adopt a recent global SfM method for the pose-graph optimization, which leads to a multi-stage linear formulation and enables L1 optimization for better robustness to false loops. The combination of these two approaches generates more robust reconstruction and is significantly faster (4X) than recent state-of-the-art SLAM systems. We also present a new dataset recorded with ground truth camera motion in a Vicon motion capture room, and compare our method to prior systems on it and established benchmark datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many monocular visual SLAM algorithms are derived from incremental structure-from-motion (SfM) methods. This work proposes a novel monocular SLAM method which integrates recent advances made in global SfM. In particular, we present two main contributions to visual SLAM. First, we solve the visual odometry problem by a novel rank-1 matrix factorization technique which is more robust to the errors in map initialization. Second, we adopt a recent global SfM method for the pose-graph optimization, which leads to a multi-stage linear formulation and enables L1 optimization for better robustness to false loops. The combination of these two approaches generates more robust reconstruction and is significantly faster (4X) than recent state-of-the-art SLAM systems. We also present a new dataset recorded with ground truth camera motion in a Vicon motion capture room, and compare our method to prior systems on it and established benchmark datasets.",
"fno": "261001a155",
"keywords": [
"Cameras",
"Distance Measurement",
"Image Motion Analysis",
"Linear Programming",
"Matrix Decomposition",
"SLAM Robots",
"Visual Odometry Problem",
"Pose Graph Optimization",
"Multistage Linear Formulation",
"Ground Truth Camera Motion",
"Vicon Motion Capture Room",
"Initialization Robust Monocular Visual SLAM Algorithms",
"Rank 1 Matrix Factorization Technique",
"Incremental Global Structure From Motion Methods",
"Sf M Methods",
"L 1 Optimization",
"Cameras",
"Simultaneous Localization And Mapping",
"Optimization",
"Visual Odometry",
"Three Dimensional Displays",
"Robustness",
"Visualization",
"Initialization",
"SLAM",
"Global Structure From Motion"
],
"authors": [
{
"affiliation": null,
"fullName": "Chengzhou Tang",
"givenName": "Chengzhou",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Oliver Wang",
"givenName": "Oliver",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ping Tan",
"givenName": "Ping",
"surname": "Tan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "155-164",
"year": "2017",
"issn": "2475-7888",
"isbn": "978-1-5386-2610-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "261001a145",
"articleId": "12OmNrHB1UR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "261001a165",
"articleId": "12OmNzTH0VQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c408",
"title": "Edge SLAM: Edge Points Based Monocular Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c408/12OmNCb3frz",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2011/0230/0/06167873",
"title": "Hand-Held Monocular SLAM Based on Line Segments",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2011/06167873/12OmNvStcIV",
"parentPublication": {
"id": "proceedings/imvip/2011/0230/0",
"title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2018/5114/0/511401a350",
"title": "Monocular SLAM Algorithm Based on Improved Depth Map Estimation and Keyframe Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2018/511401a350/12OmNyeECAZ",
"parentPublication": {
"id": "proceedings/icmtma/2018/5114/0",
"title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a616",
"title": "Multi-planar Monocular Reconstruction of Manhattan Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a616/17D45XvMcbo",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a031",
"title": "EGO-SLAM: A Robust Monocular SLAM for Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a031/18j8QSyEfja",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956576",
"title": "Joint Self-Supervised Monocular Depth Estimation and SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a103",
"title": "Real-Time Monocular Visual SLAM by Combining Points and Lines",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a103/1cdORi5z7fa",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/4.803E306",
"title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/4.803E306/1hQqtAaoUes",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300a803",
"title": "Estimation of Absolute Scale in Monocular SLAM Using Synthetic Data",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300a803/1i5mGYlfA2s",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523838",
"title": "Instant Visual Odometry Initialization for Mobile AR",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523838/1wpqsbFen3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyugyQd",
"title": "Image and Video Technology, Pacific-Rim Symposium on",
"acronym": "psivt",
"groupId": "1800241",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB6UIc9",
"doi": "10.1109/PSIVT.2010.27",
"title": "Modeling of Unbounded Long-Range Drift in Visual Odometry",
"normalizedTitle": "Modeling of Unbounded Long-Range Drift in Visual Odometry",
"abstract": "Visual odometry is a new navigation technology using video data. For long-range navigation, an intrinsic problem of visual odometry is the appearance of drift. The drift is caused by error accumulation, as visual odometry is based on relative measurements, and will grow unboundedly with time. The paper first reviews algorithms which adopt various methods to suppress this drift. However, as far as we know, no work has been done to statistically model and analyze the intrinsic properties of this drift. This paper uses an unbounded system model to represent the drift behavior of visual odometry. The model is composed of an unbounded deterministic part with unknown constant parameters, and a first-order Gauss-Markov process. A simple scheme is given to identify the unknown parameters as well as the statistics of the stochastic part from experimental data. Experiments and discussions are also provided.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual odometry is a new navigation technology using video data. For long-range navigation, an intrinsic problem of visual odometry is the appearance of drift. The drift is caused by error accumulation, as visual odometry is based on relative measurements, and will grow unboundedly with time. The paper first reviews algorithms which adopt various methods to suppress this drift. However, as far as we know, no work has been done to statistically model and analyze the intrinsic properties of this drift. This paper uses an unbounded system model to represent the drift behavior of visual odometry. The model is composed of an unbounded deterministic part with unknown constant parameters, and a first-order Gauss-Markov process. A simple scheme is given to identify the unknown parameters as well as the statistics of the stochastic part from experimental data. Experiments and discussions are also provided.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual odometry is a new navigation technology using video data. For long-range navigation, an intrinsic problem of visual odometry is the appearance of drift. The drift is caused by error accumulation, as visual odometry is based on relative measurements, and will grow unboundedly with time. The paper first reviews algorithms which adopt various methods to suppress this drift. However, as far as we know, no work has been done to statistically model and analyze the intrinsic properties of this drift. This paper uses an unbounded system model to represent the drift behavior of visual odometry. The model is composed of an unbounded deterministic part with unknown constant parameters, and a first-order Gauss-Markov process. A simple scheme is given to identify the unknown parameters as well as the statistics of the stochastic part from experimental data. Experiments and discussions are also provided.",
"fno": "4285a121",
"keywords": [
"Navigation",
"Visual Odometry",
"Long Range Drift"
],
"authors": [
{
"affiliation": null,
"fullName": "Ruyi Jiang",
"givenName": "Ruyi",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Reinhard Klette",
"givenName": "Reinhard",
"surname": "Klette",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shigang Wang",
"givenName": "Shigang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "psivt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "121-126",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4285-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4285a115",
"articleId": "12OmNC4eSrR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4285a127",
"articleId": "12OmNscxj1x",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2011/4362/0/4362a086",
"title": "Visual Odometry Using 3-Dimensional Video Input",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2011/4362a086/12OmNA0vnQM",
"parentPublication": {
"id": "proceedings/crv/2011/4362/0",
"title": "2011 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995463",
"title": "High-precision localization using visual landmarks fused with range data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995463/12OmNC1Y5mS",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rvsp/2011/4581/0/4581a081",
"title": "Underwater Feature Based Visual Odometry - Experiment and Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/rvsp/2011/4581a081/12OmNyKJiiw",
"parentPublication": {
"id": "proceedings/rvsp/2011/4581/0",
"title": "International Conference on Robot, Vision and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a340",
"title": "Unsupervised Learning of Monocular Depth Estimation and Visual Odometry with Deep Feature Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a340/17D45WB0qcN",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2018/6481/0/648101a360",
"title": "Indoor Localization in Dynamic Human Environments Using Visual Odometry and Global Pose Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2018/648101a360/17D45Wc1IL0",
"parentPublication": {
"id": "proceedings/crv/2018/6481/0",
"title": "2018 15th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f530",
"title": "MBA-VO: Motion Blur Aware Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f530/1BmJbIoIPug",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a443",
"title": "Multi-Spectral Visual Odometry without Explicit Stereo Matching",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a443/1ezRDaaunNm",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150639",
"title": "Dynamic Attention-based Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150639/1lPHvonwPTO",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispds/2020/9668/0/966800a005",
"title": "Self-Supervised Learning of Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/ispds/2020/966800a005/1oRiVfl6TaU",
"parentPublication": {
"id": "proceedings/ispds/2020/9668/0",
"title": "2020 International Conference on Information Science, Parallel and Distributed Systems (ISPDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCf1Dph",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"acronym": "sibgrapi",
"groupId": "1000131",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxbEtOb",
"doi": "10.1109/SIBGRAPI.2014.13",
"title": "A Fast Feature Tracking Algorithm for Visual Odometry and Mapping Based on RGB-D Sensors",
"normalizedTitle": "A Fast Feature Tracking Algorithm for Visual Odometry and Mapping Based on RGB-D Sensors",
"abstract": "The recent introduction of low cost sensors such as the Kinect allows the design of real-time applications (i.e. for Robotics) that exploit novel capabilities. One such application is Visual Odometry, a fundamental module of any robotic platform that uses the synchronized color/depth streams captured by these devices to build a map representation of the environment at the same that the robot is localized within the map. Aiming to minimize error accumulation inherent to the process of robot localization, we design a visual feature tracker that works as the front-end of a Visual Odometry system for RGB-D sensors. Feature points are added to the tracker selectively based on pre-specified criteria such as the number of currently active points and their spatial distribution throughout the image. Our proposal is a tracking strategy that allows real-time camera pose computation (average of 24.847 ms per frame) despite the fact that no specialized hardware (such as modern GPUs) is employed. Experiments carried out on publicly available benchmark and datasets demonstrate the usefulness of the method, which achieved RMSE rates superior to the state-of-the-art RGB-D SLAM algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The recent introduction of low cost sensors such as the Kinect allows the design of real-time applications (i.e. for Robotics) that exploit novel capabilities. One such application is Visual Odometry, a fundamental module of any robotic platform that uses the synchronized color/depth streams captured by these devices to build a map representation of the environment at the same that the robot is localized within the map. Aiming to minimize error accumulation inherent to the process of robot localization, we design a visual feature tracker that works as the front-end of a Visual Odometry system for RGB-D sensors. Feature points are added to the tracker selectively based on pre-specified criteria such as the number of currently active points and their spatial distribution throughout the image. Our proposal is a tracking strategy that allows real-time camera pose computation (average of 24.847 ms per frame) despite the fact that no specialized hardware (such as modern GPUs) is employed. Experiments carried out on publicly available benchmark and datasets demonstrate the usefulness of the method, which achieved RMSE rates superior to the state-of-the-art RGB-D SLAM algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The recent introduction of low cost sensors such as the Kinect allows the design of real-time applications (i.e. for Robotics) that exploit novel capabilities. One such application is Visual Odometry, a fundamental module of any robotic platform that uses the synchronized color/depth streams captured by these devices to build a map representation of the environment at the same that the robot is localized within the map. Aiming to minimize error accumulation inherent to the process of robot localization, we design a visual feature tracker that works as the front-end of a Visual Odometry system for RGB-D sensors. Feature points are added to the tracker selectively based on pre-specified criteria such as the number of currently active points and their spatial distribution throughout the image. Our proposal is a tracking strategy that allows real-time camera pose computation (average of 24.847 ms per frame) despite the fact that no specialized hardware (such as modern GPUs) is employed. Experiments carried out on publicly available benchmark and datasets demonstrate the usefulness of the method, which achieved RMSE rates superior to the state-of-the-art RGB-D SLAM algorithm.",
"fno": "4258a227",
"keywords": [
"Visualization",
"Cameras",
"Feature Extraction",
"Simultaneous Localization And Mapping",
"Three Dimensional Displays",
"RGB D Sensors",
"Visual Odometry",
"Feature Tracking"
],
"authors": [
{
"affiliation": null,
"fullName": "Bruno Marques Ferreira da Silva",
"givenName": "Bruno Marques Ferreira da",
"surname": "Silva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luiz Marcos Garcia Goncalves",
"givenName": "Luiz Marcos Garcia",
"surname": "Goncalves",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sibgrapi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "227-234",
"year": "2014",
"issn": "1530-1834",
"isbn": "978-1-4799-4258-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4258a220",
"articleId": "12OmNzSyChl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4258a235",
"articleId": "12OmNxd4tou",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a018",
"title": "σ-DVO: Sensor Noise Model Meets Dense Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a018/12OmNCwUmxA",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a452",
"title": "Planes Detection for Robust Localization and Mapping in RGB-D SLAM Systems",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a452/12OmNqH9hdY",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d934",
"title": "Robust RGB-D Odometry Using Point and Line Features",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d934/12OmNro0HZg",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d271",
"title": "Online Reconstruction of Indoor Scenes from RGB-D Streams",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d271/12OmNxj238p",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256",
"title": "A Fast Visual Odometry and Mapping System for RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA",
"parentPublication": {
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0",
"title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/1/08005867",
"title": "Processed RGB-D Slam Using Open-Source Software",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005867/17D45XfSEUE",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/1",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icectt/2019/0042/0/004200a154",
"title": "An Adaptive Visual Odometry Based on RGB-D Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/icectt/2019/004200a154/1bolRwl5L5C",
"parentPublication": {
"id": "proceedings/icectt/2019/0042/0",
"title": "2019 4th International Conference on Electromechanical Control Technology and Transportation (ICECTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08691513",
"title": "Unsupervised Deep Visual-Inertial Odometry with Online Error Correction for RGB-D Imagery",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08691513/1jeCTblwCMo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e928",
"title": "Information-Driven Direct RGB-D Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e928/1m3osoTCN44",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a252",
"title": "Comparison of RGB-D sensors for 3D reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a252/1oZBBkWR9aU",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1AH82uivdwA",
"title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)",
"acronym": "icbase",
"groupId": "1841125",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AH8aY1BiV2",
"doi": "10.1109/ICBASE53849.2021.00129",
"title": "Stereo Visual Odometry with Information Enhancement at Feature Points",
"normalizedTitle": "Stereo Visual Odometry with Information Enhancement at Feature Points",
"abstract": "The visual odometry based on the feature point method is solved by matching spatial points with pixel points using the PnP algorithm when calculating the pose change between the front and back frames. Accurate spatial location of feature points plays an important role in the process of visual odometry calculation. Incorrectly positioned space points greatly affect the performance of the visual odometer. The depth value calculation method based on deep learning can effectively improve the accuracy of spatial point locations, however, this method does not focus on the correctness of depth values at feature points. This paper proposes a way to integrate deep learning and traditional approach to construct a stereo VO system which pays attention on the feature points’ depth to improve the accuracy of the visual odometry indirectly. Specifically, the training process is divided into two phases. The first phase trains a stereo matching network using a binocular dataset to obtain the initial model of the network. In the second stage, a feature extraction network is added to obtain a feature point mask from the extracted feature points, and a loss function is built using the mask, while a reprojection loss function is built using the poses values. The two loss functions are added to the first stage loss function during the second stage training. Finally, the trained stereo matching network is used to generate the depth values, and the matched feature points are obtained using the feature matching network, and the stereo visual odometry is constructed by calculating the relative pose between the former and latter frames through the PnP algorithm. Extensive experiments on the KITTI dataset show the robustness of our system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The visual odometry based on the feature point method is solved by matching spatial points with pixel points using the PnP algorithm when calculating the pose change between the front and back frames. Accurate spatial location of feature points plays an important role in the process of visual odometry calculation. Incorrectly positioned space points greatly affect the performance of the visual odometer. The depth value calculation method based on deep learning can effectively improve the accuracy of spatial point locations, however, this method does not focus on the correctness of depth values at feature points. This paper proposes a way to integrate deep learning and traditional approach to construct a stereo VO system which pays attention on the feature points’ depth to improve the accuracy of the visual odometry indirectly. Specifically, the training process is divided into two phases. The first phase trains a stereo matching network using a binocular dataset to obtain the initial model of the network. In the second stage, a feature extraction network is added to obtain a feature point mask from the extracted feature points, and a loss function is built using the mask, while a reprojection loss function is built using the poses values. The two loss functions are added to the first stage loss function during the second stage training. Finally, the trained stereo matching network is used to generate the depth values, and the matched feature points are obtained using the feature matching network, and the stereo visual odometry is constructed by calculating the relative pose between the former and latter frames through the PnP algorithm. Extensive experiments on the KITTI dataset show the robustness of our system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The visual odometry based on the feature point method is solved by matching spatial points with pixel points using the PnP algorithm when calculating the pose change between the front and back frames. Accurate spatial location of feature points plays an important role in the process of visual odometry calculation. Incorrectly positioned space points greatly affect the performance of the visual odometer. The depth value calculation method based on deep learning can effectively improve the accuracy of spatial point locations, however, this method does not focus on the correctness of depth values at feature points. This paper proposes a way to integrate deep learning and traditional approach to construct a stereo VO system which pays attention on the feature points’ depth to improve the accuracy of the visual odometry indirectly. Specifically, the training process is divided into two phases. The first phase trains a stereo matching network using a binocular dataset to obtain the initial model of the network. In the second stage, a feature extraction network is added to obtain a feature point mask from the extracted feature points, and a loss function is built using the mask, while a reprojection loss function is built using the poses values. The two loss functions are added to the first stage loss function during the second stage training. Finally, the trained stereo matching network is used to generate the depth values, and the matched feature points are obtained using the feature matching network, and the stereo visual odometry is constructed by calculating the relative pose between the former and latter frames through the PnP algorithm. Extensive experiments on the KITTI dataset show the robustness of our system.",
"fno": "270900a654",
"keywords": [
"Deep Learning Artificial Intelligence",
"Distance Measurement",
"Feature Extraction",
"Image Matching",
"Pose Estimation",
"Stereo Image Processing",
"Stereo Visual Odometry",
"Spatial Points",
"Pixel Points",
"Spatial Location",
"Visual Odometry Calculation",
"Space Points",
"Depth Value Calculation Method",
"Deep Learning",
"Spatial Point Locations",
"Feature Extraction Network",
"Feature Point Mask",
"Loss Function",
"Trained Stereo Matching Network",
"Matched Feature Points",
"Feature Matching Network",
"Training",
"Deep Learning",
"Visualization",
"Feature Extraction",
"Robustness",
"Visual Odometry",
"Software Engineering",
"Stereo Visual Odometry",
"Deep Learning",
"Feature Points",
"Stereo Matching Network"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology,School of Computer Science & Technology,Beijing,China",
"fullName": "Siyu Liu",
"givenName": "Siyu",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology,School of Computer Science & Technology,Beijing,China",
"fullName": "Bo Ma",
"givenName": "Bo",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icbase",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "654-661",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2709-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "270900a649",
"articleId": "1AH86qesIW4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "270900a662",
"articleId": "1AH8jHnulfG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2011/4362/0/4362a086",
"title": "Visual Odometry Using 3-Dimensional Video Input",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2011/4362a086/12OmNA0vnQM",
"parentPublication": {
"id": "proceedings/crv/2011/4362/0",
"title": "2011 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcecs/2008/3555/0/3555a150",
"title": "Stereo Visual Odometry for Mobile Robots on Uneven Terrain",
"doi": null,
"abstractUrl": "/proceedings-article/wcecs/2008/3555a150/12OmNAlvHB8",
"parentPublication": {
"id": "proceedings/wcecs/2008/3555/0",
"title": "World Congress on Engineering and Computer Science, Advances in Electrical and Electronics Engineering - IAENG Special Edition of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d923",
"title": "Stereo DSO: Large-Scale Direct Sparse Visual Odometry with Stereo Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d923/12OmNApLGr1",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a340",
"title": "Unsupervised Learning of Monocular Depth Estimation and Visual Odometry with Deep Feature Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a340/17D45WB0qcN",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a244",
"title": "Robust Multi-scale ORB Algorithm in Real-Time Monocular Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a244/17D45WKWnJM",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2018/6481/0/648101a360",
"title": "Indoor Localization in Dynamic Human Environments Using Visual Odometry and Global Pose Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2018/648101a360/17D45Wc1IL0",
"parentPublication": {
"id": "proceedings/crv/2018/6481/0",
"title": "2018 15th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f530",
"title": "MBA-VO: Motion Blur Aware Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f530/1BmJbIoIPug",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10086694",
"title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a443",
"title": "Multi-Spectral Visual Odometry without Explicit Stereo Matching",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a443/1ezRDaaunNm",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hQqtAaoUes",
"doi": "10.1109/ICCV.2019.00440",
"title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"normalizedTitle": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"abstract": "In this paper we tackle the joint learning problem of keyframe detection and visual odometry towards monocular visual SLAM systems. As an important task in visual SLAM, keyframe selection helps efficient camera relocalization and effective augmentation of visual odometry. To benefit from it, we first present a deep network design for the keyframe selection, which is able to reliably detect keyframes and localize new frames, then an end-to-end unsupervised deep framework further proposed for simultaneously learning the keyframe selection and the visual odometry tasks. As far as we know, it is the first work to jointly optimize these two complementary tasks in a single deep framework. To make the two tasks facilitate each other in the learning, a collaborative optimization loss based on both geometric and visual metrics is proposed. Extensive experiments on publicly available datasets (i.e. KITTI raw dataset and its odometry split [12]) clearly demonstrate the effectiveness of the proposed approach, and new state-ofthe-art results are established on the unsupervised depth and pose estimation from monocular video.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we tackle the joint learning problem of keyframe detection and visual odometry towards monocular visual SLAM systems. As an important task in visual SLAM, keyframe selection helps efficient camera relocalization and effective augmentation of visual odometry. To benefit from it, we first present a deep network design for the keyframe selection, which is able to reliably detect keyframes and localize new frames, then an end-to-end unsupervised deep framework further proposed for simultaneously learning the keyframe selection and the visual odometry tasks. As far as we know, it is the first work to jointly optimize these two complementary tasks in a single deep framework. To make the two tasks facilitate each other in the learning, a collaborative optimization loss based on both geometric and visual metrics is proposed. Extensive experiments on publicly available datasets (i.e. KITTI raw dataset and its odometry split [12]) clearly demonstrate the effectiveness of the proposed approach, and new state-ofthe-art results are established on the unsupervised depth and pose estimation from monocular video.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we tackle the joint learning problem of keyframe detection and visual odometry towards monocular visual SLAM systems. As an important task in visual SLAM, keyframe selection helps efficient camera relocalization and effective augmentation of visual odometry. To benefit from it, we first present a deep network design for the keyframe selection, which is able to reliably detect keyframes and localize new frames, then an end-to-end unsupervised deep framework further proposed for simultaneously learning the keyframe selection and the visual odometry tasks. As far as we know, it is the first work to jointly optimize these two complementary tasks in a single deep framework. To make the two tasks facilitate each other in the learning, a collaborative optimization loss based on both geometric and visual metrics is proposed. Extensive experiments on publicly available datasets (i.e. KITTI raw dataset and its odometry split [12]) clearly demonstrate the effectiveness of the proposed approach, and new state-ofthe-art results are established on the unsupervised depth and pose estimation from monocular video.",
"fno": "4.803E306",
"keywords": [
"Cameras",
"Distance Measurement",
"Optimisation",
"Pose Estimation",
"SLAM Robots",
"Unsupervised Learning",
"Video Signal Processing",
"End To End Unsupervised Deep Framework",
"Keyframe Selection",
"Visual Odometry Tasks",
"Geometric Metrics",
"Visual Metrics",
"Unsupervised Collaborative Learning",
"Keyframe Detection",
"Monocular Deep SLAM",
"Joint Learning Problem",
"Monocular Visual SLAM Systems",
"Deep Network Design",
"Odometry Split",
"Pose Estimation",
"Monocular Video",
"Collaborative Optimization Loss",
"Visual Odometry",
"Visualization",
"Simultaneous Localization And Mapping",
"Task Analysis",
"Cameras",
"Machine Learning",
"Optimization"
],
"authors": [
{
"affiliation": "Beihang University",
"fullName": "Lu Sheng",
"givenName": "Lu",
"surname": "Sheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Oxford",
"fullName": "Dan Xu",
"givenName": "Dan",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Sydney",
"fullName": "Wanli Ouyang",
"givenName": "Wanli",
"surname": "Ouyang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chinese University of Hong Kong. Hong Kong",
"fullName": "Xiaogang Wang",
"givenName": "Xiaogang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": false,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "4301-4310",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4.803E295",
"articleId": "1hQqlte2pd6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300e311",
"articleId": "1hVlyjsBgTS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a155",
"title": "GSLAM: Initialization-Robust Monocular Visual SLAM via Global Structure-from-Motion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a155/12OmNAJ4peW",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b449",
"title": "Semi-dense Visual Odometry for a Monocular Camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b449/12OmNC1oT51",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a001",
"title": "Robust Keyframe-based Monocular SLAM for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a001/12OmNx57HK9",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836532",
"title": "Robust Keyframe-Based Monocular SLAM for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836532/12OmNx5GU8K",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2018/5114/0/511401a350",
"title": "Monocular SLAM Algorithm Based on Improved Depth Map Estimation and Keyframe Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2018/511401a350/12OmNyeECAZ",
"parentPublication": {
"id": "proceedings/icmtma/2018/5114/0",
"title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404531",
"title": "Global Localization from Monocular SLAM on a Mobile Phone",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404531/13rRUwdrdSA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956576",
"title": "Joint Self-Supervised Monocular Depth Estimation and SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e928",
"title": "Information-Driven Direct RGB-D Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e928/1m3osoTCN44",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523838",
"title": "Instant Visual Odometry Initialization for Mobile AR",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523838/1wpqsbFen3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
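The abstract above names a collaborative loss over geometric and visual metrics but gives no formula. As a minimal, hedged sketch of the photometric view-synthesis term that unsupervised monocular depth-and-pose pipelines of this kind typically build on (PyTorch assumed; the function name, tensor layouts, and the plain L1 penalty are illustrative choices, not the authors' exact loss):

```python
# Minimal sketch of a photometric view-synthesis loss, assuming PyTorch,
# pinhole intrinsics K, a predicted target-frame depth map, and a predicted
# relative pose. Illustrative only; not the paper's collaborative loss.
import torch
import torch.nn.functional as F

def photometric_loss(tgt, src, depth, T, K):
    """tgt, src: (B,3,H,W) frames; depth: (B,1,H,W) target depth;
    T: (B,4,4) pose mapping target-frame points into the source frame;
    K: (B,3,3) intrinsics. Returns the mean L1 warping error."""
    B, _, H, W = tgt.shape
    dev = tgt.device
    ys, xs = torch.meshgrid(
        torch.arange(H, device=dev, dtype=torch.float32),
        torch.arange(W, device=dev, dtype=torch.float32),
        indexing="ij",
    )
    pix = torch.stack([xs, ys, torch.ones_like(xs)], 0).reshape(1, 3, -1)
    # Back-project target pixels to 3-D using the predicted depth.
    cam = torch.inverse(K) @ pix.expand(B, -1, -1) * depth.reshape(B, 1, -1)
    cam = torch.cat([cam, torch.ones(B, 1, H * W, device=dev)], dim=1)
    # Rigidly transform into the source camera and project with K.
    p = K @ (T @ cam)[:, :3]
    uv = p[:, :2] / p[:, 2:].clamp(min=1e-6)
    # grid_sample expects sampling locations normalized to [-1, 1].
    grid = torch.stack(
        [2 * uv[:, 0] / (W - 1) - 1, 2 * uv[:, 1] / (H - 1) - 1], dim=-1
    ).reshape(B, H, W, 2)
    warped = F.grid_sample(src, grid, padding_mode="border", align_corners=True)
    return (warped - tgt).abs().mean()
```

A keyframe-selection branch would decide which source/target pairs feed this term; a geometric consistency term over predicted depths would sit alongside it.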
{
"proceeding": {
"id": "1kpIDb9Begw",
"title": "2020 17th Conference on Computer and Robot Vision (CRV)",
"acronym": "crv",
"groupId": "1001794",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kpIGiAFaYo",
"doi": "10.1109/CRV50864.2020.00018",
"title": "Depth Prediction for Monocular Direct Visual Odometry",
"normalizedTitle": "Depth Prediction for Monocular Direct Visual Odometry",
"abstract": "Depth prediction from monocular images with deep CNNs is a topic of increasing interest to the community. Advances have lead to models capable of predicting disparity maps with consistent scale, which are an acceptable prior for gradient-based direct methods. With this in consideration, we exploit depth prediction as a candidate prior for the coarse initialization, tracking, and marginalization steps of the direct visual odometry system, enabling the second-order optimizer to converge faster into a precise global minimum. In addition, the given depth prior supports large baseline stereo scenarios, maintaining robust pose estimations against challenging motion states such as in-place rotation. We further refine our pose estimation with semi-online loop closure. The experiments on KITTI demonstrate that our proposed method achieves state- of-the-art performance compared to both traditional direct visual odometry and learning-based counterparts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Depth prediction from monocular images with deep CNNs is a topic of increasing interest to the community. Advances have lead to models capable of predicting disparity maps with consistent scale, which are an acceptable prior for gradient-based direct methods. With this in consideration, we exploit depth prediction as a candidate prior for the coarse initialization, tracking, and marginalization steps of the direct visual odometry system, enabling the second-order optimizer to converge faster into a precise global minimum. In addition, the given depth prior supports large baseline stereo scenarios, maintaining robust pose estimations against challenging motion states such as in-place rotation. We further refine our pose estimation with semi-online loop closure. The experiments on KITTI demonstrate that our proposed method achieves state- of-the-art performance compared to both traditional direct visual odometry and learning-based counterparts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Depth prediction from monocular images with deep CNNs is a topic of increasing interest to the community. Advances have lead to models capable of predicting disparity maps with consistent scale, which are an acceptable prior for gradient-based direct methods. With this in consideration, we exploit depth prediction as a candidate prior for the coarse initialization, tracking, and marginalization steps of the direct visual odometry system, enabling the second-order optimizer to converge faster into a precise global minimum. In addition, the given depth prior supports large baseline stereo scenarios, maintaining robust pose estimations against challenging motion states such as in-place rotation. We further refine our pose estimation with semi-online loop closure. The experiments on KITTI demonstrate that our proposed method achieves state- of-the-art performance compared to both traditional direct visual odometry and learning-based counterparts.",
"fno": "09108693",
"keywords": [
"Convolutional Neural Nets",
"Image Classification",
"Learning Artificial Intelligence",
"Mobile Robots",
"Optimisation",
"Pose Estimation",
"Robot Vision",
"SLAM Robots",
"Stereo Image Processing",
"Depth Prediction",
"Monocular Direct Visual Odometry",
"Monocular Images",
"Gradient Based Direct Methods",
"Direct Visual Odometry System",
"Direct Visual Odometry",
"Tracking Loops",
"Simultaneous Localization And Mapping",
"Tracking",
"Pose Estimation",
"Pipelines",
"Predictive Models",
"Rendering Computer Graphics",
"Depth Prediction",
"Visual Odometry",
"Deep Learning",
"Visual SLAM"
],
"authors": [
{
"affiliation": "McGill University,Computer Science",
"fullName": "Ran Cheng",
"givenName": "Ran",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toronto,Engineering Science",
"fullName": "Christopher Agia",
"givenName": "Christopher",
"surname": "Agia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "McGill University,Computer Science",
"fullName": "David Meger",
"givenName": "David",
"surname": "Meger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "McGill University,Computer Science",
"fullName": "Gregory Dudek",
"givenName": "Gregory",
"surname": "Dudek",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "crv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-05-01T00:00:00",
"pubType": "proceedings",
"pages": "70-77",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9891-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09108678",
"articleId": "1kpIEkZx8eQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09108679",
"articleId": "1kpIFI2nMti",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948420",
"title": "Semi-dense visual odometry for AR on a smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948420/12OmNAXPykk",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032d923",
"title": "Stereo DSO: Large-Scale Direct Sparse Visual Odometry with Stereo Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d923/12OmNApLGr1",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b449",
"title": "Semi-dense Visual Odometry for a Monocular Camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b449/12OmNC1oT51",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956576",
"title": "Joint Self-Supervised Monocular Depth Estimation and SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/4.803E306",
"title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/4.803E306/1hQqtAaoUes",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iucc-dsci-smartcns/2019/5209/0/520900a422",
"title": "A Lifted Semi-Direct Monocular Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/iucc-dsci-smartcns/2019/520900a422/1hgrUtcCcus",
"parentPublication": {
"id": "proceedings/iucc-dsci-smartcns/2019/5209/0",
"title": "2019 IEEE International Conferences on Ubiquitous Computing & Communications (IUCC) and Data Science and Computational Intelligence (DSCI) and Smart Computing, Networking and Services (SmartCNS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b278",
"title": "D3VO: Deep Depth, Deep Pose and Deep Uncertainty for Monocular Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b278/1m3neRj6c1O",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523838",
"title": "Instant Visual Odometry Initialization for Mobile AR",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523838/1wpqsbFen3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2021/3960/0/396000a464",
"title": "Visual Odometry integrated with Self-Supervised Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2021/396000a464/1xqyLky3tSM",
"parentPublication": {
"id": "proceedings/icceai/2021/3960/0",
"title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlise/2021/1736/0/173600a425",
"title": "Spatial and Temporal Monocular Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/mlise/2021/173600a425/1yOW4ibH7by",
"parentPublication": {
"id": "proceedings/mlise/2021/1736/0",
"title": "2021 International Conference on Machine Learning and Intelligent Systems Engineering (MLISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
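The abstract above describes using CNN depth prediction as a prior for the initialization and tracking steps of a direct VO system. A minimal sketch of one way such a prior can enter the pipeline, assuming NumPy, a grayscale keyframe, and a same-size predicted depth map; direct methods like DSO otherwise seed candidate points with an uninformative inverse depth (the point-picking rule and thresholds here are illustrative, not the authors' implementation):

```python
# Minimal sketch: seed direct-VO candidate points with predicted inverse
# depth instead of an uninformative default. Assumes NumPy; `img` is a
# grayscale keyframe, `depth_pred` a CNN depth map of the same shape.
import numpy as np

def seed_candidate_points(img, depth_pred, grad_thresh=12.0, step=8):
    """Pick one high-gradient pixel per cell and attach 1/depth_pred."""
    gy, gx = np.gradient(img.astype(np.float32))
    grad = np.hypot(gx, gy)
    H, W = img.shape
    points = []
    for v in range(step, H - step, step):
        for u in range(step, W - step, step):
            # Keep the strongest-gradient pixel in each cell (crude NMS).
            cell = grad[v:v + step, u:u + step]
            dv, du = np.unravel_index(np.argmax(cell), cell.shape)
            if cell[dv, du] < grad_thresh:
                continue
            z = float(depth_pred[v + dv, u + du])
            if z <= 0.0:
                continue
            points.append((u + du, v + dv, 1.0 / z))  # (u, v, inverse depth)
    return points
```

Seeding the optimizer near the true inverse depth is what lets the second-order photometric alignment converge in fewer iterations.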
{
"proceeding": {
"id": "1lPGXn8hEiI",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1lPHvonwPTO",
"doi": "10.1109/CVPRW50498.2020.00026",
"title": "Dynamic Attention-based Visual Odometry",
"normalizedTitle": "Dynamic Attention-based Visual Odometry",
"abstract": "This paper proposes a dynamic attention-based visual odometry framework (DAVO), a learning-based VO method, for estimating the ego-motion of a monocular camera. DAVO dynamically adjusts the attention weights on different semantic categories for different motion scenarios based on optical flow maps. These weighted semantic categories can then be used to generate attention maps that highlight the relative importance of different semantic regions in input frames for pose estimation. In order to examine the proposed DAVO, we perform a number of experiments on the KITTI Visual Odometry and SLAM benchmark suite to quantitatively and qualitatively inspect the impacts of the dynamically adjusted weights on the accuracy of the evaluated trajectories. Moreover, we design a set of ablation analyses to justify each of our design choices, and validate the effectiveness as well as the advantages of DAVO. Our experiments on the KITTI dataset shows that the proposed DAVO framework does provide satisfactory performance in ego-motion estimation, and is able deliver competitive performance when compared to the contemporary VO methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a dynamic attention-based visual odometry framework (DAVO), a learning-based VO method, for estimating the ego-motion of a monocular camera. DAVO dynamically adjusts the attention weights on different semantic categories for different motion scenarios based on optical flow maps. These weighted semantic categories can then be used to generate attention maps that highlight the relative importance of different semantic regions in input frames for pose estimation. In order to examine the proposed DAVO, we perform a number of experiments on the KITTI Visual Odometry and SLAM benchmark suite to quantitatively and qualitatively inspect the impacts of the dynamically adjusted weights on the accuracy of the evaluated trajectories. Moreover, we design a set of ablation analyses to justify each of our design choices, and validate the effectiveness as well as the advantages of DAVO. Our experiments on the KITTI dataset shows that the proposed DAVO framework does provide satisfactory performance in ego-motion estimation, and is able deliver competitive performance when compared to the contemporary VO methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a dynamic attention-based visual odometry framework (DAVO), a learning-based VO method, for estimating the ego-motion of a monocular camera. DAVO dynamically adjusts the attention weights on different semantic categories for different motion scenarios based on optical flow maps. These weighted semantic categories can then be used to generate attention maps that highlight the relative importance of different semantic regions in input frames for pose estimation. In order to examine the proposed DAVO, we perform a number of experiments on the KITTI Visual Odometry and SLAM benchmark suite to quantitatively and qualitatively inspect the impacts of the dynamically adjusted weights on the accuracy of the evaluated trajectories. Moreover, we design a set of ablation analyses to justify each of our design choices, and validate the effectiveness as well as the advantages of DAVO. Our experiments on the KITTI dataset shows that the proposed DAVO framework does provide satisfactory performance in ego-motion estimation, and is able deliver competitive performance when compared to the contemporary VO methods.",
"fno": "09150639",
"keywords": [
"Distance Measurement",
"Learning Artificial Intelligence",
"Mobile Robots",
"Motion Estimation",
"Pose Estimation",
"Robot Vision",
"SLAM Robots",
"Dynamic Attention Based Visual Odometry Framework",
"Learning Based VO Method",
"Monocular Camera",
"Attention Weights",
"Optical Flow Maps",
"Weighted Semantic Categories",
"Attention Maps",
"Pose Estimation",
"DAVO Framework",
"Ego Motion Estimation",
"KITTI Visual Odometry",
"SLAM Benchmark Suite",
"Semantics",
"Cameras",
"Trajectory",
"Pose Estimation",
"Visual Odometry",
"Dynamics"
],
"authors": [
{
"affiliation": "National Tsing Hua University,Elsa Lab, Department of Computer Science,Hsinchu,Taiwan",
"fullName": "Xin-Yu Kuo",
"givenName": "Xin-Yu",
"surname": "Kuo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Elsa Lab, Department of Computer Science,Hsinchu,Taiwan",
"fullName": "Chien Liu",
"givenName": "Chien",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Elsa Lab, Department of Computer Science,Hsinchu,Taiwan",
"fullName": "Kai-Chen Lin",
"givenName": "Kai-Chen",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Elsa Lab, Department of Computer Science,Hsinchu,Taiwan",
"fullName": "Chun-Yi Lee",
"givenName": "Chun-Yi",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "160-169",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9360-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09150814",
"articleId": "1lPGXVNkI5a",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09150866",
"articleId": "1lPHryqz3uE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2011/4362/0/4362a086",
"title": "Visual Odometry Using 3-Dimensional Video Input",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2011/4362a086/12OmNA0vnQM",
"parentPublication": {
"id": "proceedings/crv/2011/4362/0",
"title": "2011 Canadian Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2018/5114/0/511401a239",
"title": "Real-Time Visual Odometry Based on Optical Flow and Depth Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2018/511401a239/12OmNC1Y5k3",
"parentPublication": {
"id": "proceedings/icmtma/2018/5114/0",
"title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2017/2818/0/2818a314",
"title": "Night Rider: Visual Odometry Using Headlights",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2017/2818a314/12OmNxUv6g8",
"parentPublication": {
"id": "proceedings/crv/2017/2818/0",
"title": "2017 14th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2018/6580/0/658001a048",
"title": "The Comparison between FTF-VO and MF-VO for High Accuracy Mobile Robot Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2018/658001a048/12OmNz2kqeN",
"parentPublication": {
"id": "proceedings/icris/2018/6580/0",
"title": "2018 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a340",
"title": "Unsupervised Learning of Monocular Depth Estimation and Visual Odometry with Deep Feature Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a340/17D45WB0qcN",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbase/2021/2709/0/270900a654",
"title": "Stereo Visual Odometry with Information Enhancement at Feature Points",
"doi": null,
"abstractUrl": "/proceedings-article/icbase/2021/270900a654/1AH8aY1BiV2",
"parentPublication": {
"id": "proceedings/icbase/2021/2709/0",
"title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f530",
"title": "MBA-VO: Motion Blur Aware Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f530/1BmJbIoIPug",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10086694",
"title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispds/2020/9668/0/966800a005",
"title": "Self-Supervised Learning of Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/ispds/2020/966800a005/1oRiVfl6TaU",
"parentPublication": {
"id": "proceedings/ispds/2020/9668/0",
"title": "2020 International Conference on Information Science, Parallel and Distributed Systems (ISPDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900n3179",
"title": "Generalizing to the Open World: Deep Visual Odometry with Online Adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900n3179/1yeJYMhbwlO",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
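DAVO's attention network is learned end-to-end; as a hand-written stand-in that conveys the idea, the sketch below (PyTorch assumed; the function and scoring rule are hypothetical) scores each semantic category by its mean optical-flow magnitude and turns the scores into a per-pixel attention map used to re-weight input frames for the pose regressor:

```python
# Minimal sketch: flow-driven attention over semantic categories.
# Assumes PyTorch; `masks` is a (C, H, W) stack of binary category masks
# and `flow` a (2, H, W) dense optical-flow field. Hypothetical stand-in
# for DAVO's learned attention network.
import torch

def semantic_attention(masks: torch.Tensor, flow: torch.Tensor) -> torch.Tensor:
    """Returns an (H, W) attention map normalized to [0, 1]."""
    mag = flow.norm(dim=0)                               # (H, W) flow magnitude
    area = masks.sum(dim=(1, 2)).clamp(min=1.0)          # (C,) pixels per class
    score = (masks * mag).sum(dim=(1, 2)) / area         # mean flow per class
    weight = torch.softmax(score, dim=0)                 # (C,) category weights
    attn = (weight[:, None, None] * masks).sum(dim=0)    # (H, W) per-pixel map
    return attn / attn.max().clamp(min=1e-6)

# Example: re-weight an input frame before feeding the pose network.
# frame_weighted = frame * semantic_attention(masks, flow)
```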
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3osoTCN44",
"doi": "10.1109/CVPR42600.2020.00498",
"title": "Information-Driven Direct RGB-D Odometry",
"normalizedTitle": "Information-Driven Direct RGB-D Odometry",
"abstract": "This paper presents an information-theoretic approach to point selection in direct RGB-D odometry. The aim is to select only the most informative measurements, in order to reduce the optimization problem with a minimal impact in the accuracy. It is usual practice in visual odometry/SLAM to track several hundreds of points, achieving real-time performance in high-end desktop PCs. Reducing their computational footprint will facilitate the implementation of odometry and SLAM in low-end platforms such as small robots and AR/VR glasses. Our experimental results show that our novel information-based selection criterion allows us to reduce the number of tracked points an order of magnitude (down to only 24 of them), achieving an accuracy similar to the state of the art (sometimes outperforming it) while reducing 10 times the computational demand.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an information-theoretic approach to point selection in direct RGB-D odometry. The aim is to select only the most informative measurements, in order to reduce the optimization problem with a minimal impact in the accuracy. It is usual practice in visual odometry/SLAM to track several hundreds of points, achieving real-time performance in high-end desktop PCs. Reducing their computational footprint will facilitate the implementation of odometry and SLAM in low-end platforms such as small robots and AR/VR glasses. Our experimental results show that our novel information-based selection criterion allows us to reduce the number of tracked points an order of magnitude (down to only 24 of them), achieving an accuracy similar to the state of the art (sometimes outperforming it) while reducing 10 times the computational demand.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an information-theoretic approach to point selection in direct RGB-D odometry. The aim is to select only the most informative measurements, in order to reduce the optimization problem with a minimal impact in the accuracy. It is usual practice in visual odometry/SLAM to track several hundreds of points, achieving real-time performance in high-end desktop PCs. Reducing their computational footprint will facilitate the implementation of odometry and SLAM in low-end platforms such as small robots and AR/VR glasses. Our experimental results show that our novel information-based selection criterion allows us to reduce the number of tracked points an order of magnitude (down to only 24 of them), achieving an accuracy similar to the state of the art (sometimes outperforming it) while reducing 10 times the computational demand.",
"fno": "716800e928",
"keywords": [
"Distance Measurement",
"Feature Selection",
"Image Colour Analysis",
"Information Theory",
"Mobile Robots",
"Object Tracking",
"Optimisation",
"Robot Vision",
"SLAM Robots",
"SLAM",
"Visual Odometry",
"Point Selection",
"Information Based Selection Criterion",
"Tracked Points",
"Desktop P Cs",
"Real Time Performance",
"Optimization Problem",
"Informative Measurements",
"Information Theoretic Approach",
"Information Driven Direct RGB D Odometry",
"Simultaneous Localization And Mapping",
"Cameras",
"Jacobian Matrices",
"Optimization",
"Entropy",
"Visual Odometry",
"Covariance Matrices"
],
"authors": [
{
"affiliation": "University of Zaragoza; German Aerospace Center (DLR)",
"fullName": "Alejandro Fontán",
"givenName": "Alejandro",
"surname": "Fontán",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Zaragoza",
"fullName": "Javier Civera",
"givenName": "Javier",
"surname": "Civera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "German Aerospace Center (DLR); Technical University of Munich",
"fullName": "Rudolph Triebel",
"givenName": "Rudolph",
"surname": "Triebel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4928-4936",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800e918",
"articleId": "1m3npYWpva0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800e937",
"articleId": "1m3neKOSAIo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391d934",
"title": "Robust RGB-D Odometry Using Point and Line Features",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d934/12OmNro0HZg",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a227",
"title": "A Fast Feature Tracking Algorithm for Visual Odometry and Mapping Based on RGB-D Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a227/12OmNxbEtOb",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256",
"title": "A Fast Visual Odometry and Mapping System for RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA",
"parentPublication": {
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0",
"title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679500",
"title": "Plane Based Visual Odometry for Structural and Low-Texture Environments Using RGB-D Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679500/18Xknbsd7BS",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10086694",
"title": "SDV-LOAM: Semi-Direct Visual-LiDAR Odometry and Mapping",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10086694/1LUpwXZtAe4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/4.803E306",
"title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/4.803E306/1hQqtAaoUes",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08691513",
"title": "Unsupervised Deep Visual-Inertial Odometry with Online Error Correction for RGB-D Imagery",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08691513/1jeCTblwCMo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523838",
"title": "Instant Visual Odometry Initialization for Mobile AR",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523838/1wpqsbFen3G",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
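The information-driven selection in the abstract above can be illustrated with a greedy log-det criterion over per-point pose Jacobians: each added measurement should shrink the pose covariance as much as possible. A minimal sketch (NumPy assumed; the paper's exact criterion and bookkeeping may differ; k=24 echoes the point budget quoted in the abstract):

```python
# Minimal sketch: greedy information-gain point selection for a 6-DoF pose.
# Assumes NumPy; `jacobians` is (N, 6), one photometric-residual Jacobian
# row per candidate point. Illustrative, not the paper's implementation.
import numpy as np

def select_informative_points(jacobians: np.ndarray, k: int = 24):
    N, d = jacobians.shape
    k = min(k, N)
    info = 1e-6 * np.eye(d)          # small prior keeps the matrix invertible
    chosen, remaining = [], set(range(N))
    for _ in range(k):
        best, best_gain = None, -np.inf
        for i in remaining:
            J = jacobians[i:i + 1]   # (1, 6) rank-1 measurement
            # Log-det increase from adding J, via the matrix determinant
            # lemma: log det(info + J^T J) - log det(info)
            #      = log(1 + J info^{-1} J^T).
            gain = np.log(1.0 + float(J @ np.linalg.solve(info, J.T)))
            if gain > best_gain:
                best, best_gain = i, gain
        chosen.append(best)
        remaining.remove(best)
        info += jacobians[best:best + 1].T @ jacobians[best:best + 1]
    return chosen
```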
{
"proceeding": {
"id": "12OmNxRnvRU",
"title": "2015 Brazilian Conference on Intelligent Systems (BRACIS)",
"acronym": "bracis",
"groupId": "1803430",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBU1jDV",
"doi": "10.1109/BRACIS.2015.12",
"title": "An Occlusion Calculus Based on an Interval Algebra",
"normalizedTitle": "An Occlusion Calculus Based on an Interval Algebra",
"abstract": "This paper introduces a new qualitative spatial reasoning formalism, called Interval Occlusion Calculus (IOC), that takes into account multiple viewpoints of a scene. This formalism extends Allen's Algebra by including an interval-based definition for spatial occlusion. We prove that IOC is a relation algebra and show complexity results for this formalism.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a new qualitative spatial reasoning formalism, called Interval Occlusion Calculus (IOC), that takes into account multiple viewpoints of a scene. This formalism extends Allen's Algebra by including an interval-based definition for spatial occlusion. We prove that IOC is a relation algebra and show complexity results for this formalism.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a new qualitative spatial reasoning formalism, called Interval Occlusion Calculus (IOC), that takes into account multiple viewpoints of a scene. This formalism extends Allen's Algebra by including an interval-based definition for spatial occlusion. We prove that IOC is a relation algebra and show complexity results for this formalism.",
"fno": "0016a128",
"keywords": [
"Observers",
"Calculus",
"Algebra",
"Cognition",
"Complexity Theory",
"Image Segmentation",
"Intelligent Systems",
"Multiple Viewpoints",
"Allen Algebra",
"Qualitative Spatial Reasoning"
],
"authors": [
{
"affiliation": null,
"fullName": "Paulo E. Santos",
"givenName": "Paulo E.",
"surname": "Santos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gérard Ligozat",
"givenName": "Gérard",
"surname": "Ligozat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marjan Safi-Samghabad",
"givenName": "Marjan",
"surname": "Safi-Samghabad",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bracis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "128-133",
"year": "2015",
"issn": null,
"isbn": "978-1-5090-0016-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0016a122",
"articleId": "12OmNvStcvO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0016a134",
"articleId": "12OmNrMZpEu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/time/2009/3727/0/3727a141",
"title": "Interval Algebra Networks with Infinite Intervals",
"doi": null,
"abstractUrl": "/proceedings-article/time/2009/3727a141/12OmNB1wkLI",
"parentPublication": {
"id": "proceedings/time/2009/3727/0",
"title": "2009 16th International Symposium on Temporal Representation and Reasoning (TIME 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ims/1991/2205/0/00153694",
"title": "An algebra and calculus for relational multidatabase systems",
"doi": null,
"abstractUrl": "/proceedings-article/ims/1991/00153694/12OmNB9bvcx",
"parentPublication": {
"id": "proceedings/ims/1991/2205/0",
"title": "Proceedings First International Workshop on Interoperability in Multidatabase Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icci/1993/4212/0/00315346",
"title": "Interval-set algebra for qualitative knowledge representation",
"doi": null,
"abstractUrl": "/proceedings-article/icci/1993/00315346/12OmNBU1jJN",
"parentPublication": {
"id": "proceedings/icci/1993/4212/0",
"title": "Cognitive Informatics, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismvl/1989/1947/0/00037798",
"title": "Medium algebra MA and medium propositional calculus MP*",
"doi": null,
"abstractUrl": "/proceedings-article/ismvl/1989/00037798/12OmNsbGvG9",
"parentPublication": {
"id": "proceedings/ismvl/1989/1947/0",
"title": "Proceedings The Nineteenth International Symposium on Multiple-Valued Logic",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/time/2002/1474/0/14740028",
"title": "Extending the Point Algebra into the Qualitative Algebra",
"doi": null,
"abstractUrl": "/proceedings-article/time/2002/14740028/12OmNx965BV",
"parentPublication": {
"id": "proceedings/time/2002/1474/0",
"title": "Proceedings Ninth International Symposium on Temporal Representation and Reasoning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/1984/0533/0/07271312",
"title": "An entity-relationship algebra",
"doi": null,
"abstractUrl": "/proceedings-article/icde/1984/07271312/12OmNylboIn",
"parentPublication": {
"id": "proceedings/icde/1984/0533/0",
"title": "1984 IEEE First International Conference on Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/time/2013/2240/0/06786791",
"title": "Minimal Consistency Problem of Temporal Qualitative Constraint Networks",
"doi": null,
"abstractUrl": "/proceedings-article/time/2013/06786791/12OmNzt0IrN",
"parentPublication": {
"id": "proceedings/time/2013/2240/0",
"title": "2013 20th International Symposium on Temporal Representation and Reasoning (TIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iucc-cit-dsci-smartcns/2021/6667/0/666700a353",
"title": "A New Interval Algebra for Temporal Data Modeling and Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iucc-cit-dsci-smartcns/2021/666700a353/1BrAGQ3H9Fm",
"parentPublication": {
"id": "proceedings/iucc-cit-dsci-smartcns/2021/6667/0",
"title": "2021 20th International Conference on Ubiquitous Computing and Communications (IUCC/CIT/DSCI/SmartCNS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2021/5841/0/584100a575",
"title": "Scratch-Style Relational Algebra and Calculus",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2021/584100a575/1EpLFOorneo",
"parentPublication": {
"id": "proceedings/csci/2021/5841/0",
"title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2019/3608/0/08785877",
"title": "Graphical Affine Algebra",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2019/08785877/1cdOo94Rtp6",
"parentPublication": {
"id": "proceedings/lics/2019/3608/0",
"title": "2019 34th Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
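The Interval Occlusion Calculus above extends Allen's thirteen interval relations with occlusion information. As background for that construction, a minimal sketch of the classical Allen relation between two 1-D intervals, e.g. the image-plane extents of two projected objects as seen from one viewpoint (plain Python; the occlusion labels IOC adds on top are not modeled here):

```python
# Minimal sketch: Allen's 13 interval relations, the algebra IOC builds on.
def allen_relation(a, b):
    """a, b: (start, end) with start < end. Returns one of Allen's 13 names."""
    (a1, a2), (b1, b2) = a, b
    if a2 < b1:  return "before"
    if b2 < a1:  return "after"
    if a2 == b1: return "meets"
    if b2 == a1: return "met-by"
    if a1 == b1 and a2 == b2: return "equals"
    if a1 == b1: return "starts" if a2 < b2 else "started-by"
    if a2 == b2: return "finishes" if a1 > b1 else "finished-by"
    if b1 < a1 and a2 < b2: return "during"
    if a1 < b1 and b2 < a2: return "contains"
    return "overlaps" if a1 < b1 else "overlapped-by"

# Example: one object's projection strictly inside another's extent.
# allen_relation((2, 5), (1, 8))  ->  "during"
```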
{
"proceeding": {
"id": "12OmNrkjVbZ",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqH9hoT",
"doi": "10.1109/WACV.2015.88",
"title": "AR-Weapon: Live Augmented Reality Based First-Person Shooting System",
"normalizedTitle": "AR-Weapon: Live Augmented Reality Based First-Person Shooting System",
"abstract": "This paper introduces a user-worn Augmented Reality (AR) based first-person weapon shooting system (AR-Weapon), suitable for both training and gaming. Different from existing AR-based first-person shooting systems, AR-Weapon does not use fiducial markers placed in the scene for tracking. Instead it uses natural scene features observed by the tracking camera from the live view of the world. The AR-Weapon system estimates 6-degrees of freedom orientation and location of the weapon and of the user operating it, thus allowing the weapon to fire simulated projectiles for both direct fire and non-line of sight during live runs. In addition, stereo cameras are used to compute depth and provide dynamic occlusion reasoning. Using the 6-DOF head and weapon tracking, dynamic occlusion reasoning and a terrain model of the environment, the fully virtual projectiles and synthetic avatars are displayed on the user's head mounted Optical-See-Through (OST) display overlaid over the live view of the real world. Since the projectiles, weapon characteristics and virtual enemy combatants are all simulated they can easily be changed to vary scenarios, new projectile types and future weapons. In this paper, we present the technical algorithms, system design and experiment results for a prototype AR-Weapon system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a user-worn Augmented Reality (AR) based first-person weapon shooting system (AR-Weapon), suitable for both training and gaming. Different from existing AR-based first-person shooting systems, AR-Weapon does not use fiducial markers placed in the scene for tracking. Instead it uses natural scene features observed by the tracking camera from the live view of the world. The AR-Weapon system estimates 6-degrees of freedom orientation and location of the weapon and of the user operating it, thus allowing the weapon to fire simulated projectiles for both direct fire and non-line of sight during live runs. In addition, stereo cameras are used to compute depth and provide dynamic occlusion reasoning. Using the 6-DOF head and weapon tracking, dynamic occlusion reasoning and a terrain model of the environment, the fully virtual projectiles and synthetic avatars are displayed on the user's head mounted Optical-See-Through (OST) display overlaid over the live view of the real world. Since the projectiles, weapon characteristics and virtual enemy combatants are all simulated they can easily be changed to vary scenarios, new projectile types and future weapons. In this paper, we present the technical algorithms, system design and experiment results for a prototype AR-Weapon system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a user-worn Augmented Reality (AR) based first-person weapon shooting system (AR-Weapon), suitable for both training and gaming. Different from existing AR-based first-person shooting systems, AR-Weapon does not use fiducial markers placed in the scene for tracking. Instead it uses natural scene features observed by the tracking camera from the live view of the world. The AR-Weapon system estimates 6-degrees of freedom orientation and location of the weapon and of the user operating it, thus allowing the weapon to fire simulated projectiles for both direct fire and non-line of sight during live runs. In addition, stereo cameras are used to compute depth and provide dynamic occlusion reasoning. Using the 6-DOF head and weapon tracking, dynamic occlusion reasoning and a terrain model of the environment, the fully virtual projectiles and synthetic avatars are displayed on the user's head mounted Optical-See-Through (OST) display overlaid over the live view of the real world. Since the projectiles, weapon characteristics and virtual enemy combatants are all simulated they can easily be changed to vary scenarios, new projectile types and future weapons. In this paper, we present the technical algorithms, system design and experiment results for a prototype AR-Weapon system.",
"fno": "6683a618",
"keywords": [
"Weapons",
"Navigation",
"Head",
"Training",
"Databases",
"Visualization",
"Three Dimensional Displays"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhiwei Zhu",
"givenName": "Zhiwei",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Vlad Branzoi",
"givenName": "Vlad",
"surname": "Branzoi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mikhail Sizintsev",
"givenName": "Mikhail",
"surname": "Sizintsev",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nicholas Vitovitch",
"givenName": "Nicholas",
"surname": "Vitovitch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Taragay Oskiper",
"givenName": "Taragay",
"surname": "Oskiper",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ryan Villamil",
"givenName": "Ryan",
"surname": "Villamil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ali Chaudhry",
"givenName": "Ali",
"surname": "Chaudhry",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Supun Samarasekera",
"givenName": "Supun",
"surname": "Samarasekera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rakesh Kumar",
"givenName": "Rakesh",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-01-01T00:00:00",
"pubType": "proceedings",
"pages": "618-625",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-6683-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6683a610",
"articleId": "12OmNxxvAGQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6683a626",
"articleId": "12OmNzXFoAz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2010/9343/0/05643620",
"title": "AR Shooter: An augmented reality shooting game system",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643620/12OmNyrIavz",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2009/3752/1/05336148",
"title": "New Algorithm and Simulation for Firing Safety Zone Design of Shipborne Weapon System",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2009/05336148/12OmNyv7m0a",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2015/02/06922494",
"title": "Adaptive Shooting for Bots in First Person Shooter Games Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/ci/2015/02/06922494/13rRUwInvnn",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeas/2022/6305/0/630500a033",
"title": "Design of Simple Simulation Platform for Armed Helicopter Shooting Vibration Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icmeas/2022/630500a033/1I8wFyCJSKc",
"parentPublication": {
"id": "proceedings/icmeas/2022/6305/0",
"title": "2022 8th International Conference on Mechanical Engineering and Automation Science (ICMEAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a673",
"title": "Real-time Concealed Weapon Detection on 3D Radar Images for Walk-through Screening System",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a673/1L8qi660aQw",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a541",
"title": "A study of the influence of AR on the perception, comprehension and projection levels of situation awareness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a541/1MNgMgQsPjW",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a563",
"title": "Detecting the Point of Release of Virtual Projectiles in AR/VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a563/1tnXQEjQQh2",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cipae/2021/2665/0/266500a275",
"title": "Modeling and Simulation of Dual Axis Stabilization System for Vehicle Weapon Based on Angular Rate Compensation",
"doi": null,
"abstractUrl": "/proceedings-article/cipae/2021/266500a275/1yQATtjA676",
"parentPublication": {
"id": "proceedings/cipae/2021/2665/0",
"title": "2021 International Conference on Computers, Information Processing and Advanced Education (CIPAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2021/0189/0/018900a144",
"title": "RealShooting: Expanding the experience of point-and-click target shooting games",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2021/018900a144/1zusp8gOBMs",
"parentPublication": {
"id": "proceedings/sbgames/2021/0189/0",
"title": "2021 20th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a237",
"title": "A Large-Scale Indoor Layout Reconstruction and Localization System for Spatial-Aware Mobile AR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a237/1zxLwu0YJqw",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
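The AR-Weapon abstract mentions stereo-derived depth for dynamic occlusion reasoning. Per-pixel depth comparison is the standard mechanism behind this; a minimal sketch under that assumption (NumPy; array names are illustrative, and the authors' pipeline layers tracking and terrain models on top of this idea):

```python
# Minimal sketch: depth-test compositing for dynamic occlusion in AR.
# Assumes NumPy; `real_depth` comes from stereo, `virtual_rgb`/`virtual_depth`
# from rendering the synthetic content. Array names are illustrative.
import numpy as np

def composite_with_occlusion(camera_rgb, real_depth, virtual_rgb, virtual_depth):
    """Show a virtual pixel only where it is nearer than the real scene."""
    visible = virtual_depth < real_depth          # (H, W) boolean mask
    out = camera_rgb.copy()
    out[visible] = virtual_rgb[visible]           # real geometry occludes the rest
    return out
```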
{
"proceeding": {
"id": "12OmNvRU0cK",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrFTr6j",
"doi": "10.1109/ISMAR.2016.17",
"title": "Edge Snapping-Based Depth Enhancement for Dynamic Occlusion Handling in Augmented Reality",
"normalizedTitle": "Edge Snapping-Based Depth Enhancement for Dynamic Occlusion Handling in Augmented Reality",
"abstract": "Dynamic occlusion handling is critical for correct depth perception in Augmented Reality (AR) applications. Consequently it is a key component to ensure realistic and immersive AR experiences. Existing solutions to tackle this challenge typically suffer from various limitations, e.g. assumption of a static scene or high computational complexity. In this work, we propose an algorithm for depth map enhancement for dynamic occlusion handling in AR applications. The key of our algorithm is an edge snapping approach, formulated as discrete optimization, that improves the consistency of object boundaries between RGB and depth data. The optimization problem is solved efficiently via dynamic programming and our system runs in near real-time on the tablet platform. Experimental evaluations demonstrate that our approach largely improves the raw sensor data and is particularly suitable compared to several related approaches in terms of both speed and quality. Furthermore, we demonstrate visually pleasing dynamic occlusion effects for multiple AR use cases based on our edge snapping results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dynamic occlusion handling is critical for correct depth perception in Augmented Reality (AR) applications. Consequently it is a key component to ensure realistic and immersive AR experiences. Existing solutions to tackle this challenge typically suffer from various limitations, e.g. assumption of a static scene or high computational complexity. In this work, we propose an algorithm for depth map enhancement for dynamic occlusion handling in AR applications. The key of our algorithm is an edge snapping approach, formulated as discrete optimization, that improves the consistency of object boundaries between RGB and depth data. The optimization problem is solved efficiently via dynamic programming and our system runs in near real-time on the tablet platform. Experimental evaluations demonstrate that our approach largely improves the raw sensor data and is particularly suitable compared to several related approaches in terms of both speed and quality. Furthermore, we demonstrate visually pleasing dynamic occlusion effects for multiple AR use cases based on our edge snapping results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dynamic occlusion handling is critical for correct depth perception in Augmented Reality (AR) applications. Consequently it is a key component to ensure realistic and immersive AR experiences. Existing solutions to tackle this challenge typically suffer from various limitations, e.g. assumption of a static scene or high computational complexity. In this work, we propose an algorithm for depth map enhancement for dynamic occlusion handling in AR applications. The key of our algorithm is an edge snapping approach, formulated as discrete optimization, that improves the consistency of object boundaries between RGB and depth data. The optimization problem is solved efficiently via dynamic programming and our system runs in near real-time on the tablet platform. Experimental evaluations demonstrate that our approach largely improves the raw sensor data and is particularly suitable compared to several related approaches in terms of both speed and quality. Furthermore, we demonstrate visually pleasing dynamic occlusion effects for multiple AR use cases based on our edge snapping results.",
"fno": "3641a054",
"keywords": [
"Three Dimensional Displays",
"Glass",
"Solid Modeling",
"Image Edge Detection",
"Two Dimensional Displays",
"Visualization",
"Heuristic Algorithms",
"AR Glasses",
"Occlusion Handling",
"Depth Enhancement",
"Augmented Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Chao Du",
"givenName": "Chao",
"surname": "Du",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yen-Lin Chen",
"givenName": "Yen-Lin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mao Ye",
"givenName": "Mao",
"surname": "Ye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Liu Ren",
"givenName": "Liu",
"surname": "Ren",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "54-62",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3641-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3641a044",
"articleId": "12OmNCd2rxc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3641a063",
"articleId": "12OmNB7cjhR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2016/2303/0/2303a189",
"title": "Occlusion Detection and Localization from Kinect Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a189/12OmNAS9zvZ",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d487",
"title": "Occlusion-Aware Depth Estimation Using Light-Field Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d487/12OmNrAv3E7",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a062",
"title": "Occlusion Matting: Realistic Occlusion Handling for Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a062/12OmNvSKNKa",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945729",
"title": "Handling occlusion in Augmented Reality surgical training based instrument tracking",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945729/12OmNvkGW33",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/11/07374709",
"title": "Depth Estimation with Occlusion Modeling Using Light-Field Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2016/11/07374709/13rRUx0gewi",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102829",
"title": "Accurate Light Field Depth Estimation via an Occlusion-Aware Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102829/1kwqOQWv65a",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800o4636",
"title": "Predicting Sharp and Accurate Occlusion Boundaries in Monocular Depth Estimation Using Displacement Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800o4636/1m3nG2dJvXi",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a064",
"title": "The Effects of Object Shape, Fidelity, Color, and Luminance on Depth Perception in Handheld Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a064/1pysxPMqyTm",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a514",
"title": "DynOcc: Learning Single-View Depth from Dynamic Occlusion Cues",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a514/1qyxmHey0PS",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwoxSc6",
"doi": "10.1109/ICCV.2015.502",
"title": "Large Displacement 3D Scene Flow with Occlusion Reasoning",
"normalizedTitle": "Large Displacement 3D Scene Flow with Occlusion Reasoning",
"abstract": "The emergence of modern, affordable and accurate RGB-D sensors increases the need for single view approaches to estimate 3-dimensional motion, also known as scene flow. In this paper we propose a coarse-to-fine, dense, correspondence-based scene flow formulation that relies on explicit geometric reasoning to account for the effects of large displacements and to model occlusion. Our methodology enforces local motion rigidity at the level of the 3d point cloud without explicitly smoothing the parameters of adjacent neighborhoods. By integrating all geometric and photometric components in a single, consistent, occlusion-aware energy model, defined over overlapping, image-adaptive neighborhoods, our method can process fast motions and large occlusions areas, as present in challenging datasets like the MPI Sintel Flow Dataset, recently augmented with depth information. By explicitly modeling large displacements and occlusion, we can handle difficult sequences which cannot be currently processed by state of the art scene flow methods. We also show that by integrating depth information into the model, we can obtain correspondence fields with improved spatial support and sharper boundaries compared to the state of the art, large-displacement optical flow methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The emergence of modern, affordable and accurate RGB-D sensors increases the need for single view approaches to estimate 3-dimensional motion, also known as scene flow. In this paper we propose a coarse-to-fine, dense, correspondence-based scene flow formulation that relies on explicit geometric reasoning to account for the effects of large displacements and to model occlusion. Our methodology enforces local motion rigidity at the level of the 3d point cloud without explicitly smoothing the parameters of adjacent neighborhoods. By integrating all geometric and photometric components in a single, consistent, occlusion-aware energy model, defined over overlapping, image-adaptive neighborhoods, our method can process fast motions and large occlusions areas, as present in challenging datasets like the MPI Sintel Flow Dataset, recently augmented with depth information. By explicitly modeling large displacements and occlusion, we can handle difficult sequences which cannot be currently processed by state of the art scene flow methods. We also show that by integrating depth information into the model, we can obtain correspondence fields with improved spatial support and sharper boundaries compared to the state of the art, large-displacement optical flow methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The emergence of modern, affordable and accurate RGB-D sensors increases the need for single view approaches to estimate 3-dimensional motion, also known as scene flow. In this paper we propose a coarse-to-fine, dense, correspondence-based scene flow formulation that relies on explicit geometric reasoning to account for the effects of large displacements and to model occlusion. Our methodology enforces local motion rigidity at the level of the 3d point cloud without explicitly smoothing the parameters of adjacent neighborhoods. By integrating all geometric and photometric components in a single, consistent, occlusion-aware energy model, defined over overlapping, image-adaptive neighborhoods, our method can process fast motions and large occlusions areas, as present in challenging datasets like the MPI Sintel Flow Dataset, recently augmented with depth information. By explicitly modeling large displacements and occlusion, we can handle difficult sequences which cannot be currently processed by state of the art scene flow methods. We also show that by integrating depth information into the model, we can obtain correspondence fields with improved spatial support and sharper boundaries compared to the state of the art, large-displacement optical flow methods.",
"fno": "8391e417",
"keywords": [
"Three Dimensional Displays",
"Cognition",
"Optical Imaging",
"Cameras",
"Optical Sensors",
"Adaptive Optics"
],
"authors": [
{
"affiliation": null,
"fullName": "Andrei Zanfir",
"givenName": "Andrei",
"surname": "Zanfir",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cristian Sminchisescu",
"givenName": "Cristian",
"surname": "Sminchisescu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "4417-4425",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391e408",
"articleId": "12OmNyqRnp3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391e426",
"articleId": "12OmNwtn3Fj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851f704",
"title": "Efficient Coarse-to-Fine Patch Match for Large Displacement Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f704/12OmNAlvHUT",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a198",
"title": "Optical Flow at Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a198/12OmNBqdr2T",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559005",
"title": "Motion field and occlusion time estimation via alternate exposure flow",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559005/12OmNx8Ous9",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840b721",
"title": "Locally Affine Sparse-to-Dense Matching for Motion and Occlusion Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840b721/12OmNxXl5x3",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2015/6683/0/6683b100",
"title": "Sparse Flow: Sparse Matching for Small to Large Displacement Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683b100/12OmNySosMD",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b647",
"title": "FlowNet 2.0: Evolution of Optical Flow Estimation with Deep Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b647/12OmNyVerZg",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118b098",
"title": "Local Layering for Joint Motion Estimation and Occlusion Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b098/12OmNzt0IC2",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e884",
"title": "Occlusion Aware Unsupervised Learning of Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e884/17D45WWzW7b",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2018/7445/0/744500a119",
"title": "MaxFlow: a Convolutional Neural Network Based Optical Flow Algorithm for Large Displacement Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2018/744500a119/17D45WnnFX5",
"parentPublication": {
"id": "proceedings/dcabes/2018/7445/0",
"title": "2018 17th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h394",
"title": "Self-Supervised Monocular Scene Flow Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h394/1m3odcpYzoQ",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNywfKyu",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRPgDK",
"doi": "10.1109/ISMAR.2010.5643558",
"title": "Foreground and shadow occlusion handling for outdoor augmented reality",
"normalizedTitle": "Foreground and shadow occlusion handling for outdoor augmented reality",
"abstract": "Occlusion handling in augmented reality (AR) applications is challenging in synthesizing virtual objects correctly into the real scene with respect to existing foregrounds and shadows. Furthermore, outdoor environment makes the task more difficult due to the unpredictable illumination changes. This paper proposes novel outdoor illumination constraints for resolving the foreground occlusion problem in outdoor environment. The constraints can be also integrated into a probabilistic model of multiple cues for a better segmentation of the foreground. In addition, we introduce an effective method to resolve the shadow occlusion problem by using shadow detection and recasting with a spherical vision camera. We have applied the system in our digital cultural heritage project named Virtual Asuka (VA) and verified the effectiveness of the system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occlusion handling in augmented reality (AR) applications is challenging in synthesizing virtual objects correctly into the real scene with respect to existing foregrounds and shadows. Furthermore, outdoor environment makes the task more difficult due to the unpredictable illumination changes. This paper proposes novel outdoor illumination constraints for resolving the foreground occlusion problem in outdoor environment. The constraints can be also integrated into a probabilistic model of multiple cues for a better segmentation of the foreground. In addition, we introduce an effective method to resolve the shadow occlusion problem by using shadow detection and recasting with a spherical vision camera. We have applied the system in our digital cultural heritage project named Virtual Asuka (VA) and verified the effectiveness of the system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occlusion handling in augmented reality (AR) applications is challenging in synthesizing virtual objects correctly into the real scene with respect to existing foregrounds and shadows. Furthermore, outdoor environment makes the task more difficult due to the unpredictable illumination changes. This paper proposes novel outdoor illumination constraints for resolving the foreground occlusion problem in outdoor environment. The constraints can be also integrated into a probabilistic model of multiple cues for a better segmentation of the foreground. In addition, we introduce an effective method to resolve the shadow occlusion problem by using shadow detection and recasting with a spherical vision camera. We have applied the system in our digital cultural heritage project named Virtual Asuka (VA) and verified the effectiveness of the system.",
"fno": "05643558",
"keywords": [
"Augmented Reality",
"Image Segmentation",
"Object Detection",
"Shadow Occlusion Handling",
"Foreground Occlusion Handling",
"Outdoor Augmented Reality",
"Foreground Segmentation",
"Shadow Detection",
"Shadow Recasting",
"Spherical Vision Camera",
"Virtual Asuka",
"Lighting",
"Sun",
"Motion Segmentation",
"Cameras",
"Image Color Analysis",
"Pixel",
"Image Segmentation",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"I 4 6 Image Processing And Computer Vision Segmentation Pixel Classificatio"
],
"authors": [
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Boun Vinh Lu",
"givenName": "Boun Vinh",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Tetsuya Kakuta",
"givenName": "Tetsuya",
"surname": "Kakuta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Rei Kawakami",
"givenName": "Rei",
"surname": "Kawakami",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Takeshi Oishi",
"givenName": "Takeshi",
"surname": "Oishi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo, Japan",
"fullName": "Katsushi Ikeuchi",
"givenName": "Katsushi",
"surname": "Ikeuchi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "109-118",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9343-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05643556",
"articleId": "12OmNAkWvti",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05643559",
"articleId": "12OmNB7tUq0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/snpd/2009/3642/0/3642a391",
"title": "Foreground and Shadow Segmentation by Exploiting Multiple Cues",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2009/3642a391/12OmNBmf3c5",
"parentPublication": {
"id": "proceedings/snpd/2009/3642/0",
"title": "2009 10th ACIS International Conference on Software Engineering, Artificial Intelligences, Networking and Parallel/Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2011/0230/0/06167859",
"title": "Real-Time Diffuse Behavior Detection of Pixels from Outdoor Image Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2011/06167859/12OmNCcbE4U",
"parentPublication": {
"id": "proceedings/imvip/2011/0230/0",
"title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a028",
"title": "x-Hour Outdoor Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a028/12OmNCesr5K",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c373",
"title": "Deep outdoor illumination estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c373/12OmNy1SFMf",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2007/3009/0/30090189",
"title": "The Soft Shadow Occlusion Camera",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2007/30090189/12OmNy1SFS0",
"parentPublication": {
"id": "proceedings/pg/2007/3009/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761326",
"title": "Monocular video foreground segmentation system",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761326/12OmNyprnw8",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2004/2158/1/01315055",
"title": "Video repairing: inference of foreground and background under severe occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2004/01315055/12OmNz61cV8",
"parentPublication": {
"id": "proceedings/cvpr/2004/2158/1",
"title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apcip/2009/3699/2/3699b422",
"title": "A Robust Algorithm for Shadow Removal of Foreground Detection in Video Surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/apcip/2009/3699b422/12OmNzmtWBC",
"parentPublication": {
"id": "proceedings/apcip/2009/3699/1",
"title": "Information Processing, Asia-Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a460",
"title": "Shadow Detection and Sun Direction in Photo Collections",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a460/12OmNzwpUq0",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/4/01699780",
"title": "Better Foreground Segmentation for Static Cameras via New Energy Form and Dynamic Graph-cut",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/01699780/1i5nhUkr2Ew",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1ap5wvyUHKM",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "1ap5xY9dZni",
"doi": "10.1109/ICVRV.2017.00069",
"title": "Real-Time Augmented Reality with Occlusion Handling Based on RGBD Images",
"normalizedTitle": "Real-Time Augmented Reality with Occlusion Handling Based on RGBD Images",
"abstract": "Augmented Reality (AR) is one of the latest developments in human-computer interaction technology. It aims to generate illusions from seamless fusion of virtual objects and real world. Typical AR system requires two basic parts: three-dimensional registration and real-virtual fusion. Occlusion handling is crucial for visual realism. To optimize visual realism, we generated a real-time systematic architecture to operate occlusion handling. The architecture is based on RGBD images, and it consists of three parts: real-time camera tracking system, 3D reconstruction system and AR fusion system. Specifically, we used a two-pass scheme strategy to execute the AR system. The first pass tracks camera poses timely at video rate, which allows the reconstruction results be updated and visualized correspondingly during the scanning. The second pass takes place simultaneously to handle occlusion between virtual objects and real scene according to camera pose. Finally, the render results of virtual objects and the color images are fused to generate AR contents. Our results indicate that this method is stable and precise for occlusion handling, and can effectively improve realism in AR system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality (AR) is one of the latest developments in human-computer interaction technology. It aims to generate illusions from seamless fusion of virtual objects and real world. Typical AR system requires two basic parts: three-dimensional registration and real-virtual fusion. Occlusion handling is crucial for visual realism. To optimize visual realism, we generated a real-time systematic architecture to operate occlusion handling. The architecture is based on RGBD images, and it consists of three parts: real-time camera tracking system, 3D reconstruction system and AR fusion system. Specifically, we used a two-pass scheme strategy to execute the AR system. The first pass tracks camera poses timely at video rate, which allows the reconstruction results be updated and visualized correspondingly during the scanning. The second pass takes place simultaneously to handle occlusion between virtual objects and real scene according to camera pose. Finally, the render results of virtual objects and the color images are fused to generate AR contents. Our results indicate that this method is stable and precise for occlusion handling, and can effectively improve realism in AR system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality (AR) is one of the latest developments in human-computer interaction technology. It aims to generate illusions from seamless fusion of virtual objects and real world. Typical AR system requires two basic parts: three-dimensional registration and real-virtual fusion. Occlusion handling is crucial for visual realism. To optimize visual realism, we generated a real-time systematic architecture to operate occlusion handling. The architecture is based on RGBD images, and it consists of three parts: real-time camera tracking system, 3D reconstruction system and AR fusion system. Specifically, we used a two-pass scheme strategy to execute the AR system. The first pass tracks camera poses timely at video rate, which allows the reconstruction results be updated and visualized correspondingly during the scanning. The second pass takes place simultaneously to handle occlusion between virtual objects and real scene according to camera pose. Finally, the render results of virtual objects and the color images are fused to generate AR contents. Our results indicate that this method is stable and precise for occlusion handling, and can effectively improve realism in AR system.",
"fno": "263600a298",
"keywords": [
"Augmented Reality",
"Cameras",
"Computer Graphics",
"Image Colour Analysis",
"Image Reconstruction",
"Image Registration",
"Object Tracking",
"Pose Estimation",
"Rendering Computer Graphics",
"Augmented Reality",
"Occlusion Handling",
"RGBD Images",
"Human Computer Interaction Technology",
"Seamless Fusion",
"Virtual Objects",
"Three Dimensional Registration",
"Real Virtual Fusion",
"Visual Realism",
"Real Time Systematic Architecture",
"Real Time Camera",
"3 D Reconstruction System",
"Fusion System",
"AR System",
"Two Pass Scheme Strategy",
"Camera Pose Tracking",
"Video Rate",
"Cameras",
"Image Reconstruction",
"Three Dimensional Displays",
"Real Time Systems",
"Augmented Reality",
"Color",
"Rendering Computer Graphics",
"Augmented Reality",
"Occlusion Handling",
"Scene Reconstruction",
"Rgbd"
],
"authors": [
{
"affiliation": null,
"fullName": "Xiaozhi Guo",
"givenName": "Xiaozhi",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chen Wang",
"givenName": "Chen",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yue Qi",
"givenName": "Yue",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "298-302",
"year": "2017",
"issn": "2375-141X",
"isbn": "978-1-5386-2636-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "263600a296",
"articleId": "1ap5AyOKO9q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "263600a303",
"articleId": "1ap5xcPk2Ws",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2009/3789/0/3789a153",
"title": "An Implementation Review of Occlusion-Based Interaction in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a153/12OmNB7cjly",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a062",
"title": "Occlusion Matting: Realistic Occlusion Handling for Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a062/12OmNvSKNKa",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945729",
"title": "Handling occlusion in Augmented Reality surgical training based instrument tracking",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945729/12OmNvkGW33",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2013/5009/0/5009a141",
"title": "Augmented Reality X-Ray Interaction in K-12 Education: Theory, Student Perception and Teacher Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a141/12OmNwp74Cw",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643558",
"title": "Foreground and shadow occlusion handling for outdoor augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643558/12OmNyRPgDK",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2011/4420/0/4420b025",
"title": "On Visual Artifacts of Physics Simulation in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2011/4420b025/12OmNzIUfTU",
"parentPublication": {
"id": "proceedings/isuvr/2011/4420/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699304",
"title": "Addressing the Occlusion Problem in Augmented Reality Environments with Phantom Hollow Objects",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699304/19F1T4QjgOY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523845",
"title": "Long-Range Augmented Reality with Dynamic Occlusion Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523845/1wpqkYgQZd6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09560081",
"title": "Occlusion Handling in Augmented Reality: Past, Present and Future",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09560081/1xtOqCv3vNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1f6V69wY",
"doi": "10.1109/VR.2019.8798025",
"title": "Occlusion Management in VR: A Comparative Study",
"normalizedTitle": "Occlusion Management in VR: A Comparative Study",
"abstract": "VR applications rely on the user's ability to explore the virtual scene efficiently. In complex scenes, occlusions limit what the user can see from a given location, and the user has to navigate the viewpoint around occluders to gain line of sight to the hidden parts of the scene. When the disoccluded regions prove to be of no interest, the user has to retrace their path, making scene exploration inefficient. Furthermore, the user might not be able to assume a viewpoint that would reveal the occluded regions due to physical limitations, such as obstacles in the real world hosting the VR application, viewpoints beyond the tracked area, or viewpoints above the user's head that cannot be reached by walking. Several occlusion management methods have been proposed in visualization research, such as top view, X-ray, and multiperspective visualization, which help the user see more from the current position, having the potential to improve the exploration efficiency of complex scenes. This paper reports on a study that investigates the potential of these three occlusion management methods in the context of VR applications, compared to conventional navigation. Participants were required to explore two virtual scenes to purchase five items in a virtual Supermarket, and to find three people in a virtual parking garage. The task performance metrics were task completion time, total distance traveled, and total head rotation. The study also measured user spatial awareness, depth perception, and simulator sickness. The results indicate that users benefit from top view visualization which helps them learn the scene layout and helps them understand their position within the scene, but the top view does not let the user find targets easily due to occlusions in the vertical direction, and due to the small image footprint of the targets. The X-ray visualization method worked better in the garage scene, a scene with a few big occluders and a low occlusion depth complexity' and less well in the Supermarket scene, a scene with many small occluders that create high occlusion depth complexity. The multi-perspective visualization method achieves better performance than the top view method and the X-ray method, in both scenes. There are no significant differences between the three methods and the conventional method in terms of spatial awareness, depth perception, and simulator sickness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "VR applications rely on the user's ability to explore the virtual scene efficiently. In complex scenes, occlusions limit what the user can see from a given location, and the user has to navigate the viewpoint around occluders to gain line of sight to the hidden parts of the scene. When the disoccluded regions prove to be of no interest, the user has to retrace their path, making scene exploration inefficient. Furthermore, the user might not be able to assume a viewpoint that would reveal the occluded regions due to physical limitations, such as obstacles in the real world hosting the VR application, viewpoints beyond the tracked area, or viewpoints above the user's head that cannot be reached by walking. Several occlusion management methods have been proposed in visualization research, such as top view, X-ray, and multiperspective visualization, which help the user see more from the current position, having the potential to improve the exploration efficiency of complex scenes. This paper reports on a study that investigates the potential of these three occlusion management methods in the context of VR applications, compared to conventional navigation. Participants were required to explore two virtual scenes to purchase five items in a virtual Supermarket, and to find three people in a virtual parking garage. The task performance metrics were task completion time, total distance traveled, and total head rotation. The study also measured user spatial awareness, depth perception, and simulator sickness. The results indicate that users benefit from top view visualization which helps them learn the scene layout and helps them understand their position within the scene, but the top view does not let the user find targets easily due to occlusions in the vertical direction, and due to the small image footprint of the targets. The X-ray visualization method worked better in the garage scene, a scene with a few big occluders and a low occlusion depth complexity' and less well in the Supermarket scene, a scene with many small occluders that create high occlusion depth complexity. The multi-perspective visualization method achieves better performance than the top view method and the X-ray method, in both scenes. There are no significant differences between the three methods and the conventional method in terms of spatial awareness, depth perception, and simulator sickness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "VR applications rely on the user's ability to explore the virtual scene efficiently. In complex scenes, occlusions limit what the user can see from a given location, and the user has to navigate the viewpoint around occluders to gain line of sight to the hidden parts of the scene. When the disoccluded regions prove to be of no interest, the user has to retrace their path, making scene exploration inefficient. Furthermore, the user might not be able to assume a viewpoint that would reveal the occluded regions due to physical limitations, such as obstacles in the real world hosting the VR application, viewpoints beyond the tracked area, or viewpoints above the user's head that cannot be reached by walking. Several occlusion management methods have been proposed in visualization research, such as top view, X-ray, and multiperspective visualization, which help the user see more from the current position, having the potential to improve the exploration efficiency of complex scenes. This paper reports on a study that investigates the potential of these three occlusion management methods in the context of VR applications, compared to conventional navigation. Participants were required to explore two virtual scenes to purchase five items in a virtual Supermarket, and to find three people in a virtual parking garage. The task performance metrics were task completion time, total distance traveled, and total head rotation. The study also measured user spatial awareness, depth perception, and simulator sickness. The results indicate that users benefit from top view visualization which helps them learn the scene layout and helps them understand their position within the scene, but the top view does not let the user find targets easily due to occlusions in the vertical direction, and due to the small image footprint of the targets. The X-ray visualization method worked better in the garage scene, a scene with a few big occluders and a low occlusion depth complexity' and less well in the Supermarket scene, a scene with many small occluders that create high occlusion depth complexity. The multi-perspective visualization method achieves better performance than the top view method and the X-ray method, in both scenes. There are no significant differences between the three methods and the conventional method in terms of spatial awareness, depth perception, and simulator sickness.",
"fno": "08798025",
"keywords": [
"Data Visualisation",
"Interactive Systems",
"Virtual Reality",
"VR Application",
"Virtual Scene",
"Occlusions",
"Occlusion Management Methods",
"Virtual Parking Garage",
"User Spatial Awareness",
"View Visualization",
"Scene Layout",
"X Ray Visualization Method",
"Garage Scene",
"Multiperspective Visualization Method",
"Scene Exploration",
"Occlusion Depth Complexity",
"Supermarket Scene",
"Visualization",
"Cameras",
"Task Analysis",
"X Ray Imaging",
"Navigation",
"Legged Locomotion",
"Resists",
"Scene Exploration",
"Occlusion Management",
"Virtual Reality",
"Top View",
"X Ray",
"Multiperspective Visualization",
"Human Centered Computing X 2014 Human Centered Interaction X 2014 Virtual Reality",
"Computer Graphics X 2014 Ocllusion Management X 2014 Visualization"
],
"authors": [
{
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beijing Advanced Innovation Center for Biomedical Engineering, Beihang University, China",
"fullName": "Lili Wang",
"givenName": "Lili",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qingdao Research Institute of Beihang University, China",
"fullName": "Han Zhao",
"givenName": "Han",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qingdao Research Institute of Beihang University, China",
"fullName": "Zesheng Wang",
"givenName": "Zesheng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beijing Advanced Innovation Center for Biomedical Engineering, Beihang University, China",
"fullName": "Jian Wu",
"givenName": "Jian",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qingdao Research Institute of Beihang University, China",
"fullName": "Bingqiang Li",
"givenName": "Bingqiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qingdao Research Institute of Beihang University, China",
"fullName": "Zhiming He",
"givenName": "Zhiming",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Purdue University, United States",
"fullName": "Voicu Popescu",
"givenName": "Voicu",
"surname": "Popescu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "708-716",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798251",
"articleId": "1cJ0YOUUaqc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798204",
"articleId": "1cJ11rHzFi8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2007/1179/0/04270170",
"title": "3D Occlusion Inference from Silhouette Cues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270170/12OmNBCqbE1",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/med/2009/4684/0/05164668",
"title": "Detecting static occlusion edges using foreground patterns",
"doi": null,
"abstractUrl": "/proceedings-article/med/2009/05164668/12OmNwMXnqv",
"parentPublication": {
"id": "proceedings/med/2009/4684/0",
"title": "Mediterranean Conference on Control and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270451",
"title": "Robust Occlusion Handling in Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270451/12OmNzayNm9",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2002/1846/0/18460043",
"title": "Dynamic Scene Occlusion Culling Using a Regular Grid",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2002/18460043/12OmNzlD96b",
"parentPublication": {
"id": "proceedings/sibgrapi/2002/1846/0",
"title": "Proceedings. XV Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2006/2686/0/04027063",
"title": "Plausible Image Based Soft Shadows Using Occlusion Textures",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2006/04027063/146z4GOCJgZ",
"parentPublication": {
"id": "proceedings/sibgrapi/2006/2686/0",
"title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08123949",
"title": "Efficient VR and AR Navigation Through Multiperspective Occlusion Management",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08123949/14H4WNoi7Yc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642365",
"title": "VR Exploration Assistance through Automatic Occlusion Removal",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642365/17PYEj2mz9Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699200",
"title": "Effective Free Field of View Scene Exploration in VR and AR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09332290",
"title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09560081",
"title": "Occlusion Handling in Augmented Reality: Past, Present and Future",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09560081/1xtOqCv3vNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeQZ7zP5ks",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00057",
"title": "Occlusion Handling in Outdoor Augmented Reality using a Combination of Map Data and Instance Segmentation",
"normalizedTitle": "Occlusion Handling in Outdoor Augmented Reality using a Combination of Map Data and Instance Segmentation",
"abstract": "Visual consistency between virtual objects and the real environment is essential to improve user experience in Augmented Reality (AR). Occlusion handling is one of the key factors for maintaining visual consistency. In an application scenario for small areas such as indoors, various methods are applicable to acquire a depth information required for occlusion handling. However, in an application scenario in wide environment such as outdoor especially a scene including many buildings, occlusion handling is a challenging task because acquiring an accurate depth map is challenging. Several studies that have tackled this problem utilized 3D models of real buildings, but they have suffered from the accuracy of 3D models and camera localization. In this study, we propose a novel occlusion handling method using a monocular RGB camera and map data. Our method detects the regions of buildings in a camera image using an instance segmentation method and then obtains accurate occlusion handling in the image from each building instance and corresponding building map. The qualitative evaluation shows the improvement in the occlusion handling with buildings. The user study also shows the better performance of the perception of depth and distance than a model-based method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual consistency between virtual objects and the real environment is essential to improve user experience in Augmented Reality (AR). Occlusion handling is one of the key factors for maintaining visual consistency. In an application scenario for small areas such as indoors, various methods are applicable to acquire a depth information required for occlusion handling. However, in an application scenario in wide environment such as outdoor especially a scene including many buildings, occlusion handling is a challenging task because acquiring an accurate depth map is challenging. Several studies that have tackled this problem utilized 3D models of real buildings, but they have suffered from the accuracy of 3D models and camera localization. In this study, we propose a novel occlusion handling method using a monocular RGB camera and map data. Our method detects the regions of buildings in a camera image using an instance segmentation method and then obtains accurate occlusion handling in the image from each building instance and corresponding building map. The qualitative evaluation shows the improvement in the occlusion handling with buildings. The user study also shows the better performance of the perception of depth and distance than a model-based method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual consistency between virtual objects and the real environment is essential to improve user experience in Augmented Reality (AR). Occlusion handling is one of the key factors for maintaining visual consistency. In an application scenario for small areas such as indoors, various methods are applicable to acquire a depth information required for occlusion handling. However, in an application scenario in wide environment such as outdoor especially a scene including many buildings, occlusion handling is a challenging task because acquiring an accurate depth map is challenging. Several studies that have tackled this problem utilized 3D models of real buildings, but they have suffered from the accuracy of 3D models and camera localization. In this study, we propose a novel occlusion handling method using a monocular RGB camera and map data. Our method detects the regions of buildings in a camera image using an instance segmentation method and then obtains accurate occlusion handling in the image from each building instance and corresponding building map. The qualitative evaluation shows the improvement in the occlusion handling with buildings. The user study also shows the better performance of the perception of depth and distance than a model-based method.",
"fno": "129800a246",
"keywords": [
"Augmented Reality",
"Cameras",
"Computer Graphics",
"Image Colour Analysis",
"Image Segmentation",
"Corresponding Building Map",
"Occlusion Handling",
"Buildings",
"Outdoor Augmented Reality",
"Map Data",
"Visual Consistency",
"Application Scenario",
"Accurate Depth Map",
"Novel Occlusion",
"Instance Segmentation Method",
"Accurate Occlusion",
"Location Awareness",
"Visualization",
"Solid Modeling",
"Image Segmentation",
"Three Dimensional Displays",
"Buildings",
"Cameras",
"Augmented Reality",
"Occlusion",
"Map Data",
"Instance Segmentation"
],
"authors": [
{
"affiliation": "NTT DOCOMO Inc",
"fullName": "Takaya Ogawa",
"givenName": "Takaya",
"surname": "Ogawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Osaka University",
"fullName": "Tomohiro Mashita",
"givenName": "Tomohiro",
"surname": "Mashita",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "246-250",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "129800a240",
"articleId": "1yfxHThjmo0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a251",
"articleId": "1yfxO7CNnQk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2011/0529/0/05981790",
"title": "Occlusion robust multi-camera face tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981790/12OmNqyUUFo",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a054",
"title": "Edge Snapping-Based Depth Enhancement for Dynamic Occlusion Handling in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a054/12OmNrFTr6j",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a062",
"title": "Occlusion Matting: Realistic Occlusion Handling for Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a062/12OmNvSKNKa",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459207",
"title": "An HOG-LBP human detector with partial occlusion handling",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459207/12OmNwxlrcf",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643558",
"title": "Foreground and shadow occlusion handling for outdoor augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643558/12OmNyRPgDK",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/1/07501109",
"title": "Stereo by Integration of Two Algorithms with/without Occlusion Handling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07501109/12OmNzTYCap",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05540111",
"title": "Multi-cue pedestrian classification with partial occlusion handling",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05540111/12OmNzX6cjQ",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600v1178",
"title": "Instance-wise Occlusion and Depth Orders in Natural Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600v1178/1H1ksigvcRO",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a298",
"title": "Real-Time Augmented Reality with Occlusion Handling Based on RGBD Images",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a298/1ap5xY9dZni",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09560081",
"title": "Occlusion Handling in Augmented Reality: Past, Present and Future",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09560081/1xtOqCv3vNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
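At composition time, the occlusion-handling idea in this record reduces to a per-pixel depth test between the virtual content and per-building depth layers derived from the segmentation masks and map footprints. Below is a minimal illustrative sketch of that compositing step in Python/NumPy; the function name, array shapes, and the fronto-parallel per-instance depth are assumptions made for illustration, not the authors' implementation.

```python
# Hypothetical sketch of per-instance occlusion compositing: virtual pixels
# are drawn only where they are nearer than every occluding building.
# All names and shapes here are illustrative assumptions.
import numpy as np

def composite_with_building_occlusion(camera_rgb, virtual_rgb, virtual_depth,
                                      instance_masks, instance_depths):
    """camera_rgb/virtual_rgb: (H, W, 3) uint8 images; virtual_depth: (H, W)
    float, np.inf where there is no virtual content; instance_masks: list of
    (H, W) bool arrays from an instance segmentation network; instance_depths:
    per-instance metric depths derived from the 2D building map."""
    scene_depth = np.full(virtual_depth.shape, np.inf)
    for mask, depth in zip(instance_masks, instance_depths):
        # Each building contributes a (here: fronto-parallel) depth layer.
        scene_depth[mask] = np.minimum(scene_depth[mask], depth)
    # Draw a virtual pixel only if it lies in front of every building there.
    visible = virtual_depth < scene_depth
    out = camera_rgb.copy()
    out[visible] = virtual_rgb[visible]
    return out
```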
|
{
"proceeding": {
"id": "12OmNyugyQo",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBTs7oB",
"doi": "10.1109/WACV.2014.6836118",
"title": "Fast dense 3D reconstruction using an adaptive multiscale discrete-continuous variational method",
"normalizedTitle": "Fast dense 3D reconstruction using an adaptive multiscale discrete-continuous variational method",
"abstract": "We present a system for fast dense 3D reconstruction with a hand-held camera. Walking around a target object, we shoot sequential images using continuous shooting mode. High-quality camera poses are obtained offline using structure-from-motion (SfM) algorithm with Bundle Adjustment. Multi-view stereo is solved using a new, efficient adaptive multiscale discrete-continuous variational method to generate depth maps with sub-pixel accuracy. Depth maps are then fused into a 3D model using volumetric integration with truncated signed distance function (TSDF). Our system is accurate, efficient and flexible: accurate depth maps are estimated with sub-pixel accuracy in stereo matching; dense models can be achieved within minutes as major algorithms parallelized on multi-core processor and GPU; various tasks can be handled (e.g. reconstruction of objects in both indoor and outdoor environment with different scales) without specific hand-tuning parameters. We evaluate our system quantitatively and qualitatively on Middlebury benchmark and another dataset collected with a smartphone camera.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a system for fast dense 3D reconstruction with a hand-held camera. Walking around a target object, we shoot sequential images using continuous shooting mode. High-quality camera poses are obtained offline using structure-from-motion (SfM) algorithm with Bundle Adjustment. Multi-view stereo is solved using a new, efficient adaptive multiscale discrete-continuous variational method to generate depth maps with sub-pixel accuracy. Depth maps are then fused into a 3D model using volumetric integration with truncated signed distance function (TSDF). Our system is accurate, efficient and flexible: accurate depth maps are estimated with sub-pixel accuracy in stereo matching; dense models can be achieved within minutes as major algorithms parallelized on multi-core processor and GPU; various tasks can be handled (e.g. reconstruction of objects in both indoor and outdoor environment with different scales) without specific hand-tuning parameters. We evaluate our system quantitatively and qualitatively on Middlebury benchmark and another dataset collected with a smartphone camera.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a system for fast dense 3D reconstruction with a hand-held camera. Walking around a target object, we shoot sequential images using continuous shooting mode. High-quality camera poses are obtained offline using structure-from-motion (SfM) algorithm with Bundle Adjustment. Multi-view stereo is solved using a new, efficient adaptive multiscale discrete-continuous variational method to generate depth maps with sub-pixel accuracy. Depth maps are then fused into a 3D model using volumetric integration with truncated signed distance function (TSDF). Our system is accurate, efficient and flexible: accurate depth maps are estimated with sub-pixel accuracy in stereo matching; dense models can be achieved within minutes as major algorithms parallelized on multi-core processor and GPU; various tasks can be handled (e.g. reconstruction of objects in both indoor and outdoor environment with different scales) without specific hand-tuning parameters. We evaluate our system quantitatively and qualitatively on Middlebury benchmark and another dataset collected with a smartphone camera.",
"fno": "06836118",
"keywords": [
"Cameras",
"Three Dimensional Displays",
"Accuracy",
"Solid Modeling",
"Computational Modeling",
"Graphics Processing Units",
"Image Reconstruction"
],
"authors": [
{
"affiliation": "University of Southern California, Los Angeles, 90089, USA",
"fullName": "Zhuoliang Kang",
"givenName": null,
"surname": "Zhuoliang Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Southern California, Los Angeles, 90089, USA",
"fullName": "Gerard Medioni",
"givenName": "Gerard",
"surname": "Medioni",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-03-01T00:00:00",
"pubType": "proceedings",
"pages": "53-60",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4985-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06836117",
"articleId": "12OmNx965BW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06836119",
"articleId": "12OmNqBtiRb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391a882",
"title": "Variational PatchMatch MultiView Reconstruction and Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a882/12OmNAQJzP1",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2015/6683/0/6683a341",
"title": "A Sequential Online 3D Reconstruction System Using Dense Stereo Matching",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683a341/12OmNqH9hkt",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c751",
"title": "Hybrid Kinect Depth Map Refinement for Transparent Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c751/12OmNxveNNV",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a057",
"title": "Real-Time Direct Dense Matching on Fisheye Images Using Plane-Sweeping Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a057/12OmNy2Jt8W",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a069",
"title": "Dense and Occlusion-Robust Multi-view Stereo for Unstructured Videos",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a069/12OmNzcPAyC",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a674",
"title": "Real Time Complete Dense Depth Reconstruction for a Monocular Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a674/12OmNzdoMK3",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a086",
"title": "Dense Depth Map Reconstruction from Sparse Measurements Using a Multilayer Conditional Random Field Model",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a086/12OmNzmtWBI",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851f479",
"title": "Just Look at the Image: Viewpoint-Specific Surface Normal Prediction for Improved Multi-View Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f479/12OmNzvQI3W",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2011/1101/0/06126432",
"title": "Variational recursive joint estimation of dense scene structure and camera motion from monocular high speed traffic sequences",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126432/12OmNzyp634",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900c190",
"title": "DeepDNet: Deep Dense Network for Depth Completion Task",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900c190/1yXsP1ybj9u",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
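The TSDF volumetric integration this record relies on has a standard update rule: project each voxel into the current depth map, compute a truncated signed distance along the viewing ray, and fold it into a running weighted average. A minimal NumPy sketch follows, assuming C-contiguous volumes, a pinhole intrinsics matrix `K`, and a 4x4 world-to-camera extrinsic; it is a schematic of the classic KinectFusion-style update, not the paper's parallelized implementation.

```python
# Minimal TSDF integration sketch; all parameter names are assumptions.
import numpy as np

def integrate_depth_map(tsdf, weights, depth, K, cam_from_world,
                        voxel_origin, voxel_size, trunc):
    """tsdf, weights: C-contiguous (X, Y, Z) float volumes, updated in place.
    depth: (H, W) metric depth map; K: 3x3 intrinsics; cam_from_world: 4x4."""
    X, Y, Z = tsdf.shape
    ii, jj, kk = np.meshgrid(np.arange(X), np.arange(Y), np.arange(Z),
                             indexing="ij")
    pts = voxel_origin + voxel_size * np.stack([ii, jj, kk], -1).reshape(-1, 3)
    pts_h = np.concatenate([pts, np.ones((len(pts), 1))], axis=1)
    cam = (cam_from_world @ pts_h.T).T[:, :3]     # voxel centers, camera frame
    uvw = (K @ cam.T).T
    u = np.round(uvw[:, 0] / uvw[:, 2]).astype(int)
    v = np.round(uvw[:, 1] / uvw[:, 2]).astype(int)
    H, W = depth.shape
    ok = (cam[:, 2] > 0) & (u >= 0) & (u < W) & (v >= 0) & (v < H)
    d = np.zeros(len(pts))
    d[ok] = depth[v[ok], u[ok]]
    sdf = d - cam[:, 2]                           # positive in front of surface
    ok &= (d > 0) & (sdf > -trunc)                # drop holes, far-behind voxels
    tsdf_obs = np.clip(sdf / trunc, -1.0, 1.0)
    t, w = tsdf.reshape(-1), weights.reshape(-1)  # views into the volumes
    # Running weighted average: the standard TSDF fusion update.
    t[ok] = (t[ok] * w[ok] + tsdf_obs[ok]) / (w[ok] + 1.0)
    w[ok] += 1.0
```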
|
{
"proceeding": {
"id": "12OmNwwMf3H",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx5GU8K",
"doi": "10.1109/ISMAR-Adjunct.2016.0111",
"title": "Robust Keyframe-Based Monocular SLAM for Augmented Reality",
"normalizedTitle": "Robust Keyframe-Based Monocular SLAM for Augmented Reality",
"abstract": "In this demo, we present RKSLAM, a robust keyframe-based monocular SLAM system that can reliably handle fast motion with strong rotation and ensure good AR experiences. We contribute two key technical contributions: a novel multi-homography based feature tracking method which is very robust and efficient, and a sliding-window based camera pose optimization scheme which imposes the motion prior constraints between consecutive frames through simulated or real IMU data. Based on RKSLAM, we develop an AR App on a mobile device, which allows the user to freely insert 3D furniture models into the scene to see the AR effect without imagination.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this demo, we present RKSLAM, a robust keyframe-based monocular SLAM system that can reliably handle fast motion with strong rotation and ensure good AR experiences. We contribute two key technical contributions: a novel multi-homography based feature tracking method which is very robust and efficient, and a sliding-window based camera pose optimization scheme which imposes the motion prior constraints between consecutive frames through simulated or real IMU data. Based on RKSLAM, we develop an AR App on a mobile device, which allows the user to freely insert 3D furniture models into the scene to see the AR effect without imagination.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this demo, we present RKSLAM, a robust keyframe-based monocular SLAM system that can reliably handle fast motion with strong rotation and ensure good AR experiences. We contribute two key technical contributions: a novel multi-homography based feature tracking method which is very robust and efficient, and a sliding-window based camera pose optimization scheme which imposes the motion prior constraints between consecutive frames through simulated or real IMU data. Based on RKSLAM, we develop an AR App on a mobile device, which allows the user to freely insert 3D furniture models into the scene to see the AR effect without imagination.",
"fno": "07836532",
"keywords": [
"Augmented Reality",
"Feature Extraction",
"Mobile Computing",
"Object Tracking",
"Pose Estimation",
"SLAM Robots",
"Solid Modelling",
"Robust Keyframe Based Monocular SLAM",
"Augmented Reality",
"RKSLAM",
"AR Experiences",
"Multihomography Based Feature Tracking Method",
"Sliding Window Based Camera Pose Optimization Scheme",
"Motion Prior Constraints",
"IMU Data",
"AR App",
"Mobile Device",
"3 D Furniture Models",
"Three Dimensional Displays",
"Simultaneous Localization And Mapping",
"Cameras",
"Robustness",
"Solid Modeling",
"Mobile Handsets",
"Augmented Reality",
"SLAM",
"Augmented Reality",
"Tracking",
"Multiple Homography Representation",
"Mapping"
],
"authors": [
{
"affiliation": null,
"fullName": "Haomin Liu",
"givenName": "Haomin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guofeng Zhang",
"givenName": "Guofeng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hujun Bao",
"givenName": "Hujun",
"surname": "Bao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "340-341",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3740-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836531",
"articleId": "12OmNybfr5v",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836533",
"articleId": "12OmNCvumT0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504740",
"title": "Fast and accurate relocalization for keyframe-based SLAM using geometric model selection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504740/12OmNBB0bYl",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2010/4124/0/4124a005",
"title": "Simultaneous Localization and Mapping for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a005/12OmNvA1hcT",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671783",
"title": "Handling pure camera rotation in keyframe-based SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671783/12OmNvmG7YF",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a001",
"title": "Robust Keyframe-based Monocular SLAM for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a001/12OmNx57HK9",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2018/5114/0/511401a350",
"title": "Monocular SLAM Algorithm Based on Improved Depth Map Estimation and Keyframe Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2018/511401a350/12OmNyeECAZ",
"parentPublication": {
"id": "proceedings/icmtma/2018/5114/0",
"title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404531",
"title": "Global Localization from Monocular SLAM on a Mobile Phone",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404531/13rRUwdrdSA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798315",
"title": "A Hybrid RTK GNSS and SLAM Outdoor Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798315/1cJ0Soon8Yg",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a594",
"title": "On the Redundancy Detection in Keyframe-Based SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a594/1ezRCsrH9Be",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/4.803E306",
"title": "Unsupervised Collaborative Learning of Keyframe Detection and Visual Odometry Towards Monocular Deep SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/4.803E306/1hQqtAaoUes",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089490",
"title": "SPLAT: Spherical Localization and Tracking in Large Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089490/1jIxgqureDe",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
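The sliding-window pose optimization with an IMU motion prior described in this record can be pictured as a least-squares problem mixing reprojection residuals with penalties on how far consecutive relative rotations deviate from IMU predictions. The SciPy sketch below is a toy version under assumed parametrization (axis-angle plus translation, world-to-camera) and an assumed prior weight; RKSLAM's actual optimizer and multi-homography tracker are not reproduced here.

```python
# Toy sliding-window optimization sketch; parametrization and weights
# are illustrative assumptions, not RKSLAM's implementation.
import numpy as np
from scipy.optimize import least_squares
from scipy.spatial.transform import Rotation as R

def window_residuals(x, pts3d, obs, K, imu_rel_rotvecs, prior_w=1.0):
    """x: flattened (F, 6) poses, [axis-angle | translation], world-to-camera.
    pts3d: (N, 3) tracked points; obs: (F, N, 2) pixel observations;
    imu_rel_rotvecs: (F-1, 3) relative rotations predicted from IMU data."""
    F = obs.shape[0]
    poses = x.reshape(F, 6)
    res = []
    for f in range(F):
        Rc = R.from_rotvec(poses[f, :3]).as_matrix()
        cam = pts3d @ Rc.T + poses[f, 3:]          # points in camera frame
        proj = cam @ K.T
        res.append((proj[:, :2] / proj[:, 2:3] - obs[f]).ravel())
    for f in range(F - 1):
        Ri = R.from_rotvec(poses[f, :3])
        Rj = R.from_rotvec(poses[f + 1, :3])
        rel = (Rj * Ri.inv()).as_rotvec()          # camera f -> f+1 rotation
        # Motion prior: penalize deviation from the IMU-predicted rotation.
        res.append(prior_w * (rel - imu_rel_rotvecs[f]))
    return np.concatenate(res)

# e.g.: sol = least_squares(window_residuals, x0.ravel(),
#                           args=(pts3d, obs, K, imu_rel_rotvecs))
```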
|
{
"proceeding": {
"id": "12OmNyFCvPo",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyLiuEW",
"doi": "10.1109/ICCV.2013.348",
"title": "A Flexible Scene Representation for 3D Reconstruction Using an RGB-D Camera",
"normalizedTitle": "A Flexible Scene Representation for 3D Reconstruction Using an RGB-D Camera",
"abstract": "Updating a global 3D model with live RGB-D measurements has proven to be successful for 3D reconstruction of indoor scenes. Recently, a Truncated Signed Distance Function (TSDF) volumetric model and a fusion algorithm have been introduced (KinectFusion), showing significant advantages such as computational speed and accuracy of the reconstructed scene. This algorithm, however, is expensive in memory when constructing and updating the global model. As a consequence, the method is not well scalable to large scenes. We propose a new flexible 3D scene representation using a set of planes that is cheap in memory use and, nevertheless, achieves accurate reconstruction of indoor scenes from RGB-D image sequences. Projecting the scene onto different planes reduces significantly the size of the scene representation and thus it allows us to generate a global textured 3D model with lower memory requirement while keeping accuracy and easiness to update with live RGB-D measurements. Experimental results demonstrate that our proposed flexible 3D scene representation achieves accurate reconstruction, while keeping the scalability for large indoor scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Updating a global 3D model with live RGB-D measurements has proven to be successful for 3D reconstruction of indoor scenes. Recently, a Truncated Signed Distance Function (TSDF) volumetric model and a fusion algorithm have been introduced (KinectFusion), showing significant advantages such as computational speed and accuracy of the reconstructed scene. This algorithm, however, is expensive in memory when constructing and updating the global model. As a consequence, the method is not well scalable to large scenes. We propose a new flexible 3D scene representation using a set of planes that is cheap in memory use and, nevertheless, achieves accurate reconstruction of indoor scenes from RGB-D image sequences. Projecting the scene onto different planes reduces significantly the size of the scene representation and thus it allows us to generate a global textured 3D model with lower memory requirement while keeping accuracy and easiness to update with live RGB-D measurements. Experimental results demonstrate that our proposed flexible 3D scene representation achieves accurate reconstruction, while keeping the scalability for large indoor scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Updating a global 3D model with live RGB-D measurements has proven to be successful for 3D reconstruction of indoor scenes. Recently, a Truncated Signed Distance Function (TSDF) volumetric model and a fusion algorithm have been introduced (KinectFusion), showing significant advantages such as computational speed and accuracy of the reconstructed scene. This algorithm, however, is expensive in memory when constructing and updating the global model. As a consequence, the method is not well scalable to large scenes. We propose a new flexible 3D scene representation using a set of planes that is cheap in memory use and, nevertheless, achieves accurate reconstruction of indoor scenes from RGB-D image sequences. Projecting the scene onto different planes reduces significantly the size of the scene representation and thus it allows us to generate a global textured 3D model with lower memory requirement while keeping accuracy and easiness to update with live RGB-D measurements. Experimental results demonstrate that our proposed flexible 3D scene representation achieves accurate reconstruction, while keeping the scalability for large indoor scenes.",
"fno": "2840c800",
"keywords": [
"Three Dimensional Displays",
"Cameras",
"Solid Modeling",
"Image Reconstruction",
"Image Color Analysis",
"Color",
"Accuracy"
],
"authors": [
{
"affiliation": null,
"fullName": "Diego Thomas",
"givenName": "Diego",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Akihiro Sugimoto",
"givenName": "Akihiro",
"surname": "Sugimoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2800-2807",
"year": "2013",
"issn": "1550-5499",
"isbn": "978-1-4799-2840-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2840c792",
"articleId": "12OmNAGw14Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2840c808",
"articleId": "12OmNyfdOV5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2016/2491/0/2491a140",
"title": "Hierarchical Grouping Approach for Fast Approximate RGB-D Scene Flow",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a140/12OmNAolGSI",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977405",
"title": "Robust Camera Tracking by Combining Color and Depth Measurements",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977405/12OmNBhZ4nK",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2017/0560/0/08026256",
"title": "Real-time rendering of physical scene on virtual curved mirror with RGB-D camera networks",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026256/12OmNrAv3J5",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/349P3A38",
"title": "RGB-(D) scene labeling: Features and algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/349P3A38/12OmNrHB1WS",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06835732",
"title": "Robust tracking and mapping with a handheld RGB-D camera",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06835732/12OmNxG1yEt",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209d416",
"title": "Indoor Scene Recognition from RGB-D Images by Learning Scene Bases",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d416/12OmNyNQSHQ",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300b517",
"title": "X-Section: Cross-Section Prediction for Enhanced RGB-D Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300b517/1hQqx0MeVGw",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c172",
"title": "3D Scene Reconstruction With Multi-Layer Depth and Epipolar Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c172/1hVlfLRJFS0",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047367",
"title": "Parsing Indoor Scenes from RGB-D Image Using Superpixel and Region Merging",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047367/1iC6D9QWozm",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b747",
"title": "SPSG: Self-Supervised Photometric Scene Generation from RGB-D Scans",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b747/1yeLq17PHDG",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
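The memory savings in this plane-based representation come from replacing a dense 3D volume with per-plane 2D grids that store a signed distance (a "bump") and a color per cell. Below is a small sketch of that projection step with assumed grid resolution and plane parametrization; it illustrates the idea only, not the paper's update machinery.

```python
# Sketch of projecting a point cloud onto a plane-aligned 2D grid;
# basis construction, resolution and grid size are assumptions.
import numpy as np

def project_to_plane_grid(points, colors, plane_n, plane_d, res=0.01, size=512):
    """points: (N, 3), colors: (N, 3); plane is n.x + d = 0 with |n| = 1.
    Returns a signed-distance ('bump') map and a color map indexed by a
    2D parametrization of the plane: a compact, updatable scene layer."""
    n = plane_n / np.linalg.norm(plane_n)
    # Build an orthonormal basis (u, v) spanning the plane.
    a = np.array([1.0, 0, 0]) if abs(n[0]) < 0.9 else np.array([0, 1.0, 0])
    u = np.cross(n, a); u /= np.linalg.norm(u)
    v = np.cross(n, u)
    dist = points @ n + plane_d                    # signed distance to plane
    uv = np.stack([points @ u, points @ v], axis=1)
    idx = np.round(uv / res).astype(int) + size // 2
    ok = (idx >= 0).all(axis=1) & (idx < size).all(axis=1)
    bump = np.zeros((size, size))
    cmap = np.zeros((size, size, 3))
    bump[idx[ok, 0], idx[ok, 1]] = dist[ok]        # later points overwrite
    cmap[idx[ok, 0], idx[ok, 1]] = colors[ok]
    return bump, cmap
```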
|
{
"proceeding": {
"id": "18Ipiyawxl6",
"title": "2018 International Arab Conference on Information Technology (ACIT)",
"acronym": "acit",
"groupId": "1829564",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "18IpjjmLjgc",
"doi": "10.1109/ACIT.2018.8672679",
"title": "Design of an Automated 3D Scanner",
"normalizedTitle": "Design of an Automated 3D Scanner",
"abstract": "3D scanning is the process of generating a computer aided design model from an existing physical part. It can be achieved by different methods whether using: LASER, camera, and many advanced optical techniques. In this paper, we will concentrate on camera scanning technique, which is known as Photogrammetry. The input is a set of photographs, and the output is a 3D model of a real-world object. By using a camera and acquiring many images with predefined positions and orientations. 3D reconstruction from 2D images using Structure from Motion algorithm produces the point cloud and eventually the complete textured mesh. This system is specialized in 3D scanning of small objects. It involves the design of a fully automatic closed system of controlled scanning conditions in terms of lighting and angles of captured images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D scanning is the process of generating a computer aided design model from an existing physical part. It can be achieved by different methods whether using: LASER, camera, and many advanced optical techniques. In this paper, we will concentrate on camera scanning technique, which is known as Photogrammetry. The input is a set of photographs, and the output is a 3D model of a real-world object. By using a camera and acquiring many images with predefined positions and orientations. 3D reconstruction from 2D images using Structure from Motion algorithm produces the point cloud and eventually the complete textured mesh. This system is specialized in 3D scanning of small objects. It involves the design of a fully automatic closed system of controlled scanning conditions in terms of lighting and angles of captured images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D scanning is the process of generating a computer aided design model from an existing physical part. It can be achieved by different methods whether using: LASER, camera, and many advanced optical techniques. In this paper, we will concentrate on camera scanning technique, which is known as Photogrammetry. The input is a set of photographs, and the output is a 3D model of a real-world object. By using a camera and acquiring many images with predefined positions and orientations. 3D reconstruction from 2D images using Structure from Motion algorithm produces the point cloud and eventually the complete textured mesh. This system is specialized in 3D scanning of small objects. It involves the design of a fully automatic closed system of controlled scanning conditions in terms of lighting and angles of captured images.",
"fno": "08672679",
"keywords": [
"Cameras",
"Image Motion Analysis",
"Image Reconstruction",
"Image Scanners",
"Image Texture",
"Photogrammetry",
"LASER",
"Advanced Optical Techniques",
"Camera Scanning Technique",
"Point Cloud",
"Complete Textured Mesh",
"Fully Automatic Closed System",
"Controlled Scanning Conditions",
"Computer Aided Design Model",
"Photogrammetry",
"Structure From Motion Algorithm",
"Photographs",
"3 D Image Reconstruction",
"2 D Image Reconstruction",
"Automated 3 D Object Scanner Design",
"Three Dimensional Displays",
"Solid Modeling",
"Cameras",
"Surface Reconstruction",
"Feature Extraction",
"Image Reconstruction",
"Hardware",
"3 D Scanning",
"Photogrammetry",
"Structure From Motion",
"3 D Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Alaa Abd-Raheem",
"givenName": "Alaa",
"surname": "Abd-Raheem",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechatronics Engineering, The University of Jordan, Amman, Jordan",
"fullName": "Farah AlDeiri",
"givenName": "Farah",
"surname": "AlDeiri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechatronics Engineering, The University of Jordan, Amman, Jordan",
"fullName": "Musa Alyaman",
"givenName": "Musa",
"surname": "Alyaman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2018",
"issn": null,
"isbn": "978-1-7281-0385-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08672682",
"articleId": "18Ipl21XPHi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08672680",
"articleId": "18IpiVKEklq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2016/2312/0/2312a428",
"title": "Online Detection Technique of 3D Defects for Steel Strips Based on Photometric Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a428/12OmNAio73T",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c326",
"title": "SurfaceNet: An End-to-End 3D Neural Network for Multiview Stereopsis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c326/12OmNB8TUfZ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a692",
"title": "3D Surface Extraction Using Incremental Tetrahedra Carving",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a692/12OmNC3XhgZ",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a055",
"title": "When Specular Object Meets RGB-D Camera 3D Scanning: Color Image Plus Fragmented Depth Map",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a055/12OmNyUWR8A",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcabes/2015/6593/0/6593a352",
"title": "Quick Capture and Reconstruction for 3D Head",
"doi": null,
"abstractUrl": "/proceedings-article/dcabes/2015/6593a352/12OmNyUnEKB",
"parentPublication": {
"id": "proceedings/dcabes/2015/6593/0",
"title": "2015 14th International Symposium on Distributed Computing and Applications for Business Engineering and Science (DCABES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/02/07833201",
"title": "Surface Reconstruction via Fusing Sparse-Sequence of Depth Images",
"doi": null,
"abstractUrl": "/journal/tg/2018/02/07833201/13rRUx0gezW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoias/2019/2662/0/266200a020",
"title": "An Accurate Phase Measuring Deflectometry Method for 3D Reconstruction of Mirror-Like Specular Surface",
"doi": null,
"abstractUrl": "/proceedings-article/icoias/2019/266200a020/1c8PadXkae4",
"parentPublication": {
"id": "proceedings/icoias/2019/2662/0",
"title": "2019 2nd International Conference on Intelligent Autonomous Systems (ICoIAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523840",
"title": "Mobile3DScanner: An Online 3D Scanner for High-quality Object Reconstruction with a Mobile Device",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523840/1wpqvrW88O4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900b542",
"title": "An Efficient 3D Synthetic Model Generation Pipeline for Human Pose Data Augmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900b542/1yJYl0Ho3QI",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a136",
"title": "MaCal - Macro Lens Calibration and the Focus Stack Camera Model",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a136/1zWEbfHVrnW",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
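At the core of the photogrammetry pipeline this record describes is incremental structure from motion; its two-view bootstrap (feature matching, essential-matrix estimation, pose recovery, triangulation) can be sketched with OpenCV as below. The calibration matrix `K` and image paths are assumed inputs, and a real scanner would chain this step over all captured views and refine the result with bundle adjustment.

```python
# Minimal two-view SfM bootstrap sketch (OpenCV); an illustration of the
# general technique, not the authors' scanner software.
import cv2
import numpy as np

def two_view_cloud(img1_path, img2_path, K):
    """Bootstrap a sparse point cloud (up to scale) from two calibrated views."""
    im1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
    im2 = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
    sift = cv2.SIFT_create()
    k1, d1 = sift.detectAndCompute(im1, None)
    k2, d2 = sift.detectAndCompute(im2, None)
    matches = cv2.BFMatcher(cv2.NORM_L2).knnMatch(d1, d2, k=2)
    # Lowe's ratio test keeps only distinctive matches.
    good = [m for m, n in (p for p in matches if len(p) == 2)
            if m.distance < 0.75 * n.distance]
    p1 = np.float32([k1[m.queryIdx].pt for m in good])
    p2 = np.float32([k2[m.trainIdx].pt for m in good])
    E, _ = cv2.findEssentialMat(p1, p2, K, method=cv2.RANSAC, threshold=1.0)
    _, Rmat, t, _ = cv2.recoverPose(E, p1, p2, K)
    # Triangulate the matches into an initial sparse point cloud.
    P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
    P2 = K @ np.hstack([Rmat, t])
    Xh = cv2.triangulatePoints(P1, P2, p1.T, p2.T)
    return (Xh[:3] / Xh[3]).T
```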
|
{
"proceeding": {
"id": "1a3x4M4IIJa",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "1a3x6hGWsso",
"doi": "10.1109/ICVRV.2018.00009",
"title": "Keyframe-Based Texture Mapping for RGBD Human Reconstruction",
"normalizedTitle": "Keyframe-Based Texture Mapping for RGBD Human Reconstruction",
"abstract": "Realistic human model has a wide range of requirements in 3D content creation. A model with high-quality texture map can display human body surface details in low facets which could be toughly represented by geometric mesh. Image-based texture mapping suffers from discontinuities due to geometry inaccuracy, camera pose drifts, and illumination changes. In this paper, we propose a keyframe-based texture map generation method to obtain more desired texture mapping results. Our method firstly acquire the keyframes by performing a spatio-temporal sampling strategy, rather than just sampling keyframes according to time interval. Then, we apply an efficient patch-based optimization to the keyframes to make the texture data in different views alinged with each other. Finally, we generate a texture atlas from the aligned texture and the simplified mesh. Experimental results demonstrate that our method can get realistic human models with low facets and competitive details within short minutes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Realistic human model has a wide range of requirements in 3D content creation. A model with high-quality texture map can display human body surface details in low facets which could be toughly represented by geometric mesh. Image-based texture mapping suffers from discontinuities due to geometry inaccuracy, camera pose drifts, and illumination changes. In this paper, we propose a keyframe-based texture map generation method to obtain more desired texture mapping results. Our method firstly acquire the keyframes by performing a spatio-temporal sampling strategy, rather than just sampling keyframes according to time interval. Then, we apply an efficient patch-based optimization to the keyframes to make the texture data in different views alinged with each other. Finally, we generate a texture atlas from the aligned texture and the simplified mesh. Experimental results demonstrate that our method can get realistic human models with low facets and competitive details within short minutes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Realistic human model has a wide range of requirements in 3D content creation. A model with high-quality texture map can display human body surface details in low facets which could be toughly represented by geometric mesh. Image-based texture mapping suffers from discontinuities due to geometry inaccuracy, camera pose drifts, and illumination changes. In this paper, we propose a keyframe-based texture map generation method to obtain more desired texture mapping results. Our method firstly acquire the keyframes by performing a spatio-temporal sampling strategy, rather than just sampling keyframes according to time interval. Then, we apply an efficient patch-based optimization to the keyframes to make the texture data in different views alinged with each other. Finally, we generate a texture atlas from the aligned texture and the simplified mesh. Experimental results demonstrate that our method can get realistic human models with low facets and competitive details within short minutes.",
"fno": "849700a001",
"keywords": [
"Computational Geometry",
"Image Reconstruction",
"Image Sensors",
"Image Texture",
"RGBD Human Reconstruction",
"Realistic Human Model",
"3 D Content Creation",
"High Quality Texture Map",
"Human Body Surface Details",
"Geometric Mesh",
"Keyframe Based Texture Map Generation Method",
"Spatio Temporal Sampling Strategy",
"Texture Data",
"Texture Atlas",
"Aligned Texture",
"Patch Based Optimization",
"Image Reconstruction",
"Cameras",
"Optimization",
"Geometry",
"Solid Modeling",
"Image Color Analysis",
"Face",
"Texture Mapping",
"Patch Match",
"Alternating Optimization",
"Keyframe Selection"
],
"authors": [
{
"affiliation": null,
"fullName": "Yishu Heng",
"givenName": "Yishu",
"surname": "Heng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chen Wang",
"givenName": "Chen",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yue Qi",
"givenName": "Yue",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2018",
"issn": "2375-141X",
"isbn": "978-1-5386-8497-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "849700z016",
"articleId": "1a3x4VzInD2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "849700a008",
"articleId": "1a3x7jLsFPi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdh/2014/4284/0/4284a180",
"title": "An Improved Texture Mapping Model Based on Mesh Parameterization in 3D Garments",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a180/12OmNxAlA30",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459378",
"title": "Superresolution texture maps for multiview reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459378/12OmNxuFBnH",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmip/2017/5954/0/5954a136",
"title": "High Quality Texture Mapping for Multi-view Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icmip/2017/5954a136/12OmNz4BdpT",
"parentPublication": {
"id": "proceedings/icmip/2017/5954/0",
"title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000e645",
"title": "Texture Mapping for 3D Reconstruction with RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000e645/17D45Wuc36V",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08708933",
"title": "UnstructuredFusion: Realtime 4D Geometry and Texture Reconstruction Using Commercial RGBD Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08708933/19Q3hT6JyUg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/06/09705143",
"title": "Adaptive Joint Optimization for 3D Reconstruction With Differentiable Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2023/06/09705143/1AIIcwNiqxq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800f949",
"title": "Joint Texture and Geometry Optimization for RGB-D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800f949/1m3ogA88vw4",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/10/09374106",
"title": "DTexFusion: Dynamic Texture Fusion Using a Consumer RGBD Sensor",
"doi": null,
"abstractUrl": "/journal/tg/2022/10/09374106/1rPtmp3tBSM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h115",
"title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09645189",
"title": "Seamless Texture Optimization for RGB-D Reconstruction",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09645189/1zc6CdFskcU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
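A spatio-temporal keyframe sampling strategy of the kind this record contrasts with fixed time-interval sampling typically admits a new keyframe only when the camera has translated or rotated enough since the last one. Here is a minimal sketch under assumed thresholds; the exact criterion used by the authors may differ.

```python
# Sketch of spatio-temporal keyframe selection; thresholds are
# illustrative assumptions.
import numpy as np

def select_keyframes(poses, min_trans=0.15, min_rot_deg=15.0):
    """poses: list of 4x4 camera-to-world matrices, one per frame.
    Returns indices of frames kept as keyframes."""
    keys = [0]
    for i in range(1, len(poses)):
        last, cur = poses[keys[-1]], poses[i]
        trans = np.linalg.norm(cur[:3, 3] - last[:3, 3])
        # Relative rotation angle from the trace of R_rel.
        Rrel = last[:3, :3].T @ cur[:3, :3]
        cos_a = np.clip((np.trace(Rrel) - 1.0) / 2.0, -1.0, 1.0)
        angle = np.degrees(np.arccos(cos_a))
        if trans > min_trans or angle > min_rot_deg:
            keys.append(i)
    return keys
```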
|
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysoMThT0Y",
"doi": "10.1109/ISMAR-Adjunct.2019.00-44",
"title": "Blended-Keyframes for Mobile Mediated Reality Applications",
"normalizedTitle": "Blended-Keyframes for Mobile Mediated Reality Applications",
"abstract": "With the recent developments of Mixed Reality (MR) devices and advances in 3D scene understanding, MR applications on mobile devices are becoming available to a large part of the society. These applications allow users to mix virtual content into the surrounding environment. However the ability to mediate (i.e., modify or alter) the surrounding environment remains a difficult and unsolved problem that limits the degree of immersion of current MR applications on mobile devices. In this paper, we present a method to mediate 2D views of a real environment using a single consumer-grade RGB-D camera and without the need of pre-scanning the scene. Our proposed method creates in real-time a dense and detailed keyframe-based 3D map of the real scene and takes advantage of a semantic instance segmentation to isolate target objects. We show that our proposed method allows to remove target objects in the environment and to replace them by their virtual counterpart, which are built on-the-fly. Such an approach is well suited for creating mobile Mediated Reality applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the recent developments of Mixed Reality (MR) devices and advances in 3D scene understanding, MR applications on mobile devices are becoming available to a large part of the society. These applications allow users to mix virtual content into the surrounding environment. However the ability to mediate (i.e., modify or alter) the surrounding environment remains a difficult and unsolved problem that limits the degree of immersion of current MR applications on mobile devices. In this paper, we present a method to mediate 2D views of a real environment using a single consumer-grade RGB-D camera and without the need of pre-scanning the scene. Our proposed method creates in real-time a dense and detailed keyframe-based 3D map of the real scene and takes advantage of a semantic instance segmentation to isolate target objects. We show that our proposed method allows to remove target objects in the environment and to replace them by their virtual counterpart, which are built on-the-fly. Such an approach is well suited for creating mobile Mediated Reality applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the recent developments of Mixed Reality (MR) devices and advances in 3D scene understanding, MR applications on mobile devices are becoming available to a large part of the society. These applications allow users to mix virtual content into the surrounding environment. However the ability to mediate (i.e., modify or alter) the surrounding environment remains a difficult and unsolved problem that limits the degree of immersion of current MR applications on mobile devices. In this paper, we present a method to mediate 2D views of a real environment using a single consumer-grade RGB-D camera and without the need of pre-scanning the scene. Our proposed method creates in real-time a dense and detailed keyframe-based 3D map of the real scene and takes advantage of a semantic instance segmentation to isolate target objects. We show that our proposed method allows to remove target objects in the environment and to replace them by their virtual counterpart, which are built on-the-fly. Such an approach is well suited for creating mobile Mediated Reality applications.",
"fno": "476500a211",
"keywords": [
"Cameras",
"Image Colour Analysis",
"Image Segmentation",
"Mobile Computing",
"Virtual Reality",
"Target Object Removal",
"Semantic Instance Segmentation",
"Virtual Content",
"3 D Scene Understanding",
"Mixed Reality Devices",
"Mobile Mediated Reality Applications",
"Dense Keyframe Based 3 D Map",
"Single Consumer Grade RGB D Camera",
"Mediate 2 D Views",
"Mobile Devices",
"MR Applications",
"Blended Keyframes",
"Three Dimensional Displays",
"Cameras",
"Image Reconstruction",
"Semantics",
"Image Segmentation",
"Virtual Reality",
"Solid Modeling",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Kyushu University / Dalian University of Technology",
"fullName": "Yu Xue",
"givenName": "Yu",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University",
"fullName": "Diego Thomas",
"givenName": "Diego",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Toulouse",
"fullName": "Frédéric Rayar",
"givenName": "Frédéric",
"surname": "Rayar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University",
"fullName": "Hideaki Uchiyama",
"givenName": "Hideaki",
"surname": "Uchiyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kyushu University",
"fullName": "Rin-ichiro Taniguchi",
"givenName": "Rin-ichiro",
"surname": "Taniguchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dalian University of Technology",
"fullName": "Boacai Yin",
"givenName": "Boacai",
"surname": "Yin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "211-216",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a205",
"articleId": "1gysmxeVup2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a217",
"articleId": "1gysl6bZquc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a037",
"title": "A Single Camera Image Based Approach for Glossy Reflections in Mixed Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a037/12OmNrJAdMm",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460081",
"title": "Mixed-reality snapshot system using environmental depth sensors",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460081/12OmNs0kyAh",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948512",
"title": "Diminished reality as challenging extension of mixed and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948512/12OmNy68EMC",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948517",
"title": "Collaboration in mediated and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948517/12OmNy6HQPU",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a025",
"title": "Diminished Reality as Challenging Issue in Mixed and Augmented Reality (IWDR2015) Summary",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a025/12OmNy6ZrYB",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759621",
"title": "An engine of virtual reality mixing environment based on real-time modeling and interaction",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759621/12OmNzb7Zpa",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2006/2671/0/04030822",
"title": "Integrating a Real-Time Captured Object into Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2006/04030822/14dcEdLb3x5",
"parentPublication": {
"id": "proceedings/cw/2006/2671/0",
"title": "2006 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a138",
"title": "Glossy Reflections for Mixed Reality Environments on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a138/17D45Wda7hc",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998303",
"title": "Physically-inspired Deep Light Estimation from a Homogeneous-Material Object for Mixed Reality Lighting",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998303/1hrXfo1lGb6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090568",
"title": "Real-time Illumination Estimation for Mixed Reality on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090568/1jIxuGbpWa4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|