2023
Sathaporn "Hubert" Hu; Derek Reilly
Comparative Glyph-Field Trajectory Analyses with an AR+Tablet Hybrid User Interface for Geospatial Analysis Tasks Proceedings Article
In: Normand, Jean-Marie; Sugimoto, Maki; Sundstedt, Veronica (Ed.): ICAT-EGVE 2023 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments, The Eurographics Association, 2023.
Links | BibTeX | Tags: ARTIV, immersive visualization, mobile
@inproceedings{Hu2023,
title = {Comparative Glyph-Field Trajectory Analyses with an {AR}+Tablet Hybrid User Interface for Geospatial Analysis Tasks},
author = {Hu, Sathaporn ``Hubert'' and Reilly, Derek},
editor = {Normand, Jean-Marie and Sugimoto, Maki and Sundstedt, Veronica},
url = {https://diglib.eg.org/handle/10.2312/egve20231320},
doi = {10.2312/egve.20231320},
year = {2023},
date = {2023-12-06},
urldate = {2023-12-06},
booktitle = {ICAT-EGVE 2023 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments},
publisher = {The Eurographics Association},
keywords = {ARTIV, immersive visualization, mobile},
pubstate = {published},
tppubtype = {inproceedings}
}
Majid Nasirinejad; Derek Reilly
mF+C: Linking Suboptimal Projections to Detail on Handheld Devices Journal Article
In: International Journal of Human Computer Studies (IJHCS), 2023, ISSN: 1071-5819.
Abstract | Links | BibTeX | Tags: mobile, projection, ubiquitous computing, visualization
@article{Nasirinejad2023,
title = {{mF+C}: Linking Suboptimal Projections to Detail on Handheld Devices},
author = {Nasirinejad, Majid and Reilly, Derek},
doi = {10.1016/j.ijhcs.2023.103170},
issn = {1071-5819},
year = {2023},
date = {2023-10-14},
journal = {International Journal of Human Computer Studies (IJHCS)},
abstract = {Mobile Focus + Context (mF+C) involves using a handheld device as a focus screen for content on an immersive display or mobile projector. In this work we examine how using a focus device can mitigate poor context image quality due to environmental factors. In an exploratory study we compare three techniques for linking focus and context: lens-focus (Lens), where the device works as a mobile lens held parallel to and in front of the context, center-focus (Centered), where the user holds the device in the center of the projection and pans both context and focus by swiping, and marker-focus (Marker), where the focus region is highlighted on the context, and the user pans the focus by swiping. Participants performed searching, tracing, and detail acquisition tasks with maps and electronics diagrams under a range of simulated projection conditions. All techniques were able to mitigate poor projection quality and performed comparably in time and precision, but the effectiveness of each technique was impacted by task type. There was lower variation in time between participants using Lens for tracing tasks versus the other techniques, but wider variation for searching tasks. Tasks completed using sub-optimal projections involved more time spent looking at the context image than tasks with clear projections, however this difference is less pronounced for the Lens technique. We propose a hybrid Lens-Marker approach for mobile Focus+Context applications in dynamic environments.},
keywords = {mobile, projection, ubiquitous computing, visualization},
pubstate = {published},
tppubtype = {article}
}
2022
Juliano Franz; Derek Reilly
Ride With Me: Exploring Group Road Cycling Through Contextual Design Proceedings Article
In: Proceedings of ACM SIGCHI Conference on Designing Interactive Systems (DIS 2022), ACM, 2022.
BibTeX | Tags: annotation, collaborative systems, cycling, design, ethnography, mobile, visualization
@inproceedings{FranzDIS2022,
title = {Ride With Me: Exploring Group Road Cycling Through Contextual Design},
author = {Franz, Juliano and Reilly, Derek},
year = {2022},
date = {2022-06-20},
urldate = {2022-06-20},
booktitle = {Proceedings of ACM SIGCHI Conference on Designing Interactive Systems (DIS 2022)},
publisher = {ACM},
keywords = {annotation, collaborative systems, cycling, design, ethnography, mobile, visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Fatima Alshammari; Khalid Tearo; Aaqib Mohammed; Rita Orji; Kirstie Hawkey; Derek Reilly
MAR: A Study of the Impact of Positive and Negative Reinforcement on Medication Adherence Reminders Proceedings Article
In: Proceedings of SeGAH 2020, IEEE, 2020.
BibTeX | Tags: gamification, health, mobile, persuasive technology
@inproceedings{AlShammari2020,
title = {{MAR}: A Study of the Impact of Positive and Negative Reinforcement on Medication Adherence Reminders},
author = {Alshammari, Fatima and Tearo, Khalid and Mohammed, Aaqib and Orji, Rita and Hawkey, Kirstie and Reilly, Derek},
year = {2020},
date = {2020-08-12},
booktitle = {Proceedings of SeGAH 2020},
publisher = {IEEE},
keywords = {gamification, health, mobile, persuasive technology},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Felwah Alqahtani; Derek Reilly
It’s the Gesture That (re)Counts: Annotating While Running to Recall Affective Experience Conference
Proceedings of Graphics Interface (GI 2018), Toronto, Canada, 2018.
Abstract | Links | BibTeX | Tags: embodied interaction, emotional recall, gesture, mobile, running, spatial annotation, visualization
@conference{Alqahtani2018,
title = {It’s the Gesture That (re)Counts: Annotating While Running to Recall Affective Experience},
author = {Alqahtani, Felwah and Reilly, Derek},
doi = {10.20380/GI2018.12},
year = {2018},
date = {2018-05-08},
booktitle = {Proceedings of Graphics Interface (GI 2018)},
address = {Toronto, Canada},
abstract = {We present results from a study exploring whether gestural annotations of felt emotion presented on a map-based visualization can support recall of affective experience during recreational runs. We compare gestural annotations with audio and video notes and a “mental note” baseline. In our study, 20 runners were asked to record their emotional state at regular intervals while running a familiar route. Each runner used one of the four methods to capture emotion over four separate runs. Five days after the last run, runners used an interactive map-based visualization to review and recall their running experiences. Results indicate that gestural annotation promoted recall of affective experience more effectively than the baseline condition, as measured by confidence in recall and detail provided. Gestural annotation was also comparable to video and audio annotation in terms of recollection confidence and detail. Audio annotation supported recall primarily through the runner's spoken annotation, but sound in the background was sometimes used. Video annotation yielded the most detail, much directly related to visual cues in the video, however using video annotations required runners to stop during their runs. Given these results we propose that background logging of ambient sounds and video may supplement gestural annotation.},
keywords = {embodied interaction, emotional recall, gesture, mobile, running, spatial annotation, visualization},
pubstate = {published},
tppubtype = {conference}
}
2017
Joseph Malloch; Carla Griggio; Joanna McGrenere; Wendy E. Mackay
Fieldward and Pathward: Dynamic Guides for Defining your own Gestures Conference
Proceedings of the ACM SIGCHI Conference on Human Factors in Computing Systems (CHI 2017), ACM Denver, USA, 2017.
Abstract | Links | BibTeX | Tags: dynamic guides, feedforward, gesture, mobile, progressive feedforward
@conference{Malloch2017_CHI,
title = {Fieldward and Pathward: Dynamic Guides for Defining your own Gestures},
author = {Malloch, Joseph and Griggio, Carla and McGrenere, Joanna and Mackay, Wendy E.},
doi = {10.1145/3025453.3025764},
year = {2017},
date = {2017-05-06},
booktitle = {Proceedings of the ACM SIGCHI Conference on Human Factors in Computing Systems (CHI 2017)},
address = {Denver, USA},
organization = {ACM},
abstract = {Although users accomplish ever more tasks on touch-enabled mobile devices, gesture-based interaction remains limited and almost never customizable by users. Our goal is to help users create gestures that are both personally memorable and reliably recognized by a touch-enabled mobile device. We address these competing requirements with two dynamic guides that use progressive feedforward to interactively visualize the "negative space" of unused gestures: the Pathward technique suggests four possible completions to the current gesture, and the Fieldward technique uses color gradients to reveal optimal directions for creating recognizable gestures. We ran a two-part experiment in which 27 participants each created 42 personal gesture shortcuts on a smartphone, using Pathward, Fieldward or No Feedforward. The Fieldward technique best supported the most common user strategy, i.e. to create a memorable gesture first and then adapt it to be recognized by the system. Users preferred the Fieldward technique to Pathward or No Feedforward, and remembered gestures more easily when using the technique. Dynamic guides can help developers design novel gesture vocabularies and support users as they design custom gestures for mobile applications.},
keywords = {dynamic guides, feedforward, gesture, mobile, progressive feedforward},
pubstate = {published},
tppubtype = {conference}
}