2024
Sathaporn "Hubert" Hu; Muhammad Raza; Derek Reilly
Gander: Preliminary Design and Evaluation of an AR + Tablet System for Geospatial Analysis Proceedings Article Forthcoming
In: IEEE Symposium on Mixed and Augmented Reality (ISMAR 2024) Adjunct Proceedings (MASK 2024), Forthcoming.
BibTeX | Tags: AR, ARTIV, augmented reality, design, geospatial analytics, immersive visualization, visualization
@inproceedings{Hu2024b,
title = {Gander: Preliminary Design and Evaluation of an AR + Tablet System for Geospatial Analysis},
author = {Sathaporn "Hubert" Hu and Muhammad Raza and Derek Reilly},
year = {2024},
date = {2024-10-22},
urldate = {2024-10-22},
booktitle = {IEEE Symposium on Mixed and Augmented Reality (ISMAR 2024) Adjunct Proceedings (MASK 2024)},
keywords = {AR, ARTIV, augmented reality, design, geospatial analytics, immersive visualization, visualization},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
Seyed Adel Ghaeinian
Design and Implementation of an Interactive Visual Querying System for Maritime Data Masters Thesis
Dalhousie University, 2024.
Abstract | BibTeX | Tags: AMNIS, tabletop displays, visualization
@mastersthesis{Ghaeinian2024,
title = {Design and Implementation of an Interactive Visual Querying System for Maritime Data},
author = {Seyed Adel Ghaeinian},
year = {2024},
date = {2024-06-10},
school = {Dalhousie University},
abstract = {Automatic Identification System (AIS) data is a crucial foundation of maritime operations such as navigation, traffic management, and safety monitoring, and it demands robust management, visualization, and interactive visual analytics systems for operators. Since usability, effectiveness, and accuracy are key factors in such maritime operations, it is important to design, implement, and evaluate these tools meticulously and to incorporate the latest advancements in user interaction and analytics. This research explores the design and implementation of a novel system architecture for maritime data querying and exploration, enabling enhanced user interactions through direct-manipulation techniques. The architecture provides a collaborative environment that incorporates Mixed Reality (MR) and a touch-enabled tabletop interface, along with the iterative design and evaluation of a graph-based Visual Query Builder (VQB). The VQB interface aims to let non-experts explore and query maritime data without needing technical skills, while enhancing decision making and spatial awareness in maritime operations. This research further evaluates the VQB interface in terms of efficiency, accuracy, and user preference through a user study. For this study, we developed a baseline web-based textual interface for SPARQL queries, enhanced with auto-correction features and additional spatial querying capabilities, to measure and fairly compare the performance of the VQB interface. Twenty students from the Faculty of Computer Science, none with prior knowledge of RDF data querying or the SPARQL language, participated in the study. Overall, the results were promising: participants were more efficient and accurate, and reported lower perceived workload, with the VQB interface than with the baseline. These findings highlight the role of visual query interfaces in improving user experience and efficiency, especially for non-experts, allowing them to explore and query maritime data without having to learn technical skills.},
keywords = {AMNIS, tabletop displays, visualization},
pubstate = {published},
tppubtype = {mastersthesis}
}
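To give a sense of the textual baseline the VQB is compared against, below is a minimal sketch in Python (rdflib) of querying vessel data with SPARQL. The AIS vocabulary (ex:Vessel, ex:speedOverGround) and the sample data are hypothetical, invented for illustration; the thesis does not publish its ontology.

# Minimal sketch of the kind of textual SPARQL query the graph-based VQB
# replaces. The AIS schema below is hypothetical, for illustration only.
from rdflib import RDF, Graph, Literal, Namespace

EX = Namespace("http://example.org/ais#")

g = Graph()
g.add((EX.vessel1, RDF.type, EX.Vessel))
g.add((EX.vessel1, EX.name, Literal("Atlantic Carrier")))
g.add((EX.vessel1, EX.speedOverGround, Literal(14.2)))

# Find vessels moving faster than 10 knots -- the sort of filter a
# non-expert would assemble visually in the VQB instead of typing.
query = """
PREFIX ex: <http://example.org/ais#>
SELECT ?name ?sog WHERE {
    ?v a ex:Vessel ;
       ex:name ?name ;
       ex:speedOverGround ?sog .
    FILTER (?sog > 10)
}
"""
for name, sog in g.query(query):
    print(f"{name}: {sog} knots")

The study's finding that assembling such queries visually was faster and more accurate for non-experts is consistent with how much SPARQL syntax even this small filter demands.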
2023
Majid Nasirinejad; Derek Reilly
mF+C: Linking Suboptimal Projections to Detail on Handheld Devices Journal Article
In: International Journal of Human-Computer Studies (IJHCS), 2023, ISSN: 1071-5819.
Abstract | Links | BibTeX | Tags: mobile, projection, ubiquitous computing, visualization
@article{Nasirinejad2023,
title = {mF+C: Linking Suboptimal Projections to Detail on Handheld Devices},
author = {Majid Nasirinejad and Derek Reilly},
doi = {10.1016/j.ijhcs.2023.103170},
issn = {1071-5819},
year = {2023},
date = {2023-10-14},
journal = {International Journal of Human-Computer Studies (IJHCS)},
abstract = {Mobile Focus + Context (mF+C) involves using a handheld device as a focus screen for content on an immersive display or mobile projector. In this work we examine how using a focus device can mitigate poor context image quality due to environmental factors. In an exploratory study we compare three techniques for linking focus and context: lens-focus (Lens), where the device works as a mobile lens held parallel to and in front of the context; center-focus (Centered), where the user holds the device in the center of the projection and pans both context and focus by swiping; and marker-focus (Marker), where the focus region is highlighted on the context and the user pans the focus by swiping. Participants performed searching, tracing, and detail acquisition tasks with maps and electronics diagrams under a range of simulated projection conditions. All techniques were able to mitigate poor projection quality and performed comparably in time and precision, but the effectiveness of each technique was impacted by task type. Variation in completion time across participants was lower with Lens than with the other techniques for tracing tasks, but wider for searching tasks. Tasks completed using sub-optimal projections involved more time spent looking at the context image than tasks with clear projections; however, this difference was less pronounced for the Lens technique. We propose a hybrid Lens-Marker approach for mobile Focus+Context applications in dynamic environments.},
keywords = {mobile, projection, ubiquitous computing, visualization},
pubstate = {published},
tppubtype = {article}
}
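For readers unfamiliar with Focus+Context, here is a rough sketch of how the Lens technique described in the abstract could map device position to a focus region. The function name, fixed zoom factor, and normalized-coordinate convention are assumptions for illustration, not the paper's implementation.

# Illustrative sketch of the Lens mapping: the handheld, held parallel to and
# in front of the projected context, shows a zoomed-in window of the source
# image centred on the device's position over the projection. The zoom factor
# and coordinate convention are assumed, not taken from the paper.
def lens_focus_crop(device_xy, source_size, zoom=4.0):
    """Map the device's normalized position over the context (0..1 per axis)
    to a (left, top, width, height) crop in the full-resolution source."""
    src_w, src_h = source_size
    crop_w, crop_h = src_w / zoom, src_h / zoom  # size of the focus window
    # Centre the crop on the device position, clamped to the image bounds.
    left = min(max(device_xy[0] * src_w - crop_w / 2, 0), src_w - crop_w)
    top = min(max(device_xy[1] * src_h - crop_h / 2, 0), src_h - crop_h)
    return (left, top, crop_w, crop_h)

# e.g. device held over the upper-left quadrant of a 4096x4096 map:
print(lens_focus_crop((0.25, 0.25), (4096, 4096)))  # (512.0, 512.0, 1024.0, 1024.0)

Under this reading, the Marker variant would keep the same crop computation but drive device_xy from swipe gestures rather than physical device position, matching the abstract's description.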
2022
Juliano Franz; Derek Reilly
Ride With Me: Exploring Group Road Cycling Through Contextual Design Proceedings Article
In: Proceedings of the ACM SIGCHI Conference on Designing Interactive Systems (DIS 2022), ACM, 2022.
BibTeX | Tags: annotation, collaborative systems, cycling, design, ethnography, mobile, visualization
@inproceedings{FranzDIS2022,
title = {Ride With Me: Exploring Group Road Cycling Through Contextual Design},
author = {Juliano Franz and Derek Reilly},
year = {2022},
date = {2022-06-20},
urldate = {2022-06-20},
booktitle = {Proceedings of the ACM SIGCHI Conference on Designing Interactive Systems (DIS 2022)},
publisher = {ACM},
keywords = {annotation, collaborative systems, cycling, design, ethnography, mobile, visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Felwah Alqahtani; Derek Reilly
It’s the Gesture That (re)Counts: Annotating While Running to Recall Affective Experience Conference
Proceedings of Graphics Interface (GI 2018), Toronto, Canada, 2018.
Abstract | Links | BibTeX | Tags: embodied interaction, emotional recall, gesture, mobile, running, spatial annotation, visualization
@conference{Alqahtani2018,
title = {It’s the Gesture That (re)Counts: Annotating While Running to Recall Affective Experience},
author = {Felwah Alqahtani and Derek Reilly},
doi = {10.20380/GI2018.12},
year = {2018},
date = {2018-05-08},
booktitle = {Proceedings of Graphics Interface (GI 2018)},
address = {Toronto, Canada},
abstract = {We present results from a study exploring whether gestural annotations of felt emotion presented on a map-based visualization can support recall of affective experience during recreational runs. We compare gestural annotations with audio and video notes and a “mental note” baseline. In our study, 20 runners were asked to record their emotional state at regular intervals while running a familiar route. Each runner used one of the four methods to capture emotion over four separate runs. Five days after the last run, runners used an interactive map-based visualization to review and recall their running experiences. Results indicate that gestural annotation promoted recall of affective experience more effectively than the baseline condition, as measured by confidence in recall and detail provided. Gestural annotation was also comparable to video and audio annotation in terms of recollection confidence and detail. Audio annotation supported recall primarily through the runner's spoken annotation, though background sound was sometimes used as well. Video annotation yielded the most detail, much of it directly related to visual cues in the video; however, using video annotations required runners to stop during their runs. Given these results, we propose that background logging of ambient sounds and video may supplement gestural annotation.},
keywords = {embodied interaction, emotional recall, gesture, mobile, running, spatial annotation, visualization},
pubstate = {published},
tppubtype = {conference}
}
2016
Nabil Bin Hannan; Felwah Alqahtani; Derek Reilly
JogChalking: Capturing and Visualizing Affective Experience for Recreational Runners Conference
Proceedings of the 2016 ACM Conference Companion Publication on Designing Interactive Systems (DIS 2016), Brisbane, Australia, 2016.
Links | BibTeX | Tags: design, emotional recall, gesture, mobile annotation, running, visualization
@conference{BinHannan2016,
title = {JogChalking: Capturing and Visualizing Affective Experience for Recreational Runners},
author = {Nabil Bin Hannan and Felwah Alqahtani and Derek Reilly},
doi = {10.1145/2908805.2909406},
year = {2016},
date = {2016-06-06},
booktitle = {Proceedings of the 2016 ACM Conference Companion Publication on Designing Interactive Systems (DIS 2016)},
address = {Brisbane, Australia},
keywords = {design, emotional recall, gesture, mobile annotation, running, visualization},
pubstate = {published},
tppubtype = {conference}
}