
Sketch Recognition Lab
Director: Dr. Tracy Anne Hammond

SRL Publications


 

Publications of the Sketch Recognition Lab

Looking for PhD Dissertations, MS Theses, and UG Honors Theses?


2017
Alamudun, Folami; Yoon, Hong-Jun; Hudson, Kathleen B; Hammond, Tracy; Tourassi, Georgia D. Geometry and Gesture-based Features from Saccadic Eye-Movement as a Biometric in Radiology. 19th International Conference on Human-Computer Interaction. Accepted. Vancouver, July 2017. Link
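BibTeX (a minimal entry reconstructed from the citation above; the key is illustrative and fields not listed on this page are left blank):
@inproceedings{alamudun2017hcii,
 author = {Alamudun, Folami and Yoon, Hong-Jun and Hudson, Kathleen B and Hammond, Tracy and Tourassi, Georgia D},
 title = {Geometry and Gesture-based Features from Saccadic Eye-Movement as a Biometric in Radiology},
 booktitle = {19th International Conference on Human-Computer Interaction},
 year = {2017},
 location = {Vancouver},
 pages = {}
}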
 
2017
Alamudun, Folami; Yoon, Hong-Jun; Hudson, Kathleen B; Morin-Ducote, Garnetta; Hammond, Tracy; Tourassi, Georgia D. Fractal Analysis of Visual Search Activity for Mass Detection During Mammographic Screening. Medical Physics. Accepted. Wiley, 2017. Link
BibTeX:
@article{alamudun2017MedPhy,
  title={Fractal Analysis of Visual Search Activity for Mass Detection During Mammographic Screening},
  author={Alamudun, Folami and Yoon, Hong-Jun and Hudson, Kathleen B and Morin-Ducote, Garnetta and Hammond, Tracy and Tourassi, Georgia D},
  journal={Medical Physics},
  volume={},
  pages={},
  year={2017},
  publisher={Wiley}
}
2017
Ray, Jaideep; Polsley, Seth; Hammond, Tracy. SketchSeeker: Finding Similar Sketches. IEEE Transactions on Human-Machine Systems. Accepted. 2017. Link
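BibTeX (a minimal entry reconstructed from the citation above; the key is illustrative and fields not listed on this page are left blank):
@article{ray2017sketchseeker,
 author = {Ray, Jaideep and Polsley, Seth and Hammond, Tracy},
 title = {SketchSeeker: Finding Similar Sketches},
 journal = {IEEE Transactions on Human-Machine Systems},
 year = {2017},
 volume = {},
 pages = {},
 publisher = {IEEE}
}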
 
2017
Josh Cherian; Vijay Rajanna; Daniel Goldberg; Tracy Hammond. Did you Remember To Brush?: A Noninvasive Wearable Approach to Recognizing Brushing Teeth for Elderly Care. 11th EAI International Conference on Pervasive Computing Technologies for Healthcare. Accepted. Barcelona. ACM, New York, USA, May 23-26, 2017. Link
Abstract:
Previous studies have shown that failing to regularly brush one's teeth can have surprisingly serious health consequences, from periodontal disease to coronary heart disease to pancreatic cancer. This problem is especially worrying when caring for the elderly and/or individuals with dementia, as they often forget to or are unable to perform standard health activities such as brushing their teeth. To ensure that such individuals are correctly looked after, they are placed under the supervision of caretakers or family members, simultaneously limiting their independence and placing an immense burden on their family members or caretakers. To address this problem, we developed a non-invasive wearable system based on a single wrist-mounted accelerometer to accurately identify when a person brushed their teeth. We tested the efficacy of our system with a month-long in-the-wild study, and achieved an accuracy of 94% and an F-measure of 0.82.

BibTeX:
@inproceedings{Cherian2017pervasive,
 author = {Cherian, Josh and Rajanna, Vijay and Goldberg, Daniel and Hammond, Tracy},
 title = {Did you Remember To Brush? : A Noninvasive Wearable Approach to Recognizing Brushing Teeth for Elderly Care},
 booktitle = {11th EAI International Conference on Pervasive Computing Technologies for Healthcare},
 series = {ICDC},
 year = {2017}, 
 isbn = {},
 location = {Barcelona, Spain},
 pages = {}
}
2017
Vijay Rajanna; Paul Taele; Seth Polsley; Tracy Hammond. A Gaze Gesture-Based User Authentication System to Counter Shoulder-Surfing Attacks. Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA '17). Accepted. Denver, CO. ACM, New York, USA, May 6-11, 2017. Link
Abstract:
Shoulder-surfing is the act of spying on an authorized user of a computer system with the malicious intent of gaining unauthorized access. Current solutions to address shoulder-surfing such as graphical passwords, gaze input, tactile interfaces, and so on are limited by low accuracy, lack of precise gaze-input, and susceptibility to video analysis attack. We present an intelligent gaze gesture-based system that authenticates users from their unique gaze patterns onto moving geometric shapes. The system authenticates the user by comparing their scan-path with each shape's path and recognizing the closest path. In a study with 15 users, authentication accuracy was found to be 99% with true calibration and 96% with disturbed calibration. Also, compared to a gaze- and PIN-based authentication system, our system is 40% less susceptible to video analysis attacks, and such attacks are nearly nine times more time-consuming.

BibTeX:
@inproceedings{rajanna2017CHI, 
author = {Rajanna, Vijay and Taele, Paul and Polsley, Seth and Hammond, Tracy}, 
title = {A Gaze Gesture-Based User Authentication System to Counter Shoulder-Surfing Attacks}, 
booktitle = {Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems}, 
series = {CHI EA '17}, 
year = {2017}, 
isbn = {978-1-4503-4656-6/17/05}, 
location = {Denver, Colorado, USA}, 
url = {http://doi.acm.org/10.1145/3027063.3053070}, 
doi = {10.1145/3027063.3053070}, 
acmid = {3053070}, 
publisher = {ACM}, 
address = {New York, NY, USA}, 
 } 
2016
Brooks, Randy; Hammond, Tracy; Koh, Jung-In. Score Improvement Distribution When Using Sketch Recognition Software (Mechanix) as a Tutor: Assessment of High School Classroom Pilot. 10th Conference on Pen and Touch Technology in Education. CPTTE 2016. Brown University, Providence, RI. March 31, 2016. Link
BibTeX:
@inproceedings{Brooks2016CPTTE,
 author = {Brooks, Randy and Hammond, Tracy and Koh, Jung-In},
 title = {Score Improvement Distribution When Using Sketch Recognition Software (Mechanix) as a Tutor: Assessment of High School Classroom Pilot},
 booktitle = {10th Conference on Pen and Touch Technology in Education. CPTTE 2016},
 series = {CPTTE},
 year = {2016}, 
 isbn = {},
 location = {Brown University},
 pages = {2}
}
2016
Folami Alamudun, Hong-Jun Yoon, Tracy Hammond, Kathy Hudson, Garnetta Morin-Ducote, Georgia Tourassi. Shapelet analysis of pupil dilation for modeling visuo-cognitive behavior in screening mammography. Proc. SPIE 9787, Medical Imaging 2016: Image Perception, Observer Performance, and Technology Assessment, 97870M (March 24, 2016). Volume 9787, San Diego, CA. SPIE, February 27, 2016. Link
Abstract:
Our objective is to improve understanding of visuo-cognitive behavior in screening mammography under clinically equivalent experimental conditions. To this end, we examined pupillometric data, acquired using a head-mounted eye-tracking device, from 10 image readers (three breast-imaging radiologists and seven Radiology residents), and their corresponding diagnostic decisions for 100 screening mammograms. The corpus of mammograms comprised cases of varied pathology and breast parenchymal density. We investigated the relationship between pupillometric fluctuations, experienced by an image reader during mammographic screening and indicative of changes in mental workload, the pathological characteristics of a mammographic case, and the image readers’ diagnostic decision and overall task performance. To answer these questions, we extracted features from pupillometric data, and additionally applied time series shapelet analysis to extract discriminative patterns in changes in pupil dilation. Our results show that pupillometric measures are adequate predictors of mammographic case pathology, and image readers’ diagnostic decision and performance, with an average accuracy of 80%.

BibTeX:
@inproceedings{FolamiSPIE2016,
 author = {Alamudun, Folami and Yoon, Hong-Jun and Hammond, Tracy and Hudson, Kathy and Morin-Ducote, 
   Garnetta and Tourassi, Georgia},
 title = { Shapelet analysis of pupil dilation for modeling visuo-cognitive behavior in screening mammography },
 booktitle = {Proc. SPIE},
 series = {SPIE},
 year = {2016},
 volume = {9787},
 location = {San Diego, CA},
 doi = {10.1117/12.2217670},
 pages = {97870M-97870M-13},
 URL = { http://dx.doi.org/10.1117/12.2217670},
 eprint = {}
}
2016
Hammond, Tracy; Valentine, Stephanie; Adler, Aaron; Payton, Mark. Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Human-Computer Interaction Series. 350 pages. Springer, April 2016. Link
Abstract:
Derived from contributions to the Workshop on Pen and Touch Technology on Education (WIPTTE) in 2015, this edited volume highlights recent developments for pen and tablet research within the education system with a particular focus on hardware and software developments, comprising the perspectives of teachers, school and university administrators, and researchers for educators at every level. Split into six distinct parts, the book explores topics like how classrooms are increasingly using sketch-based videos, created by teachers and students alike, and how the teaching of key skills such as literacy, languages, math, and art via pen and touch technologies within the classroom are leading to improvements in engagement, learning, and retention levels amongst students. Future perspectives of digital learning, as envisioned by current high school students, are also explored.

BibTeX:
@book{Hammond:2016,
 author = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron},
 title = {Revolutionizing Education with digital Ink: The Impact of Pen and Touch Technology on Education},
 year = {2016},
 isbn = {9783319311913},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated},
} 
2016
Hilton, Ethan C.; Williford, Blake; Li, W.; McTigue, Erin; Hammond, Tracy; Linsey, Julie. Consistently evaluating sketching ability in engineering curriculum. 4th ICDC, International Conference on Design Creativity. Atlanta, GA. November 4, 2016. Link
Abstract:
Numerous studies have found sketching to be a useful skill for engineers. Sketching has been found to improve spatial visualization skills and help increase creativity in the design process. Therefore, in recent semesters, there has been a push to further develop the sketching instruction at Georgia Tech. This development has included introducing different methods of sketching, such as perspective sketching, and introducing new tools, such as sketch-based online tutoring applications. However, a consistent, trusted method to accurately evaluate students’ sketching ability does not yet exist. This study outlines the first steps taken to create a rubric that can be used to create consistent evaluations of students’ sketching abilities. A reliable and valid rubric will allow for evaluation of different methods of sketching education as well as to help to determine the links between sketching ability and other skills such as design reasoning, creativity in idea generation, and self-efficacy.

BibTeX:
@inproceedings{Hilton2016icdc,
 author = {Hilton, Ethan C. and Williford, Blake and Li, W. and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 title = {Consistently evaluating sketching ability in engineering curriculum},
 booktitle = {4th ICDC, International Conference on Design Creativity},
 series = {ICDC},
 year = {2016}, 
 isbn = {},
 location = {Atlanta, GA},
 pages = {9}
}
2016
Kaul, Purnendu; Rajanna, Vijay; Hammond, Tracy. Exploring Users' Perceived Activities in a Sketch-based Intelligent Tutoring System Through Eye Movement Data. ACM Symposium on Applied Perception (SAP '16). Anaheim, CA. ACM, July 22-23, 2016. Link
BibTeX:
@inproceedings{Purnendu2016SAP,
 author = {Kaul, Purnendu and Rajanna, Vijay and Hammond, Tracy},
 title = {Exploring Users' Perceived Activities in a Sketch-based Intelligent Tutoring System Through Eye Movement Data},
 booktitle = {ACM Symposium on Applied Perception (SAP '16)},
 series = {SAP},
 year = {2016}, 
 isbn = {},
 location = {Anaheim, CA},
 pages = {1}
}
2016
Kim, Hong-Hoe; Taele, Paul; Seo, Jinsil; Liew, Jeffrey; Hammond, Tracy. A Novel Sketch-Based Interface for Improving Children’s Fine Motor Skills and School Readiness. Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling. Lisbon. SBIM, May 2016. Link
Abstract:
Children’s fine motor skills are associated with enhanced drawing skills, as well as improved creativity, self-regulation skills, and school readiness. Assessing these skills enables parents and teachers to target areas of improvement for their children, so that they are better prepared for learning and achieving once they enter school. Conventional approaches rely on psychology-based tracing and drawing tasks using pencil-and-paper and performance metrics such as timing and accuracies. However, such approaches involve human experts to manually score children’s drawings and evaluate their fine motor skills, which is both time consuming and prone to human error or bias. This paper introduces our novel sketch-based educational interface, which can classify children’s fine motor skills more accurately than conventional methods by automatically classifying fine motor skills through sketch recognition techniques. The interface (1) employs a fine motor skill classifier, which decides children’s fine motor skills based on their drawing skills and (2) includes a pedagogical system that assists children to draw basic shapes such as alphabet letters or numbers based on developmental level and learning progress, and provides teachers and parents with information on the maturity of the children’s fine motor skills that correspond to their school readiness. We evaluated both our interface and the “star drawing test” with 70 children (3-8 years), and found that our interface determined children’s fine motor skills more accurately than the conventional approach. In addition to the fine motor skill assessment, our interface served as an educational tool that benefited children in teaching them how to draw, practice, and improve their drawing skills.

BibTeX:
@inproceedings{Kim2016SBIM,
 author = {Kim, Hong-Hoe and Taele, Paul and Seo, Jinsil and Liew, Jeffrey and Hammond, Tracy},
 title = {A Novel Sketch-Based Interface for Improving Children’s Fine Motor Skills and School Readiness},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
  series = {SBIM},
 year = {2016},
 location = {Lisbon, Portugal},
 pages = {1-10}
}
2016
Lara-Garduno, Raniero; Leslie, Nancy; Hammond, Tracy. SmartStrokes: Digitizing Paper-Based Neuropsychological Tests. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number 3, pp. 163-175. New York, NY. Springer, 2016. Link
Abstract:
Clinical neuropsychologists develop comprehensive behavioral profiles on their patients primarily by using paper-and-pencil test stimuli. Despite these tests being significantly cheaper and faster than complex procedures such as MRI scans, multiple drawbacks remain. Constructing these behavioral profiles can take upwards of six hours to fully complete, and the analysis of the sketches from these pencil-and-paper tests is still largely subjective and qualitative. We developed SmartStrokes, a testing suite that implements digital versions of common clinical neuropsychology pencil-and-paper tests, with the purpose of helping to automate and analyze patient sketches using the principles of sketch recognition.

BibTeX:
@incollection{LaraGarduno2016,
author={Lara-Garduno, Raniero
and Leslie, Nancy
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={SmartStrokes: Digitizing Paper-Based Neuropsychological Tests},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={163--175},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_11},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_11}
}
2016
Polsley, Seth; Ray, Jaideep; Nelligan, Trevor; Helms, Michael; Linsey, Julie; Hammond, Tracy. Leveraging Trends in Student Interaction to Enhance the Effectiveness of Sketch-Based Educational Software. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number 3, pp. 103-114. New York, NY. Springer, 2016. Link
Abstract:
With the rapid adoption of software-based learning in classrooms, it is increasingly important to design more intelligent educational software, a goal of the emerging field of educational data mining. In this work, we analyze student activities from using a learning tool for engineers, Mechanix, in order to find trends that may be used to make the software a better tutor, combining its natural, sketch-based input with intelligent, experience-based feedback. We see a significant correlation between student performance and the amount of time they work on a problem before submitting; students who attempt to “game” the system by submitting their results too often perform worse than those who work longer (p < 0.05). We also found significance in the number of times a student attempted a problem before moving on, with a strong correlation between being willing to switch among problems and better performance (p < 0.05). Overall, we find that student trends like these could be paired with machine learning techniques to make more intelligent educational tools.

BibTeX:
@incollection{Polsley2016,
author={Polsley, Seth
and Ray, Jaideep
and Nelligan, Trevor
and Helms, Michael
and Linsey, Julie
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={Leveraging Trends in Student Interaction to Enhance the Effectiveness of Sketch-Based Educational Software},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={103--114},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_7},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_7}
}
2016
Rajanna, Vijay; Hammond, Tracy. GAWSCHI: Gaze-Augmented, Wearable-Supplemented Computer-Human Interaction. Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA '16). Charleston, South Carolina. ACM, March 14-16, 2016. Link
Abstract:
Recent developments in eye tracking technology are paving the way for gaze-driven interaction as the primary interaction modality. Despite successful efforts, existing solutions to the "Midas Touch" problem have two inherent issues, 1) lower accuracy and 2) visual fatigue, that are yet to be addressed. In this work we present GAWSCHI: a Gaze-Augmented, Wearable-Supplemented Computer-Human Interaction framework that enables accurate and quick gaze-driven interactions, while being completely immersive and hands-free. GAWSCHI uses an eye tracker and a wearable device (quasi-mouse) that is operated with the user's foot, specifically the big toe. The system was evaluated with a comparative user study involving 30 participants, with each participant performing eleven predefined interaction tasks (on MS Windows 10) using both mouse and gaze-driven interactions. We found that gaze-driven interaction using GAWSCHI is as good (time and precision) as mouse-based interaction as long as the dimensions of the interface element are above a threshold (0.60" x 0.51"). In addition, an analysis of the NASA Task Load Index post-study survey showed that the participants experienced low mental, physical, and temporal demand, and also achieved high performance. We foresee GAWSCHI as the primary interaction modality for the physically challenged and as an enriched interaction modality for able-bodied users.

BibTeX:
@inproceedings{RajannaETRA2016,
 author = {Rajanna, Vijay and Hammond, Tracy},
 title = {GAWSCHI: Gaze-augmented, Wearable-supplemented Computer-human Interaction},
 booktitle = {Proceedings of the Ninth Biennial ACM Symposium on Eye Tracking Research \& Applications},
 series = {ETRA '16},
 year = {2016},
 isbn = {978-1-4503-4125-7},
 location = {Charleston, South Carolina},
 pages = {233--236},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2857491.2857499},
 doi = {10.1145/2857491.2857499},
 acmid = {2857499},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {eye tracking, foot-operated device, gaze interaction, midas touch, multi-modal interaction, 
   quasi-mouse, wearable devices},
}
2016
Rajanna, Vijay; Hammond, Tracy. Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality. Proceedings of the 21st International Conference on Intelligent User Interfaces (IUI '16), Doctoral Consortium. Sonoma, California. ACM, March 7-10, 2016. Link
Abstract:
Transforming gaze input into a rich and assistive interaction modality is one of the primary interests in eye tracking research. Gaze input in conjunction with traditional solutions to the "Midas Touch" problem, dwell time or a blink, is not mature enough to be widely adopted. In this regard, we present our preliminary work, a framework that achieves precise "point and click" interactions in a desktop environment through combining the gaze and foot interaction modalities. The framework comprises an eye tracker and a foot-operated quasi-mouse that is wearable. The system evaluation shows that our gaze and foot interaction framework performs as well as a mouse (time and precision) in the majority of tasks. Furthermore, this dissertation work focuses on the goal of realizing gaze-assisted interaction as a primary interaction modality to substitute conventional mouse and keyboard-based interaction methods. In addition, we consider some of the challenges that need to be addressed, and also present the possible solutions toward achieving our goal.

BibTeX:
@inproceedings{RajannaIUI2016,
 author = {Rajanna, Vijay Dandur},
 title = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality},
 booktitle = {Companion Publication of the 21st International Conference on Intelligent User Interfaces},
 series = {IUI '16 Companion},
 year = {2016},
 isbn = {978-1-4503-4140-0},
 location = {Sonoma, California, USA},
 pages = {126--129},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2876456.2876462},
 doi = {10.1145/2876456.2876462},
 acmid = {2876462},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {authentication, eye tracking, foot input, gaze and foot interaction, tabletop interaction},
} 
2016
Rajanna, Vijay; Hammond, Tracy. Gaze Typing Through Foot-Operated Wearable Device. The 18th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS '16). Reno, Nevada. ACM, October 24-26, 2016. Link
Abstract:
Gaze Typing, a gaze-assisted text entry method, allows individuals with motor (arm, spine) impairments to enter text on a computer using a virtual keyboard and their gaze. Though gaze typing is widely accepted, this method is limited by its lower typing speed, higher error rate, and the resulting visual fatigue, since dwell-based key selection is used. In this research, we present a gaze-assisted, wearable-supplemented, foot interaction framework for dwell-free gaze typing. The framework consists of a custom-built virtual keyboard, an eye tracker, and a wearable device attached to the user’s foot. To enter a character, the user looks at the character and selects it by pressing the pressure pad, attached to the wearable device, with the foot. Results from a preliminary user study involving two participants with motor impairments show that the participants achieved a mean gaze typing speed of 6.23 Words Per Minute (WPM). In addition, the mean value of Key Strokes Per Character (KPSC) was 1.07 (ideal 1.0), and the mean value of Rate of Backspace Activation (RBA) was 0.07 (ideal 0.0). Furthermore, we present our findings from multiple usability studies and design iterations, through which we created appropriate affordances and experience design of our gaze typing system.

BibTeX:
@inproceedings{Rjanna2016Access,
 author = {Rajanna, Vijay and Hammond, Tracy},
 title = {Gaze Typing Through Foot-Operated Wearable Device},
 booktitle = {The 18th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS '16)},
 series = {SIG},
 year = {2016}, 
 isbn = {},
 location = {Reno, Nevada},
 pages = {2}
}
2016
Rajanna, Vijay; Vo, Patrick; Barth, Jerry; Mjelde, Matthew; Grey, Trevor; Oduola, Cassandra; Hammond, Tracy. KinoHaptics: An Automated, Wearable, Haptic Assisted, Physio-therapeutic System for Post-surgery Rehabilitation and Self-care. Journal of Medical Systems. Volume 40, Number 3, pp. 1-12. Springer, March 2016. Link
Abstract:
A carefully planned, structured, and supervised physiotherapy program, following a surgery, is crucial for the successful diagnosis of physical injuries. Nearly 50% of the surgeries fail due to unsupervised and erroneous physiotherapy. The demand for a physiotherapist for an extended period is expensive to afford, and sometimes inaccessible. Researchers have tried to leverage the advancements in wearable sensors and motion tracking by building affordable, automated, physio-therapeutic systems that direct a physiotherapy session by providing audio-visual feedback on patient’s performance. There are many aspects of an automated physiotherapy program which are yet to be addressed by the existing systems: a wide classification of patients’ physiological conditions to be diagnosed, multiple demographics of the patients (blind, deaf, etc.), and the need to persuade patients to adopt the system for an extended period for self-care. In our research, we have tried to address these aspects by building a health behavior change support system called KinoHaptics, for post-surgery rehabilitation. KinoHaptics is an automated, wearable, haptic assisted, physio-therapeutic system that can be used by a wide variety of demographics and for various physiological conditions of the patients. The system provides rich and accurate vibro-haptic feedback that can be felt by the user, irrespective of the physiological limitations. KinoHaptics is built to ensure that no injuries are induced during the rehabilitation period. The persuasive nature of the system allows for personal goal-setting, progress tracking, and most importantly life-style compatibility. The system was evaluated under laboratory conditions, involving 14 users. Results show that KinoHaptics is highly convenient to use, and the vibro-haptic feedback is intuitive, accurate, and has been shown to prevent accidental injuries. Also, results show that KinoHaptics is persuasive in nature as it supports behavior change and habit building. The successful acceptance of KinoHaptics, an automated, wearable, haptic assisted, physio-therapeutic system proves the need and future-scope of automated physio-therapeutic systems for self-care and behavior change. It also proves that such systems incorporated with vibro-haptic feedback encourage strong adherence to the physiotherapy program and can have a profound impact on the physiotherapy experience, resulting in a higher acceptance rate.

BibTeX:
@Article{Rajanna2015,
 author = {Rajanna, Vijay and Vo, Patrick and Barth, Jerry and Mjelde, Matthew
 and Grey, Trevor and Oduola, Cassandra and Hammond, Tracy},
 title = {KinoHaptics: An Automated, Wearable, Haptic Assisted, Physio-therapeutic System for Post-surgery 
   Rehabilitation and Self-care},
 journal = {Journal of Medical Systems},
 year = {2015},
 volume = {40},
 number = {3},
 pages = {1--12},
 issn = {1573-689X},
 doi = {10.1007/s10916-015-0391-3},
 url = {http://dx.doi.org/10.1007/s10916-015-0391-3}
}
2016
Shiqiang Guo, Folami Alamudun, Tracy Hammond. RésuMatcher: A Personalized Résumé-Job Matching System. Expert Systems with Applications. Elsevier, 2016. Link
Abstract:
Online job search through popular websites is quite beneficial, having served for many years as a prominent tool for job seekers and employers alike. In spite of their valuable utility in linking employers with potential employees, the search process and technology utilized by job search websites have not kept pace with the rapid changes in computing capability and machine intelligence. The information retrieval techniques utilized by these websites rely primarily on variants of manually entered search queries with some advanced similarity metrics for ranking search results. Advancements in machine intelligence techniques have enabled programmatic extraction of pertinent information about the job seeker and job postings without active user input. To this end, we developed a resume matching system, RésuMatcher, which intelligently extracts the qualifications and experience of a job seeker directly from his/her résumé, and relevant information about the qualifications and experience requirements of job postings. Using a novel statistical similarity index, RésuMatcher returns results that are more relevant to the job seeker's experience, academic, and technical qualifications, with minimal active user input. Our method provides up to a 34% improvement over existing information retrieval methods in the quality of search results. In addition, RésuMatcher requires minimal active user input to search for jobs, compared to traditional manual search-based methods prevalent today. These improvements, we hypothesize, will lead to more relevant job search results and a better overall job search experience for job seekers. As an alternative to the fragmented organization-centric job application process, job recruitment websites offered the promise of simplifying and streamlining the job search process. However, these websites offer limited functionality, using generic and simplistic information retrieval methods which, being non-domain-specific, lead to a poor and frustrating search experience. In this paper, we present RésuMatcher, a personalized job-résumé matching system, which offers a novel statistical similarity index for ranking relevance between candidate résumés and a database of available jobs. In our experiments we show that our method offers a 37.44% improvement over existing information retrieval methods in the quality of matches returned.

BibTeX:
@article{guo2016resumatcher,
  title={R{\'e}suMatcher: A personalized r{\'e}sum{\'e}-job matching system},
  author={Guo, Shiqiang and Alamudun, Folami and Hammond, Tracy},
  journal={Expert Systems with Applications},
  volume={60},
  pages={169--182},
  year={2016},
  publisher={Elsevier}
}
2016
Smitherman, Seth; Goldberg, Daniel W.; Hammond, Tracy A.; Horney, Jennifer A. Developing a Survey to Assess the Prevalence of Risk Factors for Neglected Tropical Diseases in Texas Using the CASPER Method (accepted). Health Security. 2016. Link
BibTeX:
@article{SmithermanJournal2016Casper,
author = {Seth Smitherman and Daniel Goldberg and Tracy Hammond and Jennifer Horney},
title = {Developing a Survey to Assess the Prevalence of Risk Factors for Neglected Tropical Diseases in Texas Using the CASPER Method},
journal = {Health Security},
year = {2016}
}
2016
Taele, Paul; Barreto, Laura; Hammond, Tracy. A Stylus-Driven Intelligent Tutoring System for Music Education Instruction. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number 3, pp. 141-161. New York, NY. Springer, 2016. Link
Abstract:
Aspiring musicians and non-musician hobbyists alike can enjoy various benefits from learning music theory beyond its importance in performing music itself. Such reasons include developing their mathematical abilities, improving their reading comprehension, expanding their memory capabilities, better appreciating music that they listen to, and so on. However, current resources that are available for teaching music theory to students present their own inherent disadvantages. Specifically, traditional music theory classroom centers assume that students already have existing musical knowledge, existing self-study paper-based materials lack immediate feedback, while emerging educational apps either lack stylus-based interaction or intelligent feedback appropriate for novice students. In this paper, we introduce a stylus-driven intelligent tutoring system for music education instruction that aims to combine the benefits while addressing the limitations of existing instructional resources for teaching music theory. Our proposed system provides an accessible educational application with an intelligent sketch user interface that is designed for novice students interested in learning music theory through a series of interactive music composition lessons and quizzes. Following the completion of a student’s composed solution to a prompted music theory question, our system first leverages appropriate sketch and gesture recognition techniques to automatically understand the student’s input, and then generates feedback and assessment of the student’s input that emulates those from a music theory instructor. From our evaluations, we demonstrate that not only did our system automatically understand students’ composed solutions with reasonable accuracy, but also that novice students were able to successfully grasp introductory music theory concepts from a single session using our system.

BibTeX:
@incollection{Barreto2016,
author={Barreto, Laura
and Taele, Paul
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={A Stylus-Driven Intelligent Tutoring System for Music Education Instruction},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={141--161},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_10},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_10}
}
2016
Taele, Paul; Hammond, Tracy. An Intelligent Sketch-Based Educational Interface for Learning Complex Written East Asian Phonetic Symbols. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number 3, pp. 129-140. New York, NY. Springer, 2016. Link
Abstract:
Literacy in written phonetic symbols for East Asian languages is important but challenging for novice language students with only English language fluency, but pedagogical approaches such as rote writing practice and written technique have successfully assisted students towards written mastery. Researchers and developers are also adapting these approaches into intelligent educational apps for students to exploit interactive computing technologies for learning written East Asian language phonetic symbols, whether they are full-time students in K12 or higher education enrolled in conventional classrooms or non-traditional students studying the subject as a passionate interest. However, related pen and touch educational computing apps for sketching practice of East Asian language phonetic symbols provide limited assessment and flexibility of students’ input and sketching style, while related recognition systems either focus more on expert users’ writing styles or cannot provide assessment for more complex phonetic symbols. In this article, we describe our preliminary work on an intelligent sketch-based educational interface developed specifically for assessing students’ sketched input of complex East Asian language phonetic symbols. The interface system relies on template matching from expert users’ sketched training data and various heuristics for assessing the visual structure and technical correctness of students’ more complex written phonetic symbols. From our evaluations of separate sketching data from both novice and expert writers, we were able to achieve reasonably robust performance for both visual structure and technical correctness of our workbook interface for complex written East Asian language phonetic symbols.

BibTeX:
@incollection{Taele2016,
author={Taele, Paul
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={An Intelligent Sketch-Based Educational Interface for Learning Complex Written East Asian Phonetic Symbols},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={129--140},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_9},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_9}
}

2016
Valentine, Stephanie; Hammond, Tracy. An Analysis of Participation, Identity Conversations, and Social Networking Affordances on an Online Social Network for Children. Journal of Media Innovations. 2016. Link
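BibTeX (a minimal entry reconstructed from the citation above; the key is illustrative and fields not listed on this page are left blank):
@article{valentine2016media,
 author = {Valentine, Stephanie and Hammond, Tracy},
 title = {An Analysis of Participation, Identity Conversations, and Social Networking Affordances on an Online Social Network for Children},
 journal = {Journal of Media Innovations},
 year = {2016},
 volume = {},
 pages = {}
}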
 
2016
Valentine, Stephanie; Leyva-McMurtry, Angelica; Borgos-Rodriguez, Katya; Hammond, Tracy. The Digital Sash: A Sketch-Based Badge System in a Social Network for Children. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number IV, pp. 179-189. New York, NY. Springer, 2016. Link
Abstract:
In this chapter, we present a sketch-based reward system for promoting the practice of digital citizenship skills within our custom social network for children aged 7 to 12 years. This badge system prompts budding social networkers to complete fun, creative sketching activities and short writing tasks. Because we have deployed our custom social network (KidGab) within our local Girl Scouts Council, our badge system takes the form of a digital Girl Scout sash. In this work, we discuss the badges that prompted the most participation from our users and provide example responses for each. Finally, we consider methods in which our system and results might be generalized to suit non-scouting populations of children such as classrooms, clubs, and sports teams.

BibTeX:
@incollection{Valentine2016,
author={Valentine, Stephanie
and Leyva-McMurtry, Angelica
and Borgos-Rodriguez, Katya
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={The Digital Sash: A Sketch-Based Badge System in a Social Network for Children},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={179--189},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_12},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_12}
}
2016
Valentine, Stephanie; Conrad, Hannah; Oduola, Cassandra; Hammond, Tracy. WIPTTE 2015 High School Contest. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number VI, pp. 345-364. New York, NY. Springer, 2016. Link
Abstract:
The WIPTTE High School Contest was held on the first day of the WIPTTE conference on the Microsoft Campus in Redmond, Washington. The High School Contest is an annual event originating in the Department of Computer Science and Engineering at Texas A&M University, which extended into the WIPTTE conference. 2015 marked the second year of the WIPTTE High School Contest. During the contest, high school students participate in an intense day of brainstorming, design, prototyping, and presentation to compete against top schools across the United States. Nineteen high school and middle school students combined into four teams from two different schools: Renton Prep from Renton, WA, and University Prep from Seattle, WA. The high school students participated in many elements of WIPTTE throughout the Contest, including watching and commenting on the opening keynotes and presenting for the entire WIPTTE audience. Additionally, many students were able to participate in all three days of the conference. The students were active and engaged in the WIPTTE community, particularly while providing valuable feedback during the You-Try-It sessions.

BibTeX:
@incollection{ValentineWIPTTE2016,
author={Valentine, Stephanie
and Conrad, Hannah
and Oduola, Cassandra
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={WIPTTE 2015 High School Contest},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={345--364},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_25},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_25}
}

2016
Williford, Blake; Taele, Paul; Nelligan, Trevor; Li, Wayne; Linsey, Julie; Hammond, Tracy. PerSketchTivity: An Intelligent Pen-Based Educational Application for Design Sketching Instruction. In: T. Hammond, S. Valentine, A. Adler, editors, Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education. Number 3, pp. 115-127. New York, NY. Springer, 2016. Link
Abstract:
Design sketching is an important and versatile skill for engineering students to master in order to translate their design thoughts effectively onto a visual medium, whether it is to proficiently produce hand-drawn sketches onto paper, seamlessly interact with intelligent sketch-based modeling interfaces, or reap the various educational benefits associated with drawing. Traditional instructional approaches for teaching design sketching are frequently constrained by the availability of experienced human instructors or the lack of supervised learning from self-practice, while relevant intelligent educational applications for sketch instruction have focused more on assessing users’ art drawings or cognitive developmental progress. We introduce PerSketchTivity, an intelligent pen-based computing educational application that not only teaches engineering students how to hone and practice their design sketching skills through stylus-and-touchscreen interaction, but also aids their motivation and self-regulated learning through real-time feedback. From the qualitative results of our usability tests of our application with eight university student participants of varying skill levels and disciplines, we observed that participants rated the usability of the application highly while also providing valuable feedback to improve the application even further.

BibTeX:
@incollection{Williford2016,
author={Williford, Blake
and Taele, Paul
and Nelligan, Trevor
and Li, Wayne
and Linsey, Julie
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={PerSketchTivity: An Intelligent Pen-Based Educational Application for Design Sketching Instruction},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={115--127},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_8},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_8}
}
2015
Hammond, Tracy; Payton, Mark; Adler, Aaron; Valentine, Stephanie. Introduction. In: T. Hammond, S. Valentine, A. Adler, and M. Payton, editors, The Impact of Pen and Touch Technology on Education. pp. v-xix. New York, NY. Springer, August 2015. Link
BibTeX:
@incollection{Hammond2015Intro,
 title = {Introduction}, 
 author = {Hammond, Tracy and Payton, Mark and Adler, Aaron and Valentine, Stephanie},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {v--xix},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}
2015
Hammond, Tracy; Valentine, Stephanie; Adler, Aaron; Payton, Mark. The Impact of Pen and Touch Technology on Education. Human-Computer Interaction Series. 387 pages. Springer, August 15, 2015. Link
BibTeX:
@book{Hammond:2015:IPT:2815658,
 author = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark},
 title = {The Impact of Pen and Touch Technology on Education},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
} 
2015
Kim, Hong-Hoe; Valentine, Stephanie; Taele, Paul; Hammond, Tracy. EasySketch: A Sketch-Based Fine Motor Skill Recognizing Educational Interface for Children Emerging Technology Research Strand. In: T. Hammond, S. Valentine, A. Adler, and M. Payton, editors, The Impact of Pen and Touch Technology on Education. Volume 1, Number 4, pp. 35-46. New York, NY. Springer, August 2015. Link
BibTeX:
@incollection{Kim2015EasySketch,
 title = {EasySketch: A Sketch-Based Fine Motor Skill Recognizing Educational Interface for Children 
   Emerging Technology Research Strand}, 
 author = {Kim, Hong-Hoe and Valentine, Stephanie and Taele, Paul and Hammond, Tracy},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {35--46},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}
2015
Matthew Green, Benjamin Caldwell, Michael Helms, Julie Linsey, Tracy Hammond. Using Natural Sketch Recognition Software to Provide Instant Feedback on Statics Homework (Truss Free Body Diagrams): Assessment of a Classroom Pilot. 2015 ASEE Annual Conference and Exposition. pp. 26.1671.1 - 26.1671.12. Seattle, WA. ASEE, June 14, 2015. Link
Abstract:
Despite the importance of hand-sketched Free Body Diagrams for engineering education and practice, large class sizes often prevent detailed feedback on such diagrams. Relatively recently, computing technology has become powerful enough to enable rapid and plentiful feedback on hand-sketched engineering diagrams. Researchers have recently developed the free “Mechanix” sketch recognition tutoring system for free body diagrams (FBDs) and trusses, which provides intelligent and immediate feedback. This paper will describe the process and results of piloting this software at a primarily undergraduate university with approximately 40 students enrolled in a Statics class, contrasted with a control group. Results will include attitudes towards technology, online homework scores, test scores, and self-reported perceptions of the effectiveness of the sketch-recognition software. Preliminary results look very positive, and the full paper will include a detailed data analysis of both quantitative learning outcomes and qualitative comments from users.

BibTeX:
@INPROCEEDINGS{GreenASEE2015,
 author = {Green, Matthew G. and Caldwell, Benjamin W. and Helms, Michael and Linsey, Julie S. and Hammond, Tracy Anne},
 title = {Using Natural Sketch Recognition Software to Provide Instant Feedback on Statics Homework (Truss
   Free Body Diagrams): Assessment of a Classroom Pilot},
 booktitle = {2015 ASEE Annual Conference and Exposition},
 year = {2015},
 month = {June},
 address = {Seattle, Washington},
 publisher = {ASEE Conferences},
 note = {https://peer.asee.org/25007},
 number = {10.18260/p.25007}
}
2015
Nelligan, Trevor; Polsley, Seth; Ray, Jaideep; Helms, Michael; Linsey, Julie; Hammond, Tracy. Mechanix: A Sketch-Based Educational Interface. Proceedings of the 2015 ACM International Conference on Intelligent User Interfaces (IUI 2015). pp. 53-56. Atlanta, Georgia. ACM, March 29 - April 1, 2015. Link
Abstract:
At the university level, high enrollment numbers in classes can be overwhelming for professors and teaching assistants to manage. Grading assignments and tests for hundreds of students is time consuming and has led towards a push for software-based learning in large university classes. Unfortunately, traditional quantitative question-and-answer mechanisms are often not sufficient for STEM courses, where there is a focus on problem-solving techniques over finding the right answers. Working through problems by hand can be important in memory retention, so in order for software learning systems to be effective in STEM courses, they should be able to intelligently understand students' sketches. Mechanix is a sketch-based system that allows students to step through problems designed by their instructors with personalized feedback and optimized interface controls. Optimizations like color-coding, menu bar simplification, and tool consolidation are recent improvements in Mechanix that further the aim to engage and motivate students in learning.

BibTeX:
@inproceedings{Nelligan:2015:MSE:2732158.2732194,
 author = {Nelligan, Trevor and Polsley, Seth and Ray, Jaideep and Helms, Michael and Linsey, Julie and Hammond, Tracy},
 title = {Mechanix: A Sketch-Based Educational Interface},
 booktitle = {Proceedings of the 20th International Conference on Intelligent User Interfaces Companion},
 series = {IUI Companion '15},
 year = {2015},
 isbn = {978-1-4503-3308-5},
 location = {Atlanta, Georgia, USA},
 pages = {53--56},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2732158.2732194},
 doi = {10.1145/2732158.2732194},
 acmid = {2732194},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {computer-assisted instruction (cai), graphical user interfaces (gui), guides, interaction styles, user-centered design}
} 
2015
Rajanna, Vijay; Alamudun, Folami; Goldberg, Daniel; Hammond, Tracy. Let Me Relax: Toward Automated Sedentary State Recognition and Ubiquitous Mental Wellness Solutions. MobiHealth 2015 - 5th EAI International Conference on Wireless Mobile Communication and Healthcare - Transforming healthcare through innovations in mobile and wireless technologies. London. October 14-16, 2015. Link
Abstract:
Advances in ubiquitous computing technology improve workplace productivity and reduce physical exertion, but ultimately result in a sedentary work style. Sedentary behavior is associated with an increased risk of stress, obesity, and other health complications. Let Me Relax is a fully automated sedentary-state recognition framework using a smartwatch and smartphone, which encourages mental wellness through interventions in the form of simple relaxation techniques. The system was evaluated through a comparative user study of 22 participants split into a test and a control group. An analysis of NASA Task Load Index pre- and post-study surveys revealed that test subjects who followed relaxation methods showed a trend of both increased activity as well as reduced mental stress. Reduced mental stress was found even in those test subjects that had increased inactivity. These results suggest that repeated interventions, driven by an intelligent activity recognition system, are an effective strategy for promoting healthy habits, which reduce stress, anxiety, and other health risks associated with sedentary workplaces.

BibTeX:
@inproceedings{Rajanna:2015,
 author = {Rajanna, Vijay and Alamudun, Folami and Goldberg, Daniel and Hammond, Tracy},
 title = {Let Me Relax: Toward Automated Sedentary State Recognition and Ubiquitous Mental Wellness Solutions},
 booktitle = {Proceedings of the 5th EAI International Conference on Wireless Mobile Communication and Healthcare},
 series = {MOBIHEALTH'15},
 year = {2015}, 
 isbn = {978-1-63190-088-4},
 location = {London, Great Britain},
 pages = {28--33},
 numpages = {6},
 url = {http://dx.doi.org/10.4108/eai.14-10-2015.2261900},
 doi = {10.4108/eai.14-10-2015.2261900},
 acmid = {2897461}, 
 publisher = {ICST (Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering)},
 address = {ICST, Brussels, Belgium, Belgium},
 keywords = {anxiety, cognitive reappraisal, intervention techniques, mental wellness, personal health assistant, relaxation, 
   sedentary state recognition, stress, ubiquitous computing},
}
2015
Raymond, Dwayne; Liew, Jeffrey; Hammond, Tracy. A Vision for Education: Transforming How Formal Systems are Taught Within Mass Lectures by Using Pen Technology to Create a Personalized Learning Environment. In: T. Hammond, S. Valentine, A. Adler, and M. Payton, editors, The Impact of Pen and Touch Technology on Education. Volume 7, Number 37, pp. 355-364. New York, NY. Springer, August 2015. Link
BibTeX:
@incollection{raymond2015vision,
 title = {A Vision for Education: Transforming How Formal Systems are Taught Within Mass Lectures by 
   Using Pen Technology to Create a Personalized Learning Environment},
 author = {Raymond, Dwayne and Liew, Jeffrey and Hammond, Tracy A},
 booktitle = {The Impact of Pen and Touch Technology on Education},
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark},
 pages = {355--363},
 year = {2015},
 publisher = {Springer International Publishing Switzerland},
 doi = {10.1007/978-3-319-15594-4}
}
2015
Sriharish Vangavolu, Hayden Wood, Joseph Newman, Seth Polsley, Tracy Hammond. Frontier: A Directed Graph System for Web Navigation. CHItaly 2015, 11th Biannual Conference of the Italian SIGCHI Chapter. Rome. September 28-30, 2015. Link
Abstract:
The continuous progress from machine-oriented languages to human-oriented interfaces has given rise to a specific research field devoted to investigate human-computer interaction (HCI). Since its birth, the results it achieved have created new possibilities for applications of wider and wider diffusion, which however are often hindered by the so-called "digital divide". Digital divide entails two distinct gaps. One is technological and economical in its nature, when special equipment and connections are required. The other one is cultural, entailing the difficulties encountered by the so called "digital immigrants" with respect to "digital natives" in adapting to the digital society and to its required abilities and skills. Nowadays, HCI studies have a special focus on closing both gaps, aiming at designing applications that are less demanding under both points of view. At the same time, new possibilities are also offered to users with special needs, who cannot be effectively supported by traditional interfaces. The topics HCI deals with range from general principles to more and more specialized areas, where specific requirements can be derived from new ways of addressing everyday activities, and drive research and design. On the one hand, the general aim is to increase both expressive richness and usability of human-computer interfaces at the same time. On the other hand, accessibility, cultural heritage, interaction for children are only some popular examples of application fields considered in HCI research. In an orthogonal way, the development of new trends follows different lines according to the specific communities addressed by the applications. In this way, personal and social interaction alternate and complement each other in new digital scenarios.

BibTeX:
@inproceedings{VangavoluCHitaly2015,
 author = {Vangavolu, Sriharish and Wood, Hayden and Newman, Joseph and Polsley, Seth and Hammond, Tracy},
 title = {Frontier: A Directed Graph System for Web Navigation},
 booktitle = {Proceedings of the 11th Biannual Conference on Italian SIGCHI Chapter},
 series = {CHItaly 2015},
 year = {2015},
 isbn = {978-1-4503-3684-0},
 location = {Rome, Italy},
 pages = {82--85},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2808435.2808465},
 doi = {10.1145/2808435.2808465},
 acmid = {2808465},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {contextual History, graph visualization, session management, web navigation}
} 
2015
Taele, Paul; Barreto, Laura; Hammond, Tracy. Maestoso: An Intelligent Educational Sketching Tool for Learning Music Theory. The Twenty-Seventh Annual Conference on Innovative Applications of Artificial Intelligence at AAAI (IAAI 2015). 7 pages. Austin, Texas. AAAI, January 27-29, 2015. Link
Abstract:
Learning music theory not only has practical benefits for musicians to write, perform, understand, and express music better, but also for non-musicians to improve critical thinking, math analytical skills, and music appreciation. However, current external tools applicable for learning music theory through writing when human instruction is unavailable are either limited in feedback, lacking a written modality, or assuming already strong familiarity of music theory concepts. In this paper, we describe Maestoso, an educational tool for novice learners to learn music theory through sketching practice of quizzed music structures. Maestoso first automatically recognizes students’ sketched input of quizzed concepts, then relies on existing sketch and gesture recognition techniques to automatically recognize the input, and finally generates instructor-emulated feedback. From our evaluations, we demonstrate that Maestoso performs reasonably well on recognizing music structure elements and that novice students can comfortably grasp introductory music theory in a single session.

Show BibTex
@inproceedings{TaeleAAAI2015,
 author = {Taele, Paul and Barreto, Laura and Hammond, Tracy},
 title = {Maestoso: An Intelligent Educational Sketching Tool for Learning Music Theory},
 booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence},
 series = {AAAI'15},
 year = {2015},
 isbn = {0-262-51129-0},
 location = {Austin, Texas},
 pages = {3999--4005},
 numpages = {7},
 url = {http://dl.acm.org/citation.cfm?id=2888116.2888271},
 acmid = {2888271},
 publisher = {AAAI Press}
}
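The Maestoso abstract above notes that recognition is delegated to existing sketch and gesture recognition techniques. As a rough, hedged illustration of that family of techniques, and not the system's actual recognizer, the Python sketch below resamples a stroke, normalizes it, and matches it against stored templates by mean point-to-point distance; the function names and the 32-point resampling are assumptions made for this example.

import math

def resample(points, n=32):
    """Resample a stroke (list of (x, y) tuples) to n evenly spaced points."""
    total = sum(math.dist(points[i - 1], points[i]) for i in range(1, len(points)))
    interval = total / (n - 1)
    resampled = [points[0]]
    pts = list(points)
    d, i = 0.0, 1
    while i < len(pts):
        seg = math.dist(pts[i - 1], pts[i])
        if seg > 0 and d + seg >= interval:
            t = (interval - d) / seg
            q = (pts[i - 1][0] + t * (pts[i][0] - pts[i - 1][0]),
                 pts[i - 1][1] + t * (pts[i][1] - pts[i - 1][1]))
            resampled.append(q)
            pts.insert(i, q)        # the new point starts the next segment
            d = 0.0
        else:
            d += seg
        i += 1
    while len(resampled) < n:        # guard against floating-point shortfall
        resampled.append(points[-1])
    return resampled[:n]

def normalize(points):
    """Translate to the centroid and scale to a unit bounding box."""
    xs, ys = zip(*points)
    cx, cy = sum(xs) / len(xs), sum(ys) / len(ys)
    scale = max(max(xs) - min(xs), max(ys) - min(ys)) or 1.0
    return [((x - cx) / scale, (y - cy) / scale) for x, y in points]

def classify(stroke, templates):
    """Return the label of the template with the smallest mean point distance."""
    candidate = normalize(resample(stroke))
    best_label, best_score = None, float("inf")
    for label, template in templates.items():
        ref = normalize(resample(template))
        score = sum(math.dist(a, b) for a, b in zip(candidate, ref)) / len(ref)
        if score < best_score:
            best_label, best_score = label, score
    return best_label

In this sketch, a set of music-structure templates (for example, note heads or clefs drawn once by an instructor) would be supplied as the templates dictionary.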
2015 
PublicationImage
 Taele, Paul; Hammond, Tracy. InvisiShapes: A Recognition System for Sketched 3D Primitives in Continuous Interaction Spaces. 2015 International Symposium on Smart Graphics. Chengdu. August 26-28, 2015. Link
Show Abstract:
Continued improvements and rising ubiquity in touchscreen and motion-sensing technologies enable users to leverage mid-air input modalities and extend intelligent surface sketching into the third dimension. However, existing approaches largely either focus on constrained 3D gesture sets, require specialized hardware setups, or do not deviate beyond surface sketching assumptions. We present InvisiShapes, a recognition system for users to sketch 3D geometric primitives in continuous interaction spaces that span surfaces and mid-air environments. Our system leverages a collection of sketch and gesture recognition techniques and heuristics and takes advantage of easily accessible computing hardware for users to incorporate depth into their sketches. From our interaction study and user evaluations, we observed that our system achieves strong recognition and intuitive interaction capabilities on collected sketch+motion data and in interactive sketching scenarios, respectively.

Show BibTex
@inproceedings{Taele2015SG,
 author = {Taele, Paul and Hammond, Tracy},
 title = {InvisiShapes: A Recognition System for Sketched 3D Primitives in Continuous Interaction Spaces},
 booktitle = {Proceedings of the 2015 International Symposium on Smart Graphics, Chengdu, China},
 series = {SG},
 year = {2015}, 
 isbn = {},
 location = {Chengdu, China},
 pages = {12}
}
2015 
PublicationImage
 Taele, Paul; Hammond, Tracy. Enhancing Instruction of Written East Asian Languages with Sketch Recognition-Based Intelligent Language Workbook Interfaces. In: T. Hammond, S. Valentine, A. Adler, and M. Payton, editors, The Impact of Pen and Touch Technology on Education. Volume 3, Number 13, pp. 119-126. New York, NY. Springer, August, 2015. Link
Show Abstract:

Show BibTex
@incollection{Taele2015Enhancing,
 title = {Enhancing Instruction of Written East Asian Languages with Sketch Recognition-Based Intelligent Language
   Workbook Interfaces}, 
 author = {Taele, Paul and Hammond, Tracy},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {119--126},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}
2015 
PublicationImage
 Valentine, Stephanie; Lara-Garduno, Raniero; Linsey, Julie; Hammond, Tracy. Mechanix: A Sketch-Based Tutoring System that Automatically Corrects Hand-Sketched Statics Homework. In: T. Hammond, S. Valentine, A. Adler, and M. Payton, editors, The Impact of Pen and Touch Technology on Education. Volume 3, Number 9, pp. 91-105. New York, NY. Springer, August, 2015. Link
Show Abstract:

Show BibTex
@incollection{Valentine2015Mechanix,
 title = {Mechanix: A Sketch-Based Tutoring System that Automatically Corrects Hand-Sketched Statics Homework}, 
 author = {Valentine, Stephanie and Lara-Garduno, Raniero and Linsey, Julie and Hammond, Tracy},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {91--105},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}
2014 
PublicationImage
 Atilola, Olufunmilola; Valentine, Stephanie; Kim, Hong-Hoe; Turner, David; McTigue, Erin; Hammond, Tracy; Linsey, Julie. Mechanix: A natural sketch interface tool for teaching truss analysis and free-body diagrams. Artificial Intelligence for Engineering Design, Analysis and Manufacturing (AIEDAM). Volume 28, Number 2, pp. 169-192. Cambridge University Press, May, 2014. Link
Show Abstract:
Massive open online courses, online tutoring systems, and other computer homework systems are rapidly changing engineering education by providing increased student feedback and capitalizing upon online systems’ scalability. While online homework systems provide great benefits, a growing concern among engineering educators is that students are losing both the critical art of sketching and the ability to take a real system and reduce it to an accurate but simplified free-body diagram (FBD). For example, some online systems allow the drag and drop of forces onto FBDs, but they do not allow the user to sketch the FBDs, which is a vital part of the learning process. In this paper, we discuss Mechanix, a sketch recognition tool that provides an efficient means for engineering students to learn how to draw truss FBDs and solve truss problems. The system allows students to sketch FBDs into a tablet computer or by using a mouse and a standard computer monitor. Using artificial intelligence, Mechanix can determine not only the component shapes and features of the diagram but also the relationships between those shapes and features. Because Mechanix is domain specific, it can use those relationships to determine not only whether a student’s work is correct but also why it is incorrect. Mechanix is then able to provide immediate, constructive feedback to students without providing final answers. Within this manuscript, we document the inner workings of Mechanix, including the artificial intelligence behind the scenes, and present studies of the effects on student learning. The evaluations have shown that Mechanix is as effective as paper-and-pencil-based homework for teaching method of joints truss analysis; focus groups with students who used the program have revealed that they believe Mechanix enhances their learning and that they are highly engaged while using it.

Show BibTex
@article{Atilola2014AIE,
 author = {Atilola, Olufunmilola and Valentine, Stephanie and Kim, Hong-Hoe and Turner, David and McTigue, Erin and Hammond,Tracy and Linsey, Julie},
 title = {Mechanix: A natural sketch interface tool for teaching truss analysis and free-body diagrams},
 journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing},
 volume = {28},
 issue = {Special Issue 02},
 month = {5},
 year = {2014},
 issn = {1469-1760},
 pages = {169--192},
 numpages = {24},
 doi = {10.1017/S0890060414000079},
 URL = {http://journals.cambridge.org/article_S0890060414000079}
}
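The abstract describes Mechanix judging whether a student's solved reaction or member forces are correct without revealing the answers. Purely as an illustrative sketch of that kind of check, and not the published system's logic, the snippet below compares student force values against an instructor key within a tolerance and returns per-member feedback; the member names, sign convention, and tolerance are assumptions.

def check_member_forces(student, key, tol=0.05):
    """Compare student member forces against the instructor key.

    Both arguments map member names (e.g. 'AB') to force magnitudes in newtons;
    positive = tension, negative = compression. Returns feedback per member
    without revealing the expected value.
    """
    feedback = {}
    for member, expected in key.items():
        got = student.get(member)
        if got is None:
            feedback[member] = "missing"
        elif expected != 0 and got * expected < 0:
            feedback[member] = "wrong sense (tension vs. compression)"
        elif abs(got - expected) > tol * max(abs(expected), 1.0):
            feedback[member] = "magnitude incorrect"
        else:
            feedback[member] = "correct"
    return feedback

# Example (hypothetical values):
#   key = {"AB": 500.0, "BC": -707.1}; student = {"AB": 498.0, "BC": 707.1}
#   -> {"AB": "correct", "BC": "wrong sense (tension vs. compression)"}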
2014 
PublicationImage
 Hammond, Tracy. Dialectical Creativity: Sketch-Negate-Create. Studying Visual and Spatial Reasoning for Design Creativity. pp. 91-108. Springer, November 9, 2014. Link
Show Abstract:
Dialectical Creativity is the act of formulating a new concept by starting from the original idea (the thesis), developing opposing contradictory ideas (the antithesis), and culminating in a more developed, concretized idea that both negates and encompasses the thesis and the antithesis (the synthesis). Sketching is a fundamental part of ideation. The act of performing ideation with an inherently abstract hand-drawn sketch, complete with messiness, allows the sketcher, through the misinterpretation of their own strokes, to evoke antithetical concepts, enabling the sketcher to quickly develop a creative synthetic idea. In the dialectical process there is a constant tension between creative change and the natural tendency to seek stability. Sketch recognition is the automated understanding of hand-drawn diagrams by a computer, and can be used to enhance creativity and/or idea stability. This paper discusses the Sketch Dialectic and its impact on the field of sketch recognition.

Show BibTex
@incollection{Hammond2015,
 author = {Hammond, Tracy},
 editor = {Gero, S. John},
 chapter = {Dialectical Creativity: Sketch-Negate-Create},
 title = {Studying Visual and Spatial Reasoning for Design Creativity},
 year = {2015},
 publisher = {Springer Netherlands},
 address = {Dordrecht},
 pages = {91--108},
 isbn = {978-94-017-9297-4},
 doi = {10.1007/978-94-017-9297-4_6},
 url = {http://dx.doi.org/10.1007/978-94-017-9297-4_6}
}
2014 
PublicationImage
 Hammond, Tracy; Linsey, Julie. Design Computing and Cognition (DCC'12). Artificial Intelligence for Engineering Design, Analysis and Manufacturing (AIEDAM). Volume 28, Number 2, pp. 113-114. Cambridge University Press, May, 2014. Link
Show Abstract:

Show BibTex
@article{Hammond2014AIE,
 author = {Hammond,Tracy and Linsey,Julie},
 title = {Design Computing and Cognition (DCC'12)},
 journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing},
 volume = {28},
 issue = {Special Issue 02},
 month = {5},
 year = {2014},
 issn = {1469-1760},
 pages = {113--114},
 numpages = {2},
 doi = {10.1017/S089006041400002X},
 URL = {http://journals.cambridge.org/article_S089006041400002X}
}
2014 
PublicationImage
 Hong, Yan; Vollmer Dahlke, Deborah; Ory, Marcia; Goldberg, Daniel; Cargill, Jessica; Kellstedt, Debra; Pulczinski, Jairus; Hammond, Tracy; Hernandez, Edgar. Development of ICANFIT: A Mobile Device Application to Promote Physical Activity and Access to Health Information Among Older Cancer Survivors. Healthography, American Public Health Association 142nd Annual Meeting & Expo (APHA). pp. 1 page. New Orleans, LA. APHA, November 15-19, 2014. Link
Show Abstract:

Show BibTex
@inproceedings{hong2014development,
 title = {Development of icanfit: A mobile device application to promote physical activity and access to health information
   among older cancer survivors},
 author = {Hong, Yan and Vollmer Dahlke, Deborah and Ory, Marcia and Goldberg, Daniel and Cargill, Jessica and
   Kellstedt, Debra and Pulczinski, Jairus and Hammond,Tracy and Hernandez, Edgar},
 booktitle = {142nd APHA Annual Meeting and Exposition (November 15-November 19, 2014)},
 year = {2014},
 organization = {APHA},
 note = {1 page}
}
2014 
PublicationImage
 Kim, Hong-hoe; Taele, Paul; Valentine, Stephanie; Hammond, Tracy. Developing Intelligent Sketch-Based Applications for Children’s Fine Motor Sketching Skill Development. 2014 International Conference on Intelligent User Interfaces (IUI) Workshop on Sketch: Pen and Touch Recognition. pp. 1-8. Haifa, Israel. ACM, February 24, 2014. Link
Show Abstract:
Abilities for fine motor control and executive attention are aspects of self-regulation that contribute to children’s school readiness and achievement, and can be taught and improved through sketching and writing activities. Recent interactive sketching applications have emerged to assist children in developing self-regulation skills through playful learning interfaces. Existing applications tend to focus on rote-based activities where children trace over shapes with little to no feedback for children to self-regulate their learning and monitor their improvements. In this paper, we present our initial child-centered intelligent sketching user interface prototype called EasySketch, designed to support children’s development of self-regulation skills, particularly fine motor, accuracy, and attention-related skills. Our prototype improves upon existing applications by providing immediate evaluation and constructive feedback for self-regulated learning of sketching and writing skills. In the process of evaluating and providing feedback to improve self-regulation, our sketch-based application teaches children pre-reading and pre-math skills such as writing digits and letters.

Show BibTex
@inproceedings{Kim2014Developing,
 title = {Developing Intelligent Sketch-Based Applications for Children’s Fine Motor Sketching Skill Development},
 author = {Kim, Hong-hoe and Taele, Paul and Valentine, Stephanie and Hammond, Tracy},
 booktitle = {2014 International Conference on Intelligent User Interfaces (IUI) Workshop on Sketch:
   Pen and Touch Recognition},
 year = 2014,
 address = {Haifa, Israel},
 month = 2,
 organization = {IUI},
 publisher = {ACM}
}
2014 
PublicationImage
 Kim, Hong-hoe; Valentine, Stephanie; Taele, Paul; Hammond, Tracy. EasySketch: A Sketch-Based Fine Motor Skill Recognizing Educational Interface for Children Emerging Technology Research Strand. Workshop on the Impact of Pen & Touch Technology on Education (WIPTTE). College Station, TX. WIPTTE, March 12-15, 2014. Link
Show Abstract:

Show BibTex
@inproceedings{Kim2014easysketch,
 title = {EasySketch: A Sketch-Based Fine Motor Skill Recognizing Educational Interface for Children Emerging
   Technology Research Strand},
 author = {Kim, Hong-hoe and Valentine, Stephanie and Taele, Paul and Hammond, Tracy},
 booktitle = {Workshop on the Impact of Pen & Touch Technology on Education (WIPTTE)},
 year = 2014,
 address = {College Station, TX},
 month = 3,
 organization = {WIPTTE}
}
2014 
PublicationImage
 Prasad, Manoj; Russell, Murat I; Hammond, Tracy. A User Centric Model to Design Tactile Codes with Shapes and Waveforms. Haptics Symposium (HAPTICS). pp. 597-602. Houston, TX. IEEE, February 23-26, 2014. Link
Show Abstract:
The tactile medium of communication with users is appropriate for displaying information in situations where the auditory and visual mediums are saturated. There are situations where a subject's ability to receive information through either of these channels is severely restricted by the environment they are in or by physical impairments that the subject may have. Usually, the tactile information is displayed in the form of codes, and these tactile codes can vary in both the shape and the waveform of the code. Designers use variations in shape or waveform as tactile codes, and the usability of tactile codes depends on the users' ability to distinguish between these variations. We have built two vibrotactile displays, Tactor I and Tactor II, each with nine actuators arranged in a three-by-three matrix with differing contact areas that can represent a total of 511 shapes. We used two dimensions of the tactile medium, shapes and waveforms, to represent verb phrases and evaluated the ability of users to perceive the tactile codes. We propose a measure to rate the distinguishability between two shapes, a graph model with shapes as nodes and distinguishability between shapes as edge weights, and an algorithm to cluster distinguishable shapes. We evaluated the distinguishability of shapes from the clustering algorithm against the experimenter's choice of shapes for tactile codes with eight users. The results show that users can distinguish the shapes proposed by the clustering algorithm with higher accuracy than the shapes chosen by the experimenter. The results from the study also show that users can identify simultaneously presented waveforms and shapes in the codes without a reduction in waveform identification accuracy.

Show BibTex
@INPROCEEDINGS{Prasad2014Model,
 author = {Prasad, Manoj and Russell, Murat I. and Hammond, Tracy Anne},
 booktitle = {Haptics Symposium (HAPTICS), 2014 IEEE},
 title = {A user centric model to design tactile codes with shapes and waveforms},
 year = {2014},
 pages = {597--602},
 keywords = {actuators; display instrumentation; haptic interfaces; user centred design; actuators; auditory mediums; 
   graph model; shape clustering algorithm; tactile code design; tactile codes; tactile information; tactile medium; 
   user centric model; vibrotactile displays; visual mediums; Arrays; Carbon; Clustering algorithms; Heating; Shape; 
   Time factors; Usability},
 doi = {10.1109/HAPTICS.2014.6775523},
 month = {Feb}
}
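The HAPTICS 2014 abstract above proposes a distinguishability measure between shapes, a graph with shapes as nodes and distinguishability as edge weights, and an algorithm to cluster distinguishable shapes. The code below is only a hedged sketch of that general idea, using a greedy selection rather than the authors' clustering algorithm; the shape names, scores, and threshold are hypothetical.

def select_distinguishable(shapes, distinguishability, threshold=0.7):
    """Greedily pick shapes that are mutually distinguishable.

    shapes: list of shape identifiers.
    distinguishability: dict mapping frozenset({a, b}) -> score in [0, 1]
        (edge weights of the shape graph; higher = easier to tell apart).
    threshold: minimum acceptable pairwise score.
    """
    def avg_score(s):
        # Prefer shapes that are, on average, easy to distinguish from the rest.
        others = [o for o in shapes if o != s]
        if not others:
            return 0.0
        return sum(distinguishability[frozenset({s, o})] for o in others) / len(others)

    selected = []
    for shape in sorted(shapes, key=avg_score, reverse=True):
        if all(distinguishability[frozenset({shape, s})] >= threshold for s in selected):
            selected.append(shape)
    return selected

# Hypothetical usage with three shapes drawn on a 3x3 tactor grid:
#   scores = {frozenset({"row", "column"}): 0.9,
#             frozenset({"row", "L"}): 0.8,
#             frozenset({"column", "L"}): 0.4}
#   select_distinguishable(["row", "column", "L"], scores)  ->  ["row", "column"]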
2014 
PublicationImage
 Prasad, Manoj; Russell, Murat; Hammond, Tracy A. Designing Vibrotactile Codes to Communicate Verb Phrases. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM). Volume 11, Number 1, Article 11, pp. 1-21. ACM, September, 2014. Link
Show Abstract:
Soldiers, to guard themselves from enemy assault, have to maintain visual and auditory awareness of their environment. Their visual and auditory senses are thus saturated, which makes these channels less usable for communication. The tactile medium of communication with users is appropriate for displaying information in such situations. Research in interpersonal communication among soldiers shows that the most common form of communication between soldiers involves the use of verb phrases. In this article, we have developed a three-by-three tactile display and proposed a method for mapping the components of a verb phrase to two dimensions of tactile codes: shape and waveform. Perception of tactile codes by users depends on the ability of users to distinguish the shape and waveform of the code. We have proposed a measure to rate the distinguishability of any two shapes and created a graph-based user-centric model using this measure to select distinguishable shapes from a set of all presentable shapes. We conducted two user studies to evaluate the ability of users to perceive tactile information. The results from our first study showed users' ability to perceive tactile shapes, tactile waveforms, and form verb phrases from tactile codes. The recognition accuracy and time taken to distinguish were better when the shapes were selected from the graph model than when shapes were chosen based on intuition. The second user study was conducted to test the performance of users while performing a primary visual task simultaneously with a secondary audio or haptic task. Users were more familiar with perceiving information from an auditory medium than from a haptic medium, which was reflected in their performance. Thus the performance of users in the primary visual task was better while using an audio medium of communication than while using a haptic medium of communication.

Show BibTex
@article{Prasad:2014:DVC,
 author = {Prasad, Manoj and Russell, Murat and Hammond, Tracy A.},
 title = {Designing Vibrotactile Codes to Communicate Verb Phrases},
 journal = {ACM Trans. Multimedia Comput. Commun. Appl.},
 issue_date = {September 2014},
 volume = {11},
 number = {1s},
 month = oct,
 year = {2014},
 issn = {1551-6857},
 pages = {11:1--11:21},
 articleno = {11},
 numpages = {21},
 url = {http://doi.acm.org/10.1145/2637289},
 doi = {10.1145/2637289},
 acmid = {2637289},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {Tactile interface, Vibrotactile pattern perception, communication, graph model, perception model,
   tactile code, user centric design}
}
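The TOMM article maps the components of a verb phrase onto two tactile dimensions, shape and waveform. The toy table below illustrates what such a two-dimensional code could look like in code; the verbs, modifiers, shapes, and waveforms are invented for this example and do not reflect the published code set.

# Hypothetical two-dimensional tactile code table: the verb selects the shape
# drawn on the 3x3 tactor grid, and the modifier selects the vibration waveform.
VERB_TO_SHAPE = {"move": "arrow_up", "stop": "X", "regroup": "circle"}
MODIFIER_TO_WAVEFORM = {"quickly": "short_pulses", "quietly": "low_amplitude", None: "steady"}

def encode_phrase(verb, modifier=None):
    """Return the (shape, waveform) tactile code for a simple verb phrase."""
    try:
        return VERB_TO_SHAPE[verb], MODIFIER_TO_WAVEFORM[modifier]
    except KeyError as unknown:
        raise ValueError(f"no tactile code defined for {unknown}") from None

# encode_phrase("move", "quickly") -> ("arrow_up", "short_pulses")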
2014 
PublicationImage
 Prasad, Manoj; Taele, Paul; Goldberg, Daniel; Hammond, Tracy A. HaptiMoto: Turn-by-Turn Haptic Route Guidance Interface for Motorcyclists. Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems (CHI). pp. 3597-3606. Toronto, Canada. ACM, April 27-May 1, 2014. Link
Show Abstract:
A national study by the Australian Transport Safety Bureau revealed that motorcyclist deaths were nearly thirty times more prevalent than that of drivers of other vehicles. These fatalities represent approximately 5% of all highway deaths each year, yet motorcycles account for only 2% of all registered vehicles in the United States. Motorcyclists are highly exposed on the road, so maintaining situational awareness at all times is crucial. Route guidance systems enable users to efficiently navigate between locations using dynamic visual maps and audio directions, and have been well tested with motorists, but remain unsafe for use by motorcyclists. Audio/visual routing systems decrease motorcyclists’ situational awareness and vehicle control, and thus elevate chances of an accident. To enable motorcyclists to take advantage of route guidance while maintaining situational awareness, we created HaptiMoto, a wearable haptic route guidance system. HaptiMoto uses tactile signals to encode the distance and direction of approaching turns, thus avoiding interference with audio/visual awareness. Our evaluations demonstrate that HaptiMoto is both intuitive and a safer alternative for motorcyclists compared to existing solutions.

Show BibTex
@inproceedings{Prasad:2014:HTH:2611222.2557404,
 author = {Prasad, Manoj and Taele, Paul and Goldberg, Daniel and Hammond, Tracy A.},
 title = {HaptiMoto: Turn-by-turn Haptic Route Guidance Interface for Motorcyclists},
 booktitle = {Proceedings of the 32Nd Annual ACM Conference on Human Factors in Computing Systems},
 series = {CHI '14},
 year = {2014},
 isbn = {978-1-4503-2473-1},
 location = {Toronto, Ontario, Canada},
 pages = {3597--3606},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/2556288.2557404},
 doi = {10.1145/2556288.2557404},
 acmid = {2557404},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {advanced traveler information system, route guidance, tactile interface, vibro-tactile}
} 
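HaptiMoto encodes the distance and direction of an approaching turn as tactile signals. The function below is a speculative sketch of one possible encoding (side selection plus a pulse count that increases as the turn approaches); the motor names, distance bands, and pulse duration are assumptions, not the published design.

def encode_turn(direction, distance_m):
    """Map an upcoming turn to a (motor, pulse_count, pulse_ms) vibration cue.

    direction: 'left' or 'right' selects which side of the wearable vibrates.
    distance_m: distance to the turn in meters; closer turns get more pulses.
    """
    if direction not in ("left", "right"):
        raise ValueError("direction must be 'left' or 'right'")
    if distance_m > 500:
        pulses = 1          # early heads-up
    elif distance_m > 200:
        pulses = 2
    elif distance_m > 50:
        pulses = 3
    else:
        pulses = 5          # imminent turn
    return {"motor": direction, "pulse_count": pulses, "pulse_ms": 150}

# encode_turn("left", 120) -> {"motor": "left", "pulse_count": 3, "pulse_ms": 150}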
2014 
PublicationImage
 Prasad, Manoj; Taele, Paul; Olubeko, Ayo; Hammond, Tracy. HaptiGo: A Navigational ‘Tap on the Shoulder’. Haptics Symposium (HAPTICS). pp. 339-345. Houston, TX. IEEE, February 23-26, 2014. Link
Show Abstract:
Complex inter-personal interactions occur in the course of pedestrian navigation. Within familiar environments, prior knowledge helps pedestrians reach their destination seamlessly. However, in unexplored environments, or when pedestrians are otherwise engaged, greater awareness of the surroundings and higher cognitive load are required. We propose HaptiGo, a lightweight haptic vest that provides pedestrians with both navigational intelligence and obstacle detection capabilities. HaptiGo consists of optimally-placed vibrotactile sensors that utilize natural and small form factor interaction cues, thus emulating the invisible sensation of being passively guided towards the intended direction. We evaluated HaptiGo through a study conducted on a group of pedestrians who were tasked with navigating through several different waypoints while engaged in cognitively demanding tasks. We found that HaptiGo was able to successfully navigate users with timely alerts of incoming obstacles without increasing cognitive load, thereby increasing their environmental awareness. Additionally, we show that users are able to respond to directional information without training.

Show BibTex
@INPROCEEDINGS{Prasad2014Haptigo,
 author = {Prasad, Manoj and Taele, Paul and Olubeko, Ayobami and Hammond, Tracy},
 booktitle = {Haptics Symposium (HAPTICS), 2014 IEEE},
 title = {HaptiGo: A navigational tap on the shoulder},
 year = {2014},
 pages = {339--345},
 keywords = {haptic interfaces; pedestrians; tactile sensors; HaptiGo; cognitive loads; complex inter-personal 
   interactions; environmental awareness; lightweight haptic vest;navigational intelligence; navigational tap on
   the shoulder; obstacle detection capability; optimally-placed vibrotactile sensors; pedestrian navigation; 
   small form factor interaction cues; Belts; Haptic interfaces; Mobile communication; Navigation;Tactile 
   sensors; Vibrations},
 doi = {10.1109/HAPTICS.2014.6775478},
 month = {Feb}
}
2014 
PublicationImage
 Rajanna, Vijay; Lara-Garduno, Raniero; Jyoti Behera, Dev; Madanagopal, Karthic; Goldberg, Daniel; Hammond, Tracy. Step Up Life: A Context Aware Health Assistant. Proceedings of the Third ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health. pp. 21-30. Dallas, Texas. ACM, November 4-7, 2014. Link
Show Abstract:
A recent trend in popular health news is reporting the dangers of prolonged inactivity in one's daily routine. The claims are wide in variety and aggressive in nature, linking a sedentary lifestyle with obesity and shortened lifespans [25]. Rather than forcing an individual to perform a physical exercise for a predefined interval of time, we propose the design, implementation, and evaluation of a context-aware health assistant system (called Step Up Life) that encourages a user to adopt a healthy lifestyle by performing simple, contextually suitable physical exercises. Step Up Life is a smartphone application which provides physical activity reminders to the user while respecting the user's practical constraints, by exploiting context information such as the user's location, personal preferences, calendar events, time of day, and the weather [9]. A fully functional implementation of Step Up Life is evaluated through user studies.

Show BibTex
@inproceedings{Rajanna:2014:SUL:2676629.2676636,
 author = {Rajanna, Vijay and Lara-Garduno, Raniero and Behera, Dev Jyoti and Madanagopal, Karthic and 
   Goldberg, Daniel and Hammond, Tracy},
 title = {Step Up Life: A Context Aware Health Assistant},
 booktitle = {Proceedings of the Third ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health},
 series = {HealthGIS '14},
 year = {2014},
 isbn = {978-1-4503-3136-4},
 location = {Dallas, Texas},
 pages = {21--30},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/2676629.2676636},
 doi = {10.1145/2676629.2676636},
 acmid = {2676636},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {context aware systems, environmental monitoring, geographic information systems, healthgis, 
   individual health, personal health assistant, public health, sensors}
} 
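Step Up Life issues activity reminders only when the context (location, personal preferences, calendar events, time of day, weather) makes an exercise practical. The rule-based function below is a simplified sketch of that style of gating logic; the field names and thresholds are assumptions rather than the deployed application's rules.

from datetime import datetime, time

def should_remind(context):
    """Decide whether to prompt the user for a short exercise break.

    context is a dict with hypothetical keys:
      minutes_sedentary, in_meeting (bool), location ('office', 'home', ...),
      weather ('clear', 'rain', ...), now (datetime).
    """
    now = context.get("now", datetime.now())
    awake_hours = time(8, 0) <= now.time() <= time(22, 0)
    return (
        context["minutes_sedentary"] >= 60      # prolonged inactivity
        and not context["in_meeting"]           # respect calendar events
        and awake_hours                         # reasonable time of day
        and (context["location"] != "outdoors" or context["weather"] != "rain")
    )

# should_remind({"minutes_sedentary": 75, "in_meeting": False,
#                "location": "office", "weather": "clear",
#                "now": datetime(2014, 11, 5, 15, 0)})  -> True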
2014 
PublicationImage
 Taele, Paul; Hammond, Tracy. Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces. Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces (IUI) Doctoral Consortium. pp. 53-55. Haifa, Israel. ACM, February 24, 2014. Link
Show Abstract:
As commercial motion-tracking sensors achieve greater reliability and ubiquity, intelligent sketching user interfaces can expand beyond traditional surface environments toward richer surfaceless sketching interactions. However, relevant techniques for automatically recognizing sketches in surfaceless interaction spaces are either largely constrained, due to the limited gesture input vocabularies of existing gesture recognition techniques, or unexplored, because existing sketch recognition techniques were adapted specifically for surface environments. This dissertation research therefore proposes to investigate techniques for developing intelligent surfaceless sketching user interfaces. The core research work will focus on investigating automated recognition techniques for better understanding the content of surfaceless sketches, and on determining optimal interaction techniques for improving related intuitive sketching cues in those surfaceless interaction spaces.

Show BibTex
@inproceedings{TaeleIUI2014,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces},
 booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces},
 series = {IUI Companion '14},
 year = {2014},
 isbn = {978-1-4503-2729-9},
 location = {Haifa, Israel},
 pages = {53--56},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2559184.2559185},
 doi = {10.1145/2559184.2559185},
 acmid = {2559185},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {natural user interfaces, sketch recognition, surfaceless interaction}
}
2013 
PublicationImage
 Atilola, Olufunmilola; McTigue, Erin M.; Hammond, Tracy; Linsey, Julie. Mechanix: Evaluating the Effectiveness of a Sketch Recognition Truss Tutoring Program Against Other Truss Programs. 120th American Society for Engineering Education Annual Conference & Exposition (ASEE). pp. 15 pages. Atlanta, GA. ASEE, June 23-26, 2013. Link
Show Abstract:
Mechanix is a sketch recognition program that was developed at Texas A&M University. Mechanix provides an efficient and effective means for engineering students to learn how to draw truss free-body diagrams (FBDs) and solve truss problems. The Mechanix interface allows for students to sketch these FBDs, as they normally would by hand, into a tablet computer; a mouse can also be used with a regular computer monitor. Mechanix is able to provide immediate and intelligent feedback to the students, and it tells them if they are missing any components of the FBD. The program is also able to tell students whether their solved reaction forces or member forces are correct or not without actually providing the answers. A recent and exciting feature of Mechanix is the creative design mode which allows students to solve open-ended truss problems; an instructor can give their students specific minimum requirements for a truss/bridge, and the student uses Mechanix to solve and create this truss. The creative design feature of Mechanix can check if the students' truss is structurally sound, and if it meets the minimum requirements stated by the instructor. This paper presents a study to evaluate the effectiveness and advantages of using Mechanix in the classroom as a supplement to traditional teaching and learning methods. Mechanix is also tested alongside an established and popular truss program, WinTruss, to see how learning gains differ and what advantages Mechanix offers over other truss analysis software. Freshman engineering classes were recruited for this experiment and were divided into three conditions: a control condition (students who were not exposed to Mechanix or WinTruss and did their assignments on paper), a Mechanix condition (students who used Mechanix in class and for their assignments), and a WinTruss condition (students who used the WinTruss program for their assignments). The learning gains among these three groups were evaluated using a series of quantitative formal assessments which include a statics concepts inventory, homework sets, quizzes, exam grades and truss/bridge creative design solutions. Qualitative data was also collected through focus groups for all three conditions to gather the students' impressions of the programs for the experimental group and general teaching styles for the control group. Results from previous evaluations show Mechanix highly engages students and helps them learn basic truss mechanics. This evaluation will be compared with previous evaluations to show that Mechanix continues to be a great tool for enhancing student learning.

Show BibTex
@inproceedings{Atilola2013ASEE,
 title = {Mechanix: Evaluating the Effectiveness of a Sketch Recognition Truss Tutoring Program Against Other Truss Programs},
 author = {Atilola, Olufumilola and McTigue, Erin M. and Hammond, Tracy and Linsey, Julie},
 booktitle = {120th American Society for Engineering Education Annual Conference & Exposition (ASEE). June 23-26},
 year = 2013,
 address = {Atlanta, GA},
 month = 6,
 organization = {ASEE},
 note = {15 pages}
}
2013 
PublicationImage
 Bartley, Joey; Forsyth, Jonathon; Pendse, Prachi; Xin, Da; Brown, Garrett; Hagseth, Paul; Agrawal, Ashish; Goldberg, Daniel W; Hammond, Tracy. World of Workout: a Contextual Mobile RPG to Encourage Long Term Fitness. Proceedings of the Second ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health. pp. 60-67. Orlando, FL. ACM, November 5, 2013. Link
Show Abstract:
In today's digital world many individuals spend their day in front of a computer or mobile phone for entertainment, and advances in technology allow individuals to enjoy a more sedentary lifestyle. This is one of the leading factors contributing to a decrease in fitness level for large parts of the population in developed countries. We designed a mobile role-playing game (RPG) where the character evolves based on the exercises the user performs in reality. This design can motivate and persuade a potentially large demographic of users to engage in physical activity for an extended period of time through the enjoyment of an engaging game. This novel application has shown the capability of automatically identifying and counting the exercises performed by the user. This automatic activity recognition and enumeration is performed solely through the accelerometer of a single smartphone held by the user while exercising. The character's speed, strength, and stamina improve based on the type and amount of exercise performed.

Show BibTex
@inproceedings{Bartley:2013:WWC:2535708.2535718,
 author = {Bartley, Joey and Forsyth, Jonathon and Pendse, Prachi and Xin, Da and Brown, Garrett and Hagseth,
   Paul and Agrawal, Ashish and Goldberg, Daniel W. and Hammond, Tracy},
 title = {World of Workout: A Contextual Mobile RPG to Encourage Long Term Fitness},
 booktitle = {Proceedings of the Second ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health},
 series = {HealthGIS '13},
 year = {2013},
 isbn = {978-1-4503-2529-5},
 location = {Orlando, Florida},
 pages = {60--67},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2535708.2535718},
 doi = {10.1145/2535708.2535718},
 acmid = {2535718},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {GIS, activity recognition, exergaming, fitness, gaming, health, location, motivation, pattern recognition,
   smartphones, wearable computing}
} 
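World of Workout identifies and counts exercises using only a single smartphone accelerometer. As a generic, hedged illustration of repetition counting, and not the paper's recognition method, the code below detects peaks in the smoothed acceleration magnitude; the smoothing window, threshold, and minimum peak spacing are assumptions.

import math

def count_reps(samples, window=5, threshold=11.5, min_gap=10):
    """Count repetitions from accelerometer samples [(ax, ay, az), ...] in m/s^2.

    A repetition is counted at each local peak of the smoothed magnitude that
    exceeds `threshold` and is at least `min_gap` samples after the previous peak.
    """
    mags = [math.sqrt(ax * ax + ay * ay + az * az) for ax, ay, az in samples]
    smoothed = []
    for i in range(len(mags)):
        start = max(0, i - window + 1)          # simple moving-average smoothing
        smoothed.append(sum(mags[start:i + 1]) / (i + 1 - start))
    reps, last_peak = 0, -min_gap
    for i in range(1, len(smoothed) - 1):
        is_peak = smoothed[i - 1] < smoothed[i] >= smoothed[i + 1]
        if is_peak and smoothed[i] > threshold and i - last_peak >= min_gap:
            reps += 1
            last_peak = i
    return reps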
2013 
PublicationImage
 Cummings, Danielle; Prasad, Manoj; Lucchese, George; Aikens, Christopher; Hammond, Tracy. Multi-Modal Location-Aware System for Paratrooper Team Coordination. CHI'13 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 2385-2387. Paris, France. ACM, April 27-May 2, 2013. Link
Show Abstract:
Navigation and assembly are critical tasks for Soldiers in battlefield situations [3]. Paratroopers, in particular, must be able to parachute into a battlefield and locate and assemble their equipment as quickly and quietly as possible. Current assembly methods rely on bulky and antiquated equipment that inhibits the speed and effectiveness of such operations. To address this we have created a multi-modal mobile navigation system that uses ruggedized beacons to mark assembly points and smartphones to assist in navigating to these points while minimizing cognitive load and maximizing situational awareness. To achieve this task, we implemented a novel beacon receiver protocol that allows an unlimited number of receivers to listen to the encrypted beaconing message using only ad-hoc Wi-Fi technologies. The system was evaluated by U.S. Army Paratroopers and proved quick to learn and efficient at moving Soldiers to navigation waypoints. Beyond military operations, this system could be applied to any task that requires the assembly and coordination of many individuals or teams, such as emergency evacuations, fighting wildfires, or locating airdropped humanitarian aid.

Show BibTex
@inproceedings{CummingsCHI2013,
 author = {Cummings, Danielle and Prasad, Manoj and Lucchese, George and Aikens, Christopher and Hammond, Tracy A.},
 title = {Multi-modal Location-aware System for Paratrooper Team Coordination},
 booktitle = {CHI '13 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '13},
 year = {2013},
 isbn = {978-1-4503-1952-2},
 location = {Paris, France},
 pages = {2385--2388},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2468356.2468779},
 doi = {10.1145/2468356.2468779},
 acmid = {2468779},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {ad-hoc networks, location-aware, military applications, user-centered design}
}
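The abstract mentions a beacon receiver protocol in which any number of receivers listen to a beaconing message over ad-hoc Wi-Fi. The sketch below shows the general shape of such a scheme using plain UDP broadcast in Python; the port number is made up, and the encryption required by the real protocol is deliberately omitted here (in practice it would wrap the JSON payload).

import json
import socket
import time

BROADCAST_ADDR = ("255.255.255.255", 54545)   # hypothetical port

def broadcast_beacon(beacon_id, lat, lon, interval_s=2.0):
    """Periodically broadcast this beacon's GPS fix over UDP broadcast.

    Payload encryption (required in the real system) is omitted in this sketch.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    while True:
        payload = json.dumps({"id": beacon_id, "lat": lat, "lon": lon,
                              "ts": time.time()}).encode("utf-8")
        sock.sendto(payload, BROADCAST_ADDR)
        time.sleep(interval_s)

def listen_for_beacons(port=54545):
    """Receive beacon fixes; any number of receivers can listen passively."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("", port))
    while True:
        data, _addr = sock.recvfrom(4096)
        yield json.loads(data.decode("utf-8"))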
2013 
PublicationImage
 Damaraju, Sashikanth; Seo, Jinsil Hwaryoung; Hammond, Tracy; Kerne, Andruid. Multi-Tap Sliders: Advancing Touch Interaction for Parameter Adjustment. Proceedings of the 2013 International Conference on Intelligent User Interfaces (IUI). pp. 445-452. Santa Monica, CA. ACM, March 19-22, 2013. Link
Show Abstract:
Research in multi-touch interaction has typically focused on direct spatial manipulation; techniques have been created to produce the most intuitive mapping between the movement of the hand and the resulting change in the virtual object. However, as we attempt to design for more complex operations, the expectation of spatial manipulation becomes infeasible. We introduce Multi-tap Sliders for operation in what we call abstract parametric spaces that do not have an obvious literal spatial representation, such as exposure, brightness, contrast, and saturation for image editing. This new widget design promotes multi-touch interaction for prolonged use in scenarios that require adjustment of multiple parameters as part of an operation. The multi-tap sliders encourage the user to keep her visual focus on the target, instead of requiring her to look back at the interface. Our research emphasizes ergonomics, clear visual design, and fluid transition between the selection of parameters and their subsequent adjustment for a given operation. We demonstrate a new technique for quickly selecting and adjusting multiple numerical parameters. A preliminary user study points out improvements over traditional sliders.

Show BibTex
@inproceedings{DamarajuIUI2013,
 author = {Damaraju, Sashikanth and Seo, Jinsil Hwaryoung and Hammond, Tracy and Kerne, Andruid},
 title = {Multi-tap Sliders: Advancing Touch Interaction for Parameter Adjustment},
 booktitle = {Proceedings of the 2013 International Conference on Intelligent User Interfaces},
 series = {IUI '13},
 year = {2013},
 isbn = {978-1-4503-1965-2},
 location = {Santa Monica, California, USA},
 pages = {445--452},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2449396.2449453},
 doi = {10.1145/2449396.2449453},
 acmid = {2449453},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {exploratory interfaces, multi-touch, parametric spaces}
}
2013 
PublicationImage
 Goldberg, Daniel W; Cockburn, Myles G; Hammond, Tracy A; Jacquez, Geoffrey M; Janies, Daniel; Knoblock, Craig; Kuhn, Werner; Pultar, Edward; Raubal, Martin. Envisioning a Future for a Spatial-Health CyberGIS Marketplace. Proceedings of the Second ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health. pp. 27-30. Orlando, FL. ACM, November 5, 2013. Link
Show Abstract:
In this position paper, we describe a vision for the future of a so-called “Spatial-Health CyberGIS Marketplace”. We first situate this proposed new computing ecosystem within the set of currently-available enabling technologies and techniques. We next provide a detailed vision of the capabilities and features of an ecosystem that will benefit individuals, industries, and government agencies. We conclude with a set of research challenges, both technological & societal, which must be overcome in order for such a vision to be fully realized.

Show BibTex
@inproceedings{Goldberg:2013:EFS:2535708.2535716,
 author = {Goldberg, Daniel W. and Cockburn, Myles G. and Hammond, Tracy A. and Jacquez, Geoffrey M. and
   Janies, Daniel and Knoblock, Craig and Kuhn, Werner and Pultar, Edward and Raubal, Martin},
 title = {Envisioning a Future for a Spatial-health CyberGIS Marketplace},
 booktitle = {Proceedings of the Second ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health},
 series = {HealthGIS '13},
 year = {2013},
 isbn = {978-1-4503-2529-5},
 location = {Orlando, Florida},
 pages = {27--30},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2535708.2535716},
 doi = {10.1145/2535708.2535716},
 acmid = {2535716},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {HealthGIS, data integration, environmental monitoring, health interventions, individual health,
   online analytics, public health, sensors, streaming data}
}
2013 
PublicationImage
 Hammond, Tracy; Linsey, Julie. AIEDAM Special Issue CFP, May 2014, Vol. 28, No. 2. Artificial Intelligence for Engineering Design, Analysis and Manufacturing. Volume 27, Number 1, pp. 83-84. Cambridge University Press, February, 2013. Link
Show Abstract:

Show BibTex
@article{Hammond2013AIE,
 author = {Hammond,Tracy and Linsey, Julie},
 title = {AI EDAM Special Issue, May 2014, Vol. 28, No. 2},
 journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing},
 volume = {27},
 issue = {01},
 month = {2},
 year = {2013},
 issn = {1469-1760},
 pages = {83--84},
 numpages = {2},
 doi = {10.1017/S089006041200039X},
 URL = {http://journals.cambridge.org/article_S089006041200039X}
}
2013 
PublicationImage
 Kim, Hong-hoe; Taele, Paul; Valentine, Stephanie; McTigue, Erin; Hammond, Tracy. KimCHI: A Sketch-Based Developmental Skill Classifier to Enhance Pen-Driven Educational Interfaces for Children. Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling, Expressive 2013 - The Joint Symposium on Computational Aesthetics and Sketch-Based Interfaces and Modeling and Non-Photorealistic Animation and Rendering. pp. 33-42. Anaheim, CA. ACM, July 19-21, 2013. Link
Show Abstract:
Sketching is one of the many valuable lifelong skills that children require in their overall development, and many educational psychologists manually analyze children's sketches to assess their developmental progress. The disadvantages of manual assessment are that it is time-consuming and prone to human error and bias, so intelligent sketching interfaces have strong potential in automating this process. Unfortunately, current sketch recognition techniques concentrate solely on recognizing the meaning of sketches, rather than the sketcher's developmental skill, and do not perform well on children's sketched input, as most are trained on and developed for adults' sketches. We introduce our proposed solution called KimCHI, a specialized sketch classification technique which utilizes a sketching interface for assessing the developmental skills of children from their sketches. Our approach relies on sketch feature selection to automatically classify the developmental progress of children's sketches as either developmental or mature. We evaluated our classifiers through a user study, and our classifiers were able to differentiate the users' developmental skill and gender with reasonable accuracy. We subsequently created an initial sketching interface utilizing our specialized classifier, called EasySketch, to demonstrate educational applications that assist children in developing their sketching skills.

Show BibTex
@inproceedings{Kim:2013:KSD:2487381.2487389,
 author = {Kim, Hong-hoe and Taele, Paul and Valentine, Stephanie and McTigue, Erin and Hammond, Tracy},
 title = {KimCHI: A Sketch-based Developmental Skill Classifier to Enhance Pen-driven Educational Interfaces for Children},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '13},
 year = {2013},
 isbn = {978-1-4503-2205-8},
 location = {Anaheim, California},
 pages = {33--42},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/2487381.2487389},
 doi = {10.1145/2487381.2487389},
 acmid = {2487389},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {EasySketch, KimCHI: children's developmental skill classifier, age and gender difference}
}
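KimCHI classifies a child's sketch as developmental or mature through sketch feature selection. The snippet below is only a schematic of such a pipeline: a few generic stroke features (not the paper's feature set) feeding an off-the-shelf scikit-learn classifier. It assumes scikit-learn is installed and that labeled training sketches are available; the labels are hypothetical.

import math
from sklearn.ensemble import RandomForestClassifier

def stroke_features(stroke):
    """Generic per-stroke features (illustrative, not the published feature set).

    stroke: list of (x, y, t) points. Returns [path_length, average_speed,
    bounding_box_diagonal, curviness], where curviness = path_length / diagonal.
    """
    length = sum(math.dist(stroke[i - 1][:2], stroke[i][:2]) for i in range(1, len(stroke)))
    duration = max(stroke[-1][2] - stroke[0][2], 1e-6)
    xs, ys = [p[0] for p in stroke], [p[1] for p in stroke]
    diagonal = math.hypot(max(xs) - min(xs), max(ys) - min(ys)) or 1e-6
    return [length, length / duration, diagonal, length / diagonal]

def train_skill_classifier(strokes, labels):
    """Fit a classifier on labeled strokes: 0 = developmental, 1 = mature (hypothetical)."""
    model = RandomForestClassifier(n_estimators=100, random_state=0)
    model.fit([stroke_features(s) for s in strokes], labels)
    return model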
2013 
PublicationImage
 Taele, Paul; Hammond, Tracy. Adapting Surface Sketch Recognition Techniques for Surfaceless Sketches. Proceedings of the Twenty-Third International Joint Conference on Artificial Intelligence (IJCAI). pp. 3243-3244. Beijing, China. AAAI Press, August 3-9, 2013. Link
Show Abstract:
Researchers have made significant strides in developing recognition techniques for surface sketches, with realized and potential applications to motivate extending these techniques towards analogous surfaceless sketches. Yet surface sketch recognition techniques remain largely untested in surfaceless environments and are still highly constrained for related surfaceless gesture recognition techniques. The focus of the research is to investigate the performance of surface sketch recognition techniques in more challenging surfaceless environments, with the aim of addressing existing limitations through improved surfaceless sketch recognition techniques.

Show BibTex
@inproceedings{Taele:2013:ASS:2540128.2540630,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Adapting Surface Sketch Recognition Techniques for Surfaceless Sketches},
 booktitle = {Proceedings of the Twenty-Third International Joint Conference on Artificial Intelligence},
 series = {IJCAI '13},
 year = {2013},
 isbn = {978-1-57735-633-2},
 location = {Beijing, China},
 pages = {3243--3244},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=2540128.2540630},
 acmid = {2540630},
 publisher = {AAAI Press}
} 
2013 
PublicationImage
 Valentine, Stephanie; Vides, Francisco; Lucchese, George; Turner, David; Kim, Hong-hoe; Li, Wenzhe; Linsey, Julie; Hammond, Tracy. Mechanix: A Sketch-Based Tutoring and Grading System for Free-Body Diagrams. AI Magazine. Volume 34, Number 1, pp. 55-66. AAAI, January, 2013. Link
Show Abstract:
Introductory engineering courses within large universities often have annual enrollments that can reach up to a thousand students. In this article, we introduce Mechanix, a sketch-based deployed tutoring system for engineering students enrolled in statics courses. Our system not only allows students to enter planar truss and free-body diagrams into the system, just as they would with pencil and paper, but also checks the student's work against a hand-drawn answer entered by the instructor, and then returns immediate and detailed feedback to the student. Students are allowed to correct any errors in their work and resubmit until the entire content is correct and thus all of the objectives are learned. Since Mechanix facilitates the grading and feedback processes, instructors are now able to assign more free-response questions, increasing teachers' knowledge of student comprehension. Furthermore, the iterative correction process allows students to learn during a test, rather than simply display memorized information.

Show BibTex
@article{Valentine2013Mechanix,
 author = {Valentine, Stephanie and Vides, Francisco and Lucchese, George and Turner, David and Kim, Hong-hoe
   and Li, Wenzhe and Linsey, Julie and Hammond, Tracy},
 title = {Mechanix: A Sketch-Based Tutoring and Grading System for Free-Body Diagrams},
 journal = {AI Magazine},
 year = {2013},
 volume = {34},
 number = {1},
 pages = {55--66}
}
2012 
PublicationImage
 Atilola, Olufunmilola; Vides, Francisco; Mctigue, Erin M; Linsey, Julie S; Hammond, Tracy Anne. Automatic Identification of Student Misconceptions and Errors for Truss Analysis. 119th American Society for Engineering Education Annual Conference & Exposition (ASEE). pp. 13 pages. San Antonio, TX. ASEE, June 10-13, 2012. Link
Show Abstract:
Mechanix is a sketch recognition program that was developed at Texas A&M University. Mechanix provides an efficient and effective means for engineering students to learn how to draw truss free-body diagrams (FBDs) and solve truss problems. The Mechanix interface allows for students to sketch these FBDs, as they normally would by hand, into a tablet computer; a mouse can also be used with a regular computer monitor. Mechanix is able to provide immediate and intelligent feedback to the students, and it tells them if they are missing any components of the FBD. The program is also able to tell students whether their solved reaction forces or member forces are correct or not without actually providing the answers. A recent and exciting feature of Mechanix is the creative design mode which allows students to solve open-ended truss problems; an instructor can give their students specific minimum requirements for a truss/bridge, and the student uses Mechanix to solve and create this truss. The creative design feature of Mechanix can check if the students' truss is structurally sound, and if it meets the minimum requirements stated by the instructor. This paper presents a study to evaluate the effectiveness and advantages of using Mechanix in the classroom as a supplement to traditional teaching and learning methods. Mechanix is also tested alongside an established and popular truss program, WinTruss, to see how learning gains differ and what advantages Mechanix offers over other truss analysis software. Freshman engineering classes were recruited for this experiment and were divided into three conditions: a control condition (students who were not exposed to Mechanix or WinTruss and did their assignments on paper), a Mechanix condition (students who used Mechanix in class and for their assignments), and a WinTruss condition (students who used the WinTruss program for their assignments). The learning gains among these three groups were evaluated using a series of quantitative formal assessments which include a statics concepts inventory, homework sets, quizzes, exam grades and truss/bridge creative design solutions. Qualitative data was also collected through focus groups for all three conditions to gather the students' impressions of the programs for the experimental group and general teaching styles for the control group. Results from previous evaluations show Mechanix highly engages students and helps them learn basic truss mechanics. This evaluation will be compared with previous evaluations to show that Mechanix continues to be a great tool for enhancing student learning.

Show BibTex
@inproceedings{Atilola2012ASEE,
 title = {Automatic Identification of Student Misconceptions and Errors for Truss Analysis},
 author = {Atilola, Olufunmilola and Vides, Francisco and Mctigue, Erin M and Linsey, Julie S and Hammond, Tracy Anne},
 booktitle = {119th American Society for Engineering Education Annual Conference & Exposition (ASEE). June 10–13},
 year = 2012,
 address = {San Antonio, TX},
 month = 6,
 organization = {ASEE},
 note = {13 pages}
}
2012 
PublicationImage
 Cummings, Danielle; Fymat, Stephane; Hammond, Tracy. Sketch-Based Interface for Interaction with Unmanned Air Vehicles. CHI'12 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 1511-1516. Austin, TX. ACM, May 5-10, 2012. Link
Show Abstract:
In order to decrease the number of casualties and limit the number of potentially dangerous situations that Soldiers encounter, the US military is exploring the use of autonomous Unmanned Aircraft Systems (UAS) to fulfill air support requests (ASR) from the field. The interface for this system must provide interaction in modes that facilitate the completion of the support request in various scenarios, and it must be usable by operators of all skill levels, without requiring extensive training or considerable expertise. Sketches are a simple and natural way to exchange information and ideas. Sketching as a form of human-computer interaction can be very useful in areas where information is represented graphically. In this paper we present the development of an interface that allows the user to plan an ASR using sketch and other inputs while conforming to the user's mental model of natural interaction.

Show BibTex
@inproceedings{Cummings:2012:SII:2212776.2223664,
 author = {Cummings, Danielle and Fymat, Stephane and Hammond, Tracy},
 title = {Sketch-based Interface for Interaction with Unmanned Air Vehicles},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {1511--1516},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223664},
 doi = {10.1145/2212776.2223664},
 acmid = {2223664},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {multimodal interaction, sketch recognition, unmanned air system}
} 
2012 
PublicationImage
 Cummings, Danielle; Lucchese, George; Prasad, Manoj; Aikens, Chris; Ho, Jimmy; Hammond, Tracy. GeoTrooper: A Mobile Location-Aware System for Team Coordination. Proceedings of the 13th International Conference of the NZ Chapter of the ACM's Special Interest Group on Human-Computer Interaction (CHINZ). pp. 102. Dunedin, New Zealand. ACM, July 2-3, 2012. Link
Show Abstract:
Navigation and assembly are critical tasks for Soldiers in battlefield situations. Soldiers must locate equipment, supplies, and teammates quickly and quietly in order to ensure the success of their mission. This task can be extremely difficult and take a significant amount of time without guidance or extensive experience. To facilitate the re-assembly and coordination of airborne paratrooper teams, we have developed a location-aware system that uses an ad-hoc Wi-Fi network to broadcast and receive GPS coordinates of equipment and/or rendezvous points. The system consists of beacons, ruggedized computers placed at assembly points that broadcast their position over Wi-Fi, and receivers, handheld Android devices which orient the user towards the beacons and/or any predetermined coordinates.

Show BibTex
@inproceedings{Cummings:2012:GML:2379256.2379286,
 author = {Cummings, Danielle and Lucchese, George and Prasad, Manoj and Aikens, Chris and Ho, Jimmy and Hammond, Tracy},
 title = {GeoTrooper: A Mobile Location-aware System for Team Coordination},
 booktitle = {Proceedings of the 13th International Conference of the NZ Chapter of the ACM's Special Interest
   Group on Human-Computer Interaction},
 series = {CHINZ '12},
 year = {2012},
 isbn = {978-1-4503-1474-9},
 location = {Dunedin, New Zealand},
 pages = {102--102},
 numpages = {1},
 url = {http://doi.acm.org/10.1145/2379256.2379286},
 doi = {10.1145/2379256.2379286},
 acmid = {2379286},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {location-based system, military applications, mobile-computing}
}
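GeoTrooper receivers orient the user toward the GPS coordinates broadcast by a beacon. A standard way to do this, shown here as a generic sketch rather than the app's code, is to compute the initial great-circle bearing from the receiver's fix to the beacon's fix and compare it with the device's compass heading.

import math

def bearing_deg(lat1, lon1, lat2, lon2):
    """Initial great-circle bearing, in degrees from north, from point 1 to point 2."""
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dlon = math.radians(lon2 - lon1)
    y = math.sin(dlon) * math.cos(phi2)
    x = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dlon)
    return (math.degrees(math.atan2(y, x)) + 360.0) % 360.0

# An on-screen arrow would then point at (bearing_deg(...) - compass_heading) % 360.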
2012 
PublicationImage
 Cummings, Danielle; Lucchese, George; Prasad, Manoj; Aikens, Chris; Ho, Jimmy; Hammond, Tracy. Haptic and AR Interface for Paratrooper Coordination. Proceedings of the 13th International Conference of the NZ Chapter of the ACM's Special Interest Group on Human-Computer Interaction (CHINZ). pp. 52-55. Dunedin, New Zealand. ACM, July 2-3, 2012. Link
Show Abstract:
Applications that use geolocation data are becoming a common addition to GPS-enabled devices. In terms of mobile computing, there is extensive research in progress to create human-computer interfaces that integrate seamlessly with the user’s tasks. When viewing location-based data in a real-world environment, a natural interaction would be to allow the user to see relevant information based on his or her location within an environment. In this paper, we discuss the use of a multi-modal interface that uses haptic feedback and augmented reality to deliver navigation information to paratroopers in the field. This interface was developed for GeoTrooper, a location-based tracking system that visualizes GPS data broadcast by mobile beacons.

Show BibTex
@inproceedings{Cummings:2012:HAI:2379256.2379265,
 author = {Cummings, Danielle and Lucchese, George and Prasad, Manoj and Aikens, Chris and Ho, Jimmy and Hammond, Tracy},
 title = {Haptic and AR Interface for Paratrooper Coordination},
 booktitle = {Proceedings of the 13th International Conference of the NZ Chapter of the ACM's Special Interest
   Group on Human-Computer Interaction},
 series = {CHINZ '12},
 year = {2012},
 isbn = {978-1-4503-1474-9},
 location = {Dunedin, New Zealand},
 pages = {52--55},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2379256.2379265},
 doi = {10.1145/2379256.2379265},
 acmid = {2379265},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {location-based system, military applications, mobile-computing}
}
2012 
PublicationImage
 Cummings, Danielle; Fymat, Stephane; Hammond, Tracy. RedDog: A Smart Sketch Interface for Autonomous Aerial Systems. Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling (SBIM). pp. 21-28. Annecy, France. Eurographics Association, June 4-6, 2012. Link
Show Abstract:
In order to decrease the number of casualties and limit the number of potentially dangerous situations that Soldiers encounter, the US military is exploring the use of autonomous Unmanned Aircraft Systems (UAS) to fulfill air support requests (ASR) from the field. The interface for such a system must provide interaction in modes that facilitate the completion of the support request in various scenarios, and it must be usable by operators of all skill levels, without requiring extensive training or considerable expertise. Sketches are a simple and natural way to exchange graphical information and ideas. In this paper we present the development of an interface that allows the user to plan an ASR using sketch and other inputs while conforming to the user's mental model of natural interaction.

Show BibTex
@inproceedings{Cummmings:2012:RSS:2331067.2331071,
 author = {Cummings, D. and Fymat, S. and Hammond, T.},
 title = {RedDog: A Smart Sketch Interface for Autonomous Aerial Systems},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '12},
 year = {2012},
 isbn = {978-3-905674-42-2},
 location = {Annecy, France},
 pages = {21--28},
 numpages = {8},
 url = {http://dl.acm.org/citation.cfm?id=2331067.2331071},
 acmid = {2331071},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland, Switzerland}
}
2012 
PublicationImage
 Cummings, Danielle; Vides, Francisco; Hammond, Tracy. I Don't Believe My Eyes!: Geometric Sketch Recognition for a Computer Art Tutorial. Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling (SBIM). pp. 97-106. Annecy, France. Eurographics Association, June 4-6, 2012. Link
Show Abstract:
Drawing is a common form of communication and a means of artistic expression. Many of us believe that the ability to draw accurate representations of objects is a skill that either comes naturally or is the result of hours of study or practice or both. As a result many people become intimidated when confronted with the task of drawing. Many books and websites have been developed to teach people step-by-step skills to draw various objects, but they lack the live feedback of a human examiner. We designed EyeSeeYou, a sketch recognition system that teaches users to draw eyes using a simple drawing technique. The system automatically evaluates the freehand drawn sketch of an eye at various stages during creation. We conducted frequent evaluations of the system in order to take an iterative development approach based on user feedback. Our system balances the flexibility of free-hand drawing with step-by-step instructions and realtime assessment. It also provides rigorous feedback to create a constructive learning environment to aid the user in improving her drawing. This paper describes the implementation details of the sketch recognition system. A similar implementation method could be used to provide sketching tutorials for a wide number of images.

Show BibTex
@inproceedings{Cummmings:2012:IDB:2331067.2331082,
 author = {Cummings, D. and Vides, F. and Hammond, T.},
 title = {I Don't Believe My Eyes!: Geometric Sketch Recognition for a Computer Art Tutorial},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '12},
 year = {2012},
 isbn = {978-3-905674-42-2},
 location = {Annecy, France},
 pages = {97--106},
 numpages = {10},
 url = {http://dl.acm.org/citation.cfm?id=2331067.2331082},
 acmid = {2331082},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland, Switzerland}
}
2012 
PublicationImage
 Li, Wenzhe; Hammond, Tracy. Using Scribble Gestures to Enhance Editing Behaviors of Sketch Recognition Systems. CHI'12 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 2213-2218. Austin, TX. ACM, May 5-10, 2012. Link
Show Abstract:
Mechanix is a computer-assisted tutoring system for engineering students. It uses recognition of freehand sketches to provide instant, detailed, and formative feedback as a student progresses through each homework problem. By using recognition algorithms, the system allows students to solve free-body diagrams and truss problems as if they were using a pen and paper. However, the system currently provides little support for students to edit their drawings by using freehand sketches. Specifically, students may wish to delete part or the whole of a line or shape, and the natural response is to scribble that part of the shape out. We developed a new method for integrating scribble gestures into a sketch recognition system. The algorithm automatically identifies and distinguishes scribble gestures from regular drawing input using three features. If the stroke is classified as a scribble, then the algorithm further decides which shape, or which part of a shape, should be deleted. Instead of using slower brute-force methods, we use geometric-based linear-time algorithms which efficiently detect a scribble gesture and remove the intended shapes in real-time.
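
The abstract does not name the three classification features, so the following minimal Python sketch illustrates the general idea with two invented, linear-time geometric features (ink density relative to the bounding box and the rate of sharp direction changes) and invented thresholds; it is an assumption-laden illustration, not the published algorithm.

# Hypothetical scribble-vs-drawing check; the features and thresholds below
# are illustrative assumptions, not the paper's actual three features.
import math

def stroke_features(points):
    """points: list of (x, y) tuples sampled along one stroke."""
    n = len(points)
    # Ink density: total path length relative to the bounding-box diagonal.
    path = sum(math.dist(points[i], points[i + 1]) for i in range(n - 1))
    xs, ys = zip(*points)
    diag = math.dist((min(xs), min(ys)), (max(xs), max(ys))) or 1.0
    density = path / diag
    # Turn rate: fraction of interior points where direction reverses sharply.
    turns = 0
    for i in range(1, n - 1):
        v1 = (points[i][0] - points[i - 1][0], points[i][1] - points[i - 1][1])
        v2 = (points[i + 1][0] - points[i][0], points[i + 1][1] - points[i][1])
        if v1[0] * v2[0] + v1[1] * v2[1] < 0:   # angle between segments > 90 degrees
            turns += 1
    return density, turns / max(n - 2, 1)

def is_scribble(points, density_thresh=6.0, turn_thresh=0.2):
    density, turn_rate = stroke_features(points)
    return density > density_thresh and turn_rate > turn_thresh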

Show BibTex
@inproceedings{Li:2012:USG:2212776.2223778,
 author = {Li, Wenzhe and Hammond, Tracy},
 title = {Using Scribble Gestures to Enhance Editing Behaviors of Sketch Recognition Systems},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {2213--2218},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223778},
 doi = {10.1145/2212776.2223778},
 acmid = {2223778},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {editing, gesture recognition, pen-input computing, sketch recognition}
} 
2012 
PublicationImage
 Lucchese, George; Field, Martin; Ho, Jimmy; Gutierrez-Osuna, Ricardo; Hammond, Tracy. GestureCommander: Continuous Touch-Based Gesture Prediction. CHI'12 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 1925-1930. Austin, TX. ACM, May 5-10, 2012. Link
Show Abstract:
GestureCommander is a touch-based gesture control system for mobile devices that is able to recognize gestures as they are being performed. Continuous recognition allows the system to provide visual feedback to the user and to anticipate user commands to possibly decrease perceived response time. To achieve this goal we employ two Hidden Markov Model (HMM) systems, one for recognition and another for generating visual feedback. We analyze a set of geometric features used in other gesture recognition systems and determine a subset that works best for HMMs. Finally we demonstrate the practicality of our recognition HMMs in a proof of concept mobile application for Google’s Android mobile platform that has a recognition accuracy rate of 96% over 15 distinct gestures.
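
For readers unfamiliar with the recognition side, the sketch below shows a generic forward-pass scoring of a discrete observation sequence against per-gesture HMMs; the assumption that gestures are encoded as quantized stroke-direction symbols, and all parameter shapes, are invented for illustration and do not cover the continuous prediction or the feedback HMM described above.

# Generic discrete-observation HMM scoring; model parameters are assumed to be
# trained elsewhere. start_p: (N,), trans_p: (N, N), emit_p: (N, M).
import numpy as np

def forward_log_likelihood(obs, start_p, trans_p, emit_p):
    """Log-likelihood of an observation-symbol sequence under one HMM."""
    alpha = np.log(start_p) + np.log(emit_p[:, obs[0]])
    for o in obs[1:]:
        alpha = np.logaddexp.reduce(alpha[:, None] + np.log(trans_p), axis=0) \
                + np.log(emit_p[:, o])
    return np.logaddexp.reduce(alpha)

def recognize(obs, models):
    """models: dict mapping gesture name -> (start_p, trans_p, emit_p)."""
    return max(models, key=lambda name: forward_log_likelihood(obs, *models[name]))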

Show BibTex
@inproceedings{Lucchese:2012:GCT:2212776.2223730,
 author = {Lucchese, George and Field, Martin and Ho, Jimmy and Gutierrez-Osuna, Ricardo and Hammond, Tracy},
 title = {GestureCommander: Continuous Touch-based Gesture Prediction},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {1925--1930},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223730},
 doi = {10.1145/2212776.2223730},
 acmid = {2223730},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {gesture recognition, mobile computing}
} 
2012 
PublicationImage
 Prasad, Manoj; Hammond, Tracy. Observational Study on Teaching Artifacts Created Using Tablet PC. CHI'12 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 301-316. Austin, TX. ACM, May 5-10, 2012. Link
Show Abstract:
Teaching typically involves communication of knowledge in multiple modalities. The ubiquity of pen-enabled technologies in teaching has made the accurate capture of user ink data possible, alongside technologies to recognize voice data. When annotating on a white board or other presentation surface, teachers often have a specific style of structuring contents taught in a lecture. The availability of sketch data and voice data can enable researchers to analyze trends followed by teachers in writing and annotating notes. Using ethnographic methods, we have observed the structure that teachers use while presenting lectures on mathematics. We have observed the practices followed by teachers in writing and speaking the lecture content, and have derived models that would help computer scientists identify the structure of the content. This observational study motivates the idea that we can use speech and color change events to distinguish between strokes meant for drawing versus those meant for attention marks.

Show BibTex
@inproceedings{Prasad:2012:OST:2212776.2212809,
 author = {Prasad, Manoj and Hammond, Tracy},
 title = {Observational Study on Teaching Artifacts Created Using Tablet PC},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {301--316},
 numpages = {16},
 url = {http://doi.acm.org/10.1145/2212776.2212809},
 doi = {10.1145/2212776.2212809},
 acmid = {2212809},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {ethnography, multimodal, pen enabled technologies, sketch recognition, sketching, tablet pc}
} 
2012 
PublicationImage
 Taele, Paul; Hammond, Tracy. Initial Approaches for Extending Sketch Recognition to Beyond-Surface Environments. CHI'12 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 2039-2044. Austin, TX. ACM, May 5-10, 2012. Link
Show Abstract:
Sketch recognition researchers have long concentrated their energies on investigating issues related to computer systems’ difficulties in recognizing hand-drawn diagrams, but the focus has largely been on recognizing sketches on physical surfaces. While beyond-surface sketching actively takes place in diverse forms and in various activities, directly applying existing on-surface sketch recognition techniques beyond physical surfaces is far from trivial. In this paper, we investigate initial approaches for locating corners and extracting primitive geometric shapes in beyond-surface sketches, which are important ingredients of subsequent higher-level interpretations for building richer sketching interfaces. Moreover, we investigate preliminary challenges of sketch recognition in beyond-surface environments and discuss possible solutions for achieving successful next-step extensions of this work.

Show BibTex
@inproceedings{Taele:2012:IAE:2212776.2223749,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Initial Approaches for Extending Sketch Recognition to Beyond-surface Environments},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {2039--2044},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223749},
 doi = {10.1145/2212776.2223749},
 acmid = {2223749},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {beyond-surface interactions, low-level processing, sketch recognition}
} 
2012 
PublicationImage
 Valentine, Stephanie; Vides, Francisco; Lucchese, George; Turner, David; Kim, Hong-hoe; Li, Wenzhe; Linsey, Julie; Hammond, Tracy. Mechanix: A Sketch-Based Tutoring System for Statics Courses. Proceedings of the Twenty-Fourth Innovative Applications of Artificial Intelligence Conference (IAAI). pp. 2253-2260. Toronto, Canada. AAAI, July 22-26, 2012. Link
Show Abstract:
Introductory engineering courses within large universities often have annual enrollments which can reach up to a thousand students. It is very challenging to achieve differentiated instruction in classrooms with class sizes and student diversity of such great magnitude. Professors can only assess whether students have mastered a concept by using multiple choice questions, while detailed homework assignments, such as planar truss diagrams, are rarely assigned because professors and teaching assistants would be too overburdened with grading to return assignments with valuable feedback in a timely manner. In this paper, we introduce Mechanix, a deployed sketch-based tutoring system for engineering students enrolled in statics courses. Our system not only allows students to enter planar truss and free body diagrams into the system just as they would with pencil and paper, but our system checks the student’s work against a hand-drawn answer entered by the instructor, and then returns immediate and detailed feedback to the student. Students are allowed to correct any errors in their work and resubmit until the entire content is correct and thus all of the objectives are learned. Since Mechanix facilitates the grading and feedback processes, instructors are now able to assign free response questions, increasing teachers' knowledge of student comprehension. Furthermore, the iterative correction process allows students to learn during a test, rather than simply displaying memorized information.

Show BibTex
@inproceedings{valentine2012mechanix,
 title = {Mechanix: A Sketch-Based Tutoring System for Statics Courses.},
 author = {Valentine, Stephanie and Vides, Francisco and Lucchese, George and Turner, David and
   Kim, Hong-hoe and Li, Wenzhe and Linsey, Julie and Hammond, Tracy},
 booktitle = {Proceedings of the Twenty-Fourth Innovative Applications of Artificial Intelligence Conference (IAAI)},
 year = {2012},
 address = {Toronto, Canada},
 month = {July},
 organization = {AAAI},
 pages = {2253–2260}
}
2012 
PublicationImage
 Vides, F.; Taele, P.; Kim, H.; Ho, J.; Hammond, T. Intelligent Feedback for Kids Using Sketch Recognition. ACM SIGCHI 2012 Conference on Human Factors in Computing Systems Workshop on Educational Interfaces, Software, and Technology. ACM, 2012. Link
Show Abstract:

Show BibTex
@INPROCEEDINGS{Vides:2012:CHI-EIST,
 author = {Vides, F and Taele, P and Kim, H and Ho, J and Hammond, T},
 title = {Intelligent Feedback for Kids Using Sketch Recognition},
 booktitle = {ACM SIGCHI 2012 Conference on Human Factors in Computing Systems Workshop on Educational
   Interfaces, Software, and Technology},
 year = {2012},
 publisher = {ACM}
}
2011 
PublicationImage
 Atilola, Olufunmilola; Field, Martin; McTigue, Erin; Hammond, Tracy; Linsey, Julie. Mechanix: A Sketch Recognition Truss Tutoring System. American Society of Mechanical Engineers (ASME) 2011 International Design Engineering Technical Conferences and Computers and Information in Engineering Conference, Volume 7: 5th International Conference on Micro- and Nanosystems; 8th International Conference on Design and Design Education; 21st Reliability, Stress Analysis, and Failure Prevention Conference. Volume 7, pp. 645-654. Washington, DC. ASME, August 28-30, 2011. Link
Show Abstract:

Show BibTex
@INPROCEEDINGS{atilola2011asme,
 author = {Atilola, Olufunmilola and Field, Martin and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 booktitle= {American Society of Mechanical Engineers (ASME) 2011 International Design Engineering Technical 
   Conferences and Computers and Information in Engineering Conference, Volume 7: 5th International Conference
   on Micro- and Nanosystems; 8th International Conference on Design and Design Education; 21st Reliability, 
   Stress Analysis, and Failure Prevention Conference},
 title = {Mechanix: A Sketch Recognition Truss Tutoring System},
 year = {2011},
 volume= {7},
 pages = {645–654},
 month = {August 28–30},
 address = {Washington, DC},
 publisher = {ASME}
}
2011 
PublicationImage
 Atilola, Olufunmilola; Field, Martin; McTigue, Erin; Hammond, Tracy; Linsey, Julie. Evaluation of a Natural Sketch Interface for Truss FBDs and Analysis. Frontiers in Education Conference (FIE). pp. S2E-1 - S2E-6. Rapid City, SD. IEEE, October 12-15, 2011. Link
Show Abstract:

Show BibTex
@inproceedings{Atilola:2011:ENS:2192607.2193253,
 author = {Atilola, Olufunmilola and Field, Martin and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 title = {Evaluation of a Natural Sketch Interface for Truss FBDs and Analysis},
 booktitle = {Proceedings of the 2011 Frontiers in Education Conference},
 series = {FIE '11},
 year = {2011},
 isbn = {978-1-61284-468-8},
 pages = {S2E-1--S2E-6},
 url = {http://dx.doi.org/10.1109/FIE.2011.6142959},
 doi = {10.1109/FIE.2011.6142959},
 acmid = {2193253},
 publisher = {IEEE Computer Society},
 address = {Washington, DC, USA}
} 
2011 
PublicationImage
 Field, Martin; Valentine, Stephanie; Linsey, Julie; Hammond, Tracy. Sketch Recognition Algorithms for Comparing Complex and Unpredictable Shapes. Proceedings of the Twenty-Second international Joint Conference on Artificial Intelligence (IJCAI). Volume 3, pp. 2436-2441. Barcelona, Spain. AAAI Press, July 16-22, 2011. Link
Show Abstract:

Show BibTex
@inproceedings{Field:2011:SRA:2283696.2283803,
 author = {Field, Martin and Valentine, Stephanie and Linsey, Julie and Hammond, Tracy},
 title = {Sketch Recognition Algorithms for Comparing Complex and Unpredictable Shapes},
 booktitle = {Proceedings of the Twenty-Second International Joint Conference on Artificial Intelligence - Volume Three},
 series = {IJCAI'11},
 year = {2011},
 isbn = {978-1-57735-515-1},
 location = {Barcelona, Catalonia, Spain},
 pages = {2436--2441},
 numpages = {6},
 url = {http://dx.doi.org/10.5591/978-1-57735-516-8/IJCAI11-406},
 doi = {10.5591/978-1-57735-516-8/IJCAI11-406},
 acmid = {2283803},
 publisher = {AAAI Press}
} 
2011 
PublicationImage
 Hammond, Tracy Anne; Adler, Aaron. IUI 2011 Workshop: Sketch Recognition. Proceedings of the 16th International Conference on Intelligent User Interfaces (IUI). pp. 465-466. Palo Alto, CA. ACM, February 13-16, 2011. Link
Show Abstract:

Show BibTex
@inproceedings{Hammond:2011:IWS:1943403.1943503,
author = {Hammond, Tracy Anne and Adler, Aaron},
title = {{IUI} 2011 Workshop: Sketch Recognition},
booktitle = {Proceedings of the 16th International Conference on Intelligent User Interfaces},
series = {{IUI} '11},
year = {2011},
isbn = {978-1-4503-0419-1},
location = {Palo Alto, CA, USA},
pages = {465--466},
numpages = {2},
url = {http://doi.acm.org/10.1145/1943403.1943503},
doi = {10.1145/1943403.1943503},
acmid = {1943503},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {CAD, design, document processing, intelligent user interfaces, pen-input computing, sketch
   recognition, sketch understanding, sketching, tablet PCs}
} 
2011 
PublicationImage
 Hammond, Tracy; Paulson, Brandon. Recognizing Sketched Multistroke Primitives. ACM Transactions on Interactive Intelligent Systems (TIIS). Volume 1, Number 1. Article 4, pp. 1-34. ACM, October, 2011. Link
Show Abstract:

Show BibTex
@article{Hammond:2011:RSM:2030365.2030369,
 author = {Hammond, Tracy and Paulson, Brandon},
 title = {Recognizing Sketched Multistroke Primitives},
 journal = {ACM Trans. Interact. Intell. Syst.},
 issue_date = {October 2011},
 volume = {1},
 number = {1},
 month = oct,
 year = {2011},
 issn = {2160-6455},
 pages = {4:1--4:34},
 articleno = {4},
 numpages = {34},
 url = {http://doi.acm.org/10.1145/2030365.2030369},
 doi = {10.1145/2030365.2030369},
 acmid = {2030369},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {Sketch recognition, intelligent user interfaces, neural networks, primitive recognition}
} 
2011 
PublicationImage
 Kebodeaux, Kourtney; Field, Martin; Hammond, Tracy. Defining Precise Measurements with Sketched Annotations. Proceedings of the Eighth Eurographics Symposium on Sketch-Based Interfaces and Modeling (SBIM). pp. 79-86. Vancouver, Canada. ACM, August 5-7, 2011. Link
Show Abstract:

Show BibTex
@inproceedings{Kebodeaux:2011:DPM:2021164.2021179,
 author = {Kebodeaux, Kourtney and Field, Martin and Hammond, Tracy},
 title = {Defining Precise Measurements with Sketched Annotations},
 booktitle = {Proceedings of the Eighth Eurographics Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '11},
 year = {2011},
 isbn = {978-1-4503-0906-6},
 location = {Vancouver, British Columbia, Canada},
 pages = {79--86},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2021164.2021179},
 doi = {10.1145/2021164.2021179},
 acmid = {2021179},
 publisher = {ACM},
 address = {New York, NY, USA}
} 
2011 
PublicationImage
 Li, Wenzhe; Hammond, Tracy Anne. Recognizing Text Through Sound Alone. Proceedings of the Twenty-Fifth AAAI Conference on Artificial Intelligence (AAAI). pp. 1481-1486. San Francisco, CA. AAAI, August 7-11, 2011. Link
Show Abstract:
This paper presents an acoustic sound recognizer to recognize what people are writing on a table or wall by utilizing the sound signal information generated from a key, pen, or fingernail moving along a textured surface. Sketching provides a natural modality to interact with text, and sound is an effective modality for distinguishing text. However, limited research has been conducted in this area. Our system uses a dynamic time-warping approach to recognize 26 hand-sketched characters (A-Z) solely through their acoustic signal. Our initial prototype system is user-dependent and relies on fixed stroke ordering. Our algorithm relied mainly on two features: mean amplitude and MFCCs (Mel-frequency cepstral coefficients). Our results showed over 80% recognition accuracy.
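
As a rough illustration of the dynamic time-warping step, the following Python sketch compares frame-wise mean-amplitude sequences with classic DTW; the frame sizes and the use of mean amplitude alone (MFCCs would be handled analogously) are assumptions, not the paper's exact configuration.

# Assumed feature pipeline: frame-wise mean amplitude compared with classic DTW.
import numpy as np

def frame_mean_amplitude(signal, frame_len=512, hop=256):
    """Average absolute amplitude per analysis frame of a 1-D audio signal."""
    frames = [signal[i:i + frame_len]
              for i in range(0, len(signal) - frame_len + 1, hop)]
    return np.array([np.mean(np.abs(f)) for f in frames])

def dtw_distance(a, b):
    """O(len(a) * len(b)) dynamic time warping between two feature sequences."""
    cost = np.full((len(a) + 1, len(b) + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            cost[i, j] = abs(a[i - 1] - b[j - 1]) + min(
                cost[i - 1, j], cost[i, j - 1], cost[i - 1, j - 1])
    return cost[len(a), len(b)]

def classify_letter(sample, templates):
    """templates: dict mapping a letter to a list of recorded example signals."""
    feats = frame_mean_amplitude(sample)
    return min(templates, key=lambda ch: min(
        dtw_distance(feats, frame_mean_amplitude(t)) for t in templates[ch]))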

Show BibTex
@inproceedings{WLi2011,
 author = {Wenzhe Li and Tracy Hammond},
 title = {Recognizing Text Through Sound Alone},
 booktitle = {AAAI Conference on Artificial Intelligence},
 year = {2011},
 keywords = {},
 abstract = {This paper presents an acoustic sound recognizer to recognize what people are writing on a table or wall by
   utilizing the sound signal information generated from a key, pen, or fingernail moving along a textured surface. Sketching
   provides a natural modality to interact with text, and sound is an effective modality for distinguishing text. However,
    limited research has been conducted in this area. Our system uses a dynamic time-warping approach to recognize 26
   hand-sketched characters (A-Z) solely through their acoustic signal. Our initial prototype system is user-dependent and
   relies on fixed stroke ordering. Our algorithm relied mainly on two features: mean amplitude and MFCCs (Mel-frequency
   cepstral coefficients). Our results showed over 80% recognition accuracy.},
 url = {http://www.aaai.org/ocs/index.php/AAAI/AAAI11/paper/view/3791}
}
2011 
PublicationImage
 Lupfer, Nic; Field, Martin; Kerne, Andruid; Hammond, Tracy. sketchy: Morphing User Sketches for Artistic Assistance. Proceedings of the 2011 ACM International Conference on Intelligent User Interfaces. Palo Alto, CA. ACM, February 13-16, 2011. Link
Show Abstract:

Show BibTex
@INPROCEEDINGS{lupfer2011ACM,
 author = {Lupfer, Nic and Field, Martin and Kerne, Andruid and Hammond, Tracy},
 booktitle = {Proceedings of the 2011 ACM International conference on intelligent user interface},
 title = {sketchy: Morphing User Sketches for Artistic Assistance},
 year = {2011},
 month = {February 13-16},
 address = {Palo Alto, CA},
 publisher = {ACM}
}
2011 
PublicationImage
 Paulson, Brandon; Cummings, Danielle; Hammond, Tracy. Object Interaction Detection Using Hand Posture Cues in an Office Setting. International Journal of Human-Computer Studies (IJHCS). Volume 69, Number 1, pp. 19-29. Elsevier, January, 2011. Link
Show Abstract:

Show BibTex
@article{Paulson:2011:OID:1897345.1897540,
 author = {Paulson, Brandon and Cummings, Danielle and Hammond, Tracy},
 title = {Object Interaction Detection Using Hand Posture Cues in an Office Setting},
 journal = {Int. J. Hum.-Comput. Stud.},
 issue_date = {January, 2011},
 volume = {69},
 number = {1-2},
 month = jan,
 year = {2011},
 issn = {1071-5819},
 pages = {19--29},
 numpages = {11},
 url = {http://dx.doi.org/10.1016/j.ijhcs.2010.09.003},
 doi = {10.1016/j.ijhcs.2010.09.003},
 acmid = {1897540},
 publisher = {Academic Press, Inc.},
 address = {Duluth, MN, USA},
 keywords = {Activity recognition, Context-aware, Cyberglove, Glove-based interaction, Hand gesture, Hand posture, Haptics}
}
2011 
PublicationImage
 Valentine, Stephanie; Field, Martin; Smith, A; Hammond, T. A Shape Comparison Technique for Use in Sketch-Based Tutoring Systems. Proceedings of the 2011 Intelligent User Interfaces Workshop on Sketch Recognition (Palo Alto, CA, USA, 2011). Volume 11, Number 5, 4 pages. Palo Alto, CA. IUI, February 13, 2011. Link
Show Abstract:

Show BibTex
@INPROCEEDINGS{valentine2011shape,
 author = {Valentine, Stephanie and Field, Martin and Smith, A and Hammond, T},
 title = {A Shape Comparison Technique for Use in Sketch-Based Tutoring Systems},
 booktitle = {Proceedings of the 2011 Intelligent User Interfaces Workshop on Sketch Recognition (Palo Alto, CA, USA, 2011)},
 year = {2011},
 month = {February 13},
 address = {Palo Alto, CA},
 publisher = {ASEE Conferences},
 note = {4 pages}
}
2011 
PublicationImage
 Wolin, Aaron; Field, Martin; Hammond, Tracy. Combining Corners from Multiple Segmenters. Proceedings of the Eighth Eurographics Symposium on Sketch-Based Interfaces and Modeling (SBIM). pp. 117-124. Vancouver, Canada. ACM, August 5-7, 2011. Link
Show Abstract:

Show BibTex
@inproceedings{Wolin:2011:CCM:2021164.2021185,
 author = {Wolin, Aaron and Field, Martin and Hammond, Tracy},
 title = {Combining Corners from Multiple Segmenters},
 booktitle = {Proceedings of the Eighth Eurographics Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '11},
 year = {2011},
 isbn = {978-1-4503-0906-6},
 location = {Vancouver, British Columbia, Canada},
 pages = {117--124},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2021164.2021185},
 doi = {10.1145/2021164.2021185},
 acmid = {2021185},
 publisher = {ACM},
 address = {New York, NY, USA}
} 
2010 
PublicationImage
 Costagliola, Gennaro; Hammond, Tracy; Plimmer, Beryl. Editorial: JVLC Special Issue on Sketch Computation. Journal of Visual Languages & Computing (JVLC). Volume 21, Number 2, pp. 67-68. Elsevier, April, 2010. Link
Show Abstract:

Show BibTex
@article{Costagliola:2010:EJS:1752259.1752502,
 author = {Costagliola, Gennaro and Hammond, Tracy and Plimmer, Beryl},
 title = {Editorial: JVLC Special Issue on Sketch Computation},
 journal = {J. Vis. Lang. Comput.},
 issue_date = {April, 2010},
 volume = {21},
 number = {2},
 month = apr,
 year = {2010},
 issn = {1045-926X},
 pages = {67--68},
 numpages = {2},
 url = {http://dx.doi.org/10.1016/j.jvlc.2010.01.003},
 doi = {10.1016/j.jvlc.2010.01.003},
 acmid = {1752502},
 publisher = {Academic Press, Inc.},
 address = {Orlando, FL, USA}
} 
2010 
PublicationImage
 David, Jessica; Eoff, Brian; Hammond, Tracy. CoSke-An Exploration in Collaborative Sketching. Computer Supported Cooperative Work Posters (CSCW). pp. 471-472. Savannah, GA. February 6-10, 2010. Link
Show Abstract:
Collaboration is a helpful tool for inspiring creativity and promoting idea generation. To assist sketch collaboration using digital sketching, we developed CoSke (short for Collaborative Sketching), a server application that lets multiple users, each sketching on their own client, draw collaboratively on a shared canvas. We performed a user study to investigate how users react to varying methods of collaborative interaction, comparing the shared digital canvas to traditional pen and paper methods, as well as same room versus distinct locations. User surveys recorded participants' qualitative opinions about the methods. Points of communication such as hand gestures, eye contact, and contribution were recorded by proctors. The results from these metrics along with user study comments suggest that paper-based methods may impede collaboration due to the physical constraints inherent in a shared physical drawing space, and that speech is vital to effective sketch collaboration. Proctor recordings also provide insight into which face-to-face methods of collaborative communication can be translated into the digital realm. Further examination of the data collected from this and future studies will provide further insight to these questions and guidance on how developers can envision and build a system that will truly provide for the capabilities and natural flow of face-to-face human sketching communication.

Show BibTex
@INPROCEEDINGS{david2010coske,
 author = {David, Jessica and Eoff, Brian and Hammond, Tracy},
 booktitle = {Computer Supported Cooperative Work Posters (CSCW)},
 title = {CoSke-An Exploration in Collaborative Sketching},
 year = {2010},
 pages = {471–472},
 month = {February 6–10},
 address = {Savannah, GA}
}
2010 
PublicationImage
 Dixon, Daniel; Prasad, Manoj; Hammond, Tracy. iCanDraw: Using Sketch Recognition and Corrective Feedback to Assist a User in Drawing Human Faces. Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (CHI). pp. 897-906. Atlanta, GA. ACM, April 10-15, 2010. Link
Show Abstract:
When asked to draw, many people are hesitant because they consider themselves unable to draw well. This paper describes the first system for a computer to provide direction and feedback for assisting a user to draw a human face as accurately as possible from an image. Face recognition is first used to model the features of a human face in an image, which the user wishes to replicate. Novel sketch recognition algorithms were developed to use the information provided by the face recognition to evaluate the hand-drawn face. Two design iterations and user studies led to nine design principles for providing such instruction, presenting reference media, giving corrective feedback, and receiving actions from the user. The result is a proof-of-concept application that can guide a person through step-by-step instruction and generated feedback toward producing his/her own sketch of a human face in a reference image.

Show BibTex
@inproceedings{Dixon:2010:IUS:1753326.1753459,
 author = {Dixon, Daniel and Prasad, Manoj and Hammond, Tracy},
 title = {iCanDraw: Using Sketch Recognition and Corrective Feedback to Assist a User in Drawing Human Faces},
 booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
 series = {CHI '10},
 year = {2010},
 isbn = {978-1-60558-929-9},
 location = {Atlanta, Georgia, USA},
 pages = {897--906},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/1753326.1753459},
 doi = {10.1145/1753326.1753459},
 acmid = {1753459},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {assistive and corrective feedback, computer-aided instruction, pen-input computing, sketch recognition}
} 
2010 
PublicationImage
 Hammond, Tracy Anne; Logsdon, Drew; Paulson, Brandon; Johnston, Joshua; Peschel, Joshua M; Wolin, Aaron; Taele, Paul. A Sketch Recognition System for Recognizing Free-Hand Course of Action Diagrams. Proceedings of the Twenty-Second Innovative Applications of Artificial Intelligence Conference (IAAI). pp. 1781-1786. Atlanta, GA. AAAI, July 11-15, 2010. Link
Show Abstract:
Military course-of-action (COA) diagrams are used to depict battle scenarios and include thousands of unique symbols, complete with additional textual and designator modifiers. We have created a real-time sketch recognition interface that recognizes 485 freely-drawn military course-of-action symbols. When the variations (not allowable by other systems) are factored in, our system is several orders of magnitude larger than the next biggest system. On 5,900 hand-drawn symbols, the system achieves an accuracy of 90% when considering the top 3 interpretations and requiring every aspect of the shape (variations, text, symbol, location, orientation) to be correct.

Show BibTex
@inproceedings{Hammond2010IAAI,
 author = {Hammond, Tracy and Logsdon, Drew and Paulson, Brandon and Johnston, Joshua  and Peschel, Joshua 
   and Wolin, Aaron  and Taele, Paul},
 title = {A Sketch Recognition System for Recognizing Free-Hand Course of Action Diagrams},
 booktitle = {Innovative Applications of Artificial Intelligence},
 year = {2010},
 pages = {1781--1786},
 month = {July 11--15},
 address = {Atlanta, GA},
 keywords = {course of action diagrams; sketch recognition; pen input computing},
 url = {http://www.aaai.org/ocs/index.php/IAAI/IAAI10/paper/view/1581}
}
2010 
PublicationImage
 Hammond, Tracy; Davis, Randall. Creating the Perception-Based LADDER Sketch Recognition Language. Proceedings of the 8th ACM Conference on Designing Interactive Systems (DIS). pp. 141-150. Aarhus, Denmark. ACM, August 16-20, 2010. Link
Show Abstract:
Sketch recognition is automated understanding of hand-drawn diagrams. Current sketch recognition systems exist for only a handful of domains, which contain on the order of 10-20 shapes. Our goal was to create a generalized method for recognition that could work for many domains, increasing the number of shapes that could be recognized in real-time, while maintaining a high accuracy. In an effort to effectively recognize shapes while allowing drawing freedom (both drawing-style freedom and perceptually-valid variations), we created the shape description language modeled after the way people naturally describe shapes to 1) create an intuitive and easy to understand description, providing transparency to the underlying recognition process, and 2) to improve recognition by providing recognition flexibility (drawing freedom) that is aligned with how humans perceive shapes. This paper describes the results of a study performed to see how users naturally describe shapes. A sample of 35 subjects described or drew approximately 16 shapes each. Results show a common vocabulary related to Gestalt grouping and singularities. Results also show that perception, similarity, and context play an important role in how people describe shapes. This study resulted in a language (LADDER) that allows shape recognizers for any domain to be automatically generated from a single hand-drawn example of each shape. Sketch systems for over 30 different domains have been automatically generated based on this language. The largest domain contained 923 distinct shapes, and achieved a recognition accuracy of 83% (and a top-3 accuracy of 87%) on a corpus of over 11,000 sketches, which recognizes almost two orders of magnitude more shapes than any other existing system.
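
To give a flavor of what such a description language expresses, here is an informal rendering of an arrow-like shape as a Python dictionary; this is not actual LADDER syntax, and the component and constraint names are illustrative only.

# Not actual LADDER syntax: an informal rendering of a perception-based shape
# description, with invented component and constraint names.
arrow = {
    "components": {
        "shaft": "Line",
        "head1": "Line",
        "head2": "Line",
    },
    "constraints": [
        ("coincident", "shaft.p2", "head1.p1"),
        ("coincident", "shaft.p2", "head2.p1"),
        ("equalLength", "head1", "head2"),
        ("acuteAngle", "head1", "shaft"),
        ("acuteAngle", "head2", "shaft"),
    ],
}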

Show BibTex
@inproceedings{Hammond:2010:CPL:1858171.1858197,
 author = {Hammond, Tracy and Davis, Randall},
 title = {Creating the Perception-based LADDER Sketch Recognition Language},
 booktitle = {Proceedings of the 8th ACM Conference on Designing Interactive Systems},
 series = {DIS '10},
 year = {2010},
 isbn = {978-1-4503-0103-9},
 location = {Aarhus, Denmark},
 pages = {141--150},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/1858171.1858197},
 doi = {10.1145/1858171.1858197},
 acmid = {1858197},
 publisher = {ACM},
 address = {New York, NY, USA}
} 
2010 
PublicationImage
 Hammond, Tracy; Lank, Edward; Adler, Aaron. SkCHI: Designing Sketch Recognition Interfaces. CHI'10 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 4501-4504. Atlanta, GA. ACM, April 10-15, 2010. Link
Show Abstract:
Sketch recognition user interfaces currently treat the pen in the same manner as a mouse and keyboard. The aim of this workshop is to promote thought and discussion about how to move beyond this to create natural and intuitive pen-based interfaces. To this end, the workshop will include panel discussions, group discussions, and even an instructional session on drawing sketches.

Show BibTex
@inproceedings{Hammond:2010:SDS:1753846.1754184,
 author = {Hammond, Tracy and Lank, Edward and Adler, Aaron},
 title = {SkCHI: Designing Sketch Recognition Interfaces},
 booktitle = {CHI '10 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '10},
 year = {2010},
 isbn = {978-1-60558-930-5},
 location = {Atlanta, Georgia, USA},
 pages = {4501--4504},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/1753846.1754184},
 doi = {10.1145/1753846.1754184},
 acmid = {1754184},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {pen computing, pen-input computing, sketch computing, sketch interfaces, sketch recognition, tablet pc}
} 
2010 
PublicationImage
 Hammond, Tracy; Logsdon, Drew; Peschel, Joshua; Johnston, Joshua; Taele, Paul; Wolin, Aaron; Paulson, Brandon. A Sketch Recognition Interface that Recognizes Hundreds of Shapes in Course-of-Action Diagrams. CHI'10 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 4213-4218. Atlanta, GA. ACM, April 10-15, 2010. Link
Show Abstract:
Sketch recognition is the automated recognition of hand drawn diagrams. Military course-of-action (COA) diagrams are used to depict battle scenarios. The domain of military course of action diagrams is particularly interesting because it includes tens of thousands of different geometric shapes, complete with many additional textual and designator modifiers. Existing sketch recognition systems recognize on the order of at most 20 different shapes. Our sketch recognition interface recognizes 485 different freely drawn military course-of-action diagram symbols in real time, with each shape containing its own elaborate set of text labels and other variations. We are able to do this by combining multiple recognition techniques in a single system. When the variations (not allowable by other systems) are factored in, our system is several orders of magnitude larger than the next biggest system. On 5,900 hand-drawn symbols drawn by 8 researchers, the system achieves an accuracy of 90% when considering the top 3 interpretations and requiring every aspect of the shape (variations, text, symbol, location, orientation) to be correct.

Show BibTex
@inproceedings{Hammond:2010:SRI:1753846.1754128, 
 author = {Hammond, Tracy and Logsdon, Drew and Peschel, Joshua and Johnston, Joshua and Taele, Paul and 
   Wolin, Aaron and Paulson, Brandon},
 title = {A Sketch Recognition Interface That Recognizes Hundreds of Shapes in Course-of-action Diagrams},
 booktitle = {CHI '10 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '10},
 year = {2010},
 isbn = {978-1-60558-930-5},
 location = {Atlanta, Georgia, USA},
 pages = {4213--4218},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/1753846.1754128},
 doi = {10.1145/1753846.1754128},
 acmid = {1754128},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {course-of-action diagrams, pen-based input, sketch recognition}
}
2010 
PublicationImage
 Hammond, Tracy; Prasad, Manoj; Dixon, Daniel. Art 101: Learning to Draw through Sketch Recognition. Proceedings of the 10th International Symposium on Smart Graphics, Lecture Notes in Computer Science 6133. Volume 6133, pp. 277-280. Banff, Canada. Springer Berlin Heidelberg, June 24-26, 2010. Link
Show Abstract:
iCanDraw is a drawing tool that can assist novice users to draw. The goal behind the system is to enable the users to perceive objects beyond what they know and improve their spatial cognitive skills. One of the early tasks in a beginner art class is to accurately reproduce an image, in an attempt to teach users to draw what they see, rather than what they know, improving spatial cognition skills. The iCanDraw system assists users to reproduce a human face, providing real-time drawing feedback enabled by face and sketch recognition technologies. We are presenting an art installation piece, where the conference participants using the iCanDraw ‘smart graphics’ system create the art in real-time at the conference.

Show BibTex
@inproceedings{Hammond:2010:ALD:1894345.1894383,
 author = {Hammond, Tracy and Prasad, Manoj and Dixon, Daniel},
 title = {Art 101: Learning to Draw Through Sketch Recognition},
 booktitle = {Proceedings of the 10th International Conference on Smart Graphics},
 series = {SG'10},
 year = {2010},
 isbn = {3-642-13543-9, 978-3-642-13543-9},
 location = {Banff, Canada},
 pages = {277--280},
 numpages = {4},
 url = {http://dl.acm.org/citation.cfm?id=1894345.1894383},
 acmid = {1894383},
 publisher = {Springer-Verlag},
 address = {Berlin, Heidelberg},
 keywords = {face recognition, sketch recognition, spatial cognition}
} 
2010 
PublicationImage
 Johnston, Joshua; Hammond, Tracy. Computing Confidence Values for Geometric Constraints for Use in Sketch Recognition. Proceedings of the Seventh Sketch-Based Interfaces and Modeling Symposium (SBIM). pp. 71-78. Annecy, France. Eurographics Association, June 7-10, 2010. Link
Show Abstract:
Geometric constraints are used by many sketch recognition systems to perform high-level assembly of components of a sketch into semantic structures. However, with a few notable exceptions, most of the current recognition systems do not have constraints that use real-valued notions of confidence. We discuss methods for assigning confidence values to different kinds of constraints. We show how these confidence values equate to user perception, how they can be used to balance speed and accuracy in recognition algorithms, and how they can be used to assign confidence values to the high-level shapes they are used to construct. We use these constraints to extend the LADDER shape definition language in a system that recognizes 5,900 hand-drawn examples of 485 different military course-of-action diagrams at an accuracy of 89.9%.
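
The abstract does not give the confidence functions themselves, so the sketch below uses assumed Gaussian-style mappings from geometric deviation to a [0, 1] confidence, and a simple min-combination for shapes, purely as an illustration of the idea.

# Assumed Gaussian-style confidence functions; the published functions and
# parameters are not given in the abstract above.
import math

def parallel_confidence(angle1, angle2, sigma=math.radians(10)):
    """Confidence that two segments are parallel, from their orientations (radians)."""
    diff = abs(angle1 - angle2) % math.pi
    diff = min(diff, math.pi - diff)          # fold into [0, pi/2]
    return math.exp(-(diff ** 2) / (2 * sigma ** 2))

def coincident_confidence(p1, p2, sigma=5.0):
    """Confidence that two endpoints coincide, from their distance in pixels."""
    return math.exp(-(math.dist(p1, p2) ** 2) / (2 * sigma ** 2))

def shape_confidence(constraint_confidences):
    """One simple combination rule: a shape is only as sure as its weakest constraint."""
    return min(constraint_confidences) if constraint_confidences else 0.0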

Show BibTex
@inproceedings{Johnston:2010:CCV:1923363.1923376,
 author = {Johnston, J. and Hammond, T.},
 title = {Computing Confidence Values for Geometric Constraints for Use in Sketch Recognition},
 booktitle = {Proceedings of the Seventh Sketch-Based Interfaces and Modeling Symposium},
 series = {SBIM '10},
 year = {2010},
 isbn = {978-3-905674-25-5},
 location = {Annecy, France},
 pages = {71--78},
 numpages = {8},
 url = {http://dl.acm.org/citation.cfm?id=1923363.1923376},
 acmid = {1923376},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland, Switzerland}
}
2010 
PublicationImage
 Miller, Jace; Hammond, Tracy. Wiiolin: A Virtual Instrument Using the Wii Remote. Proceedings of the 2010 Conference on New Interfaces for Musical Expression (NIME). pp. 497-500. Sydney, Australia. June 15-18, 2010. Link
Show Abstract:
The console gaming industry is experiencing a revolution in terms of user control, due in large part to Nintendo’s introduction of the Wii remote. The online open source development community has embraced the Wii remote, integrating the inexpensive technology into numerous applications. Some of the more interesting applications demonstrate how the remote hardware can be leveraged for nonstandard uses. In this paper we describe a new way of interacting with the Wii remote and sensor bar to produce music. The Wiiolin is a virtual instrument which can mimic a violin or cello. Sensor bar motion relative to the Wii remote and button presses are analyzed in real-time to generate notes. Our design is novel in that it involves the remote’s infrared camera and sensor bar as an integral part of music production, allowing users to change notes by simply altering the angle of their wrist, and hence their bow. The Wiiolin introduces a more realistic way of instrument interaction than other attempts that rely on button presses and accelerometer data alone.
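
As a toy illustration of the angle-to-note idea, the following Python sketch derives a bow angle from the two infrared dots the Wii remote camera reports and buckets it into notes; the note set, angle range, and mapping are invented for illustration and are not the Wiiolin's actual behavior.

# Toy angle-to-note mapping with invented notes, range, and bucketing.
import math

NOTES = ["G3", "D4", "A4", "E5"]              # hypothetical open-string notes

def bow_angle(dot_a, dot_b):
    """Orientation of the sensor bar as seen by the remote's IR camera."""
    return math.degrees(math.atan2(dot_b[1] - dot_a[1], dot_b[0] - dot_a[0]))

def note_for_angle(angle, lo=-40.0, hi=40.0):
    """Bucket a wrist/bow angle (degrees) into one of the available notes."""
    clamped = max(lo, min(hi, angle))
    idx = int(round((clamped - lo) / (hi - lo) * (len(NOTES) - 1)))
    return NOTES[idx]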

Show BibTex
@INPROCEEDINGS{miller2010wiiolin,
 author = {Miller, Jace and Hammond, Tracy},
 booktitle = {Proceedings of the 2010 Conference on New Interfaces for Musical Expression (NIME)},
 title = {Wiiolin: A Virtual Instrument Using the Wii Remote},
 year = {2010},
 pages = {497–500},
 month = {June 15-18},
 address = {Sydney, Australia},
 publisher = {}
}
2010 
PublicationImage
 Rajan, Pankaj; Taele, Paul; Hammond, Tracy. Evaluation of Paper-Pen based Sketching Interface. Proceedings of the 16th International Conference on Distributed Multimedia Systems (DMS). pp. 321-326. Oak Brook, IL. SRL, October 14-16, 2010. Link
Show Abstract:
The retrieval and browsing of diagrammatic information extracted from hand-drawn diagrams would open up a rich form of information interaction. However, such sketches currently require hand annotations in order to be understood by the computer. While improvements in sketch recognition algorithms have enabled automatic recognition for Tablet PC-sketched diagrams, such progress has been constrained to online algorithms. As a result, offline algorithms that are relevant to diagrams sketched on paper remain predominantly domain-dependent, and are also restrictive in the number of diagrams that can be understood. In this paper, we discuss our research aims for providing users with information interaction that exploits the advantages of automatic correction capabilities found in online sketch recognition algorithms, with the low-cost advantages found in paper usage.

Show BibTex
@inproceedings{rajan2010evaluation,
 title = {Evaluation of Paper-Pen based Sketching Interface.},
 author = {Rajan, Pankaj and Taele, Paul and Hammond, Tracy},
 booktitle = {Proceedings of the 16th International Conference on Distributed Multimedia Systems (DMS)},
 pages = {321-326},
 year = {2010}
}
2010 
PublicationImage
 Taele, Paul; Dixon, Daniel; Hammond, Tracy. Telling the User, “No”: Sketch Recognition for Improving Sketch Technique. SkCHI: Designing Sketch Recognition Interfaces, A CHI 2010 Workshop. 4 pages. Atlanta, GA. April 10, 2010. Link
Show Abstract:
The role of sketch recognition since its inception has been to allow the computer to passively understand the drawn input that a user provides. Whether via gesture, shape, order, or context, the computer does its best to infer what is being drawn and then triggers the appropriate response, usually beautification. However, sketch recognition has matured enough to have the capability to inform the user that his/her drawn input could be better. For this position paper, we lobby for the use of sketch recognition to instruct students in their drawing ability, and then present an overview of research work incorporating sketch recognition interfaces that have advanced such grounds.

Show BibTex
@INPROCEEDINGS{taele2010No,
 author = {Taele, Paul and Dixon, Daniel and Hammond, Tracy},
 booktitle = {SkCHI: Designing Sketch Recognition Interfaces, A CHI 2010 Workshop},
 title = {Telling the User, “No”: Sketch Recognition for Improving Sketch Technique},
 year = {2010},
 month = {April 10},
 address = {Atlanta, GA},
 note = {4 pages}
}
2010 
PublicationImage
 Taele, Paul; Hammond, Tracy. LAMPS: A Sketch Recognition-Based Teaching Tool for Mandarin Phonetic Symbols I. Journal of Visual Languages & Computing (JVLC). Volume 21, Number 2, pp. 109-120. Elsevier, April, 2010. Link
Show Abstract:
The non-Romanized Mandarin Phonetic Symbols I (MPS1) system is a highly advantageous phonetic system for native English users studying Chinese Mandarin to learn, yet its steep initial learning curve leads language programs to instead adopt Romanized phonetic systems. Computer-assisted language instruction (CALI) can greatly reduce this learning curve, in order to enable students to sooner benefit from the long-term advantages presented in MPS1 usage during the course of Chinese Mandarin study. Unfortunately, the technologies surrounding existing online handwriting recognition algorithms and CALI applications are insufficient in providing a “dynamic” counterpart to traditional paper-based workbooks employed in the classroom setting. In this paper, we describe our sketch recognition-based LAMPS system for teaching MPS1 by emulating the naturalness and realism of paper-based workbooks, while extending their functionality with human instructor-level critique and assessment at an automated level.

Show BibTex
@article{Taele:2010:LSR:1752259.1752505,
 author = {Taele, Paul and Hammond, Tracy},
 title = {LAMPS: A Sketch Recognition-based Teaching Tool for Mandarin Phonetic Symbols I},
 journal = {J. Vis. Lang. Comput.},
 issue_date = {April, 2010},
 volume = {21},
 number = {2},
 month = apr,
 year = {2010},
 issn = {1045-926X},
 pages = {109--120},
 numpages = {12},
 url = {http://dx.doi.org/10.1016/j.jvlc.2009.12.004},
 doi = {10.1016/j.jvlc.2009.12.004},
 acmid = {1752505},
 publisher = {Academic Press, Inc.},
 address = {Orlando, FL, USA},
 keywords = {Bopomofo, Chinese, Sketch recognition}
} 
2009 
PublicationImage
 Bhat, Akshay; Hammond, Tracy. Using Entropy to Distinguish Shape Versus Text in Hand-Drawn Diagrams. Proceedings of the Twenty-First International Joint Conference on Artificial Intelligence (IJCAI). pp. 1395-1400. Pasadena, CA. AAAI, July 11-17, 2009. Link
Show Abstract:
Most sketch recognition systems are accurate in recognizing either text or shape (graphic) ink strokes, but not both. Distinguishing between shape and text strokes is, therefore, a critical task in recognizing hand-drawn digital ink diagrams that contain text labels and annotations. We have found the ‘entropy rate’ to be an accurate criterion of classification. We found that the entropy rate is significantly higher for text strokes compared to shape strokes and can serve as a distinguishing factor between the two. Using a single feature – zero-order entropy rate – our system produced a correct classification rate of 92.06% on test data belonging to the diagrammatic domain on which the threshold was trained. It also performed favorably on an unseen domain for which no training examples were supplied.
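
A minimal sketch of the idea, with assumed parameters: quantize successive stroke directions into a small alphabet, compute the zero-order Shannon entropy, and threshold it. The alphabet size and threshold below are illustrative, not the trained values from the paper.

# Assumed parameters throughout: 8 direction bins and a 2-bit threshold.
import math
from collections import Counter

def direction_symbols(points, bins=8):
    """Quantize successive point-to-point directions of a stroke into symbols."""
    syms = []
    for (x0, y0), (x1, y1) in zip(points, points[1:]):
        angle = math.atan2(y1 - y0, x1 - x0) % (2 * math.pi)
        syms.append(int(angle / (2 * math.pi) * bins) % bins)
    return syms

def zero_order_entropy(symbols):
    """Shannon entropy (bits) of the symbol distribution."""
    if not symbols:
        return 0.0
    counts = Counter(symbols)
    total = len(symbols)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

def looks_like_text(stroke_points, threshold=2.0):
    """Higher directional entropy suggests handwriting rather than a geometric shape."""
    return zero_order_entropy(direction_symbols(stroke_points)) > threshold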

Show BibTex
@inproceedings{Bhat:2009:UED:1661445.1661669,
 author = {Bhat, Akshay and Hammond, Tracy},
 title = {Using Entropy to Distinguish Shape Versus Text in Hand-drawn Diagrams},
 booktitle = {Proceedings of the 21st International Joint Conference on Artificial Intelligence},
 series = {IJCAI'09},
 year = {2009},
 location = {Pasadena, California, USA},
 pages = {1395--1400},
 numpages = {6},
 url = {http://dl.acm.org/citation.cfm?id=1661445.1661669},
 acmid = {1661669},
 publisher = {Morgan Kaufmann Publishers Inc.},
 address = {San Francisco, CA, USA}
}
2009 
PublicationImage
 Corey, Paul; Eoff, Brian; Hammond, Tracy. Sketch Off: A Sketch Recognition Competition. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent User Interfaces (IUI). 4 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
In this paper we describe the competition to be conducted at the Sketch Recognition Workshop of IUI 2009. The Sketch Recognition Competition promotes discussion, innovation, and competition within the sketch recognition community.

Show BibTex
@inproceedings{corey2009IUI,
 title = {Sketch Off: A Sketch Recognition Competition},
 author = {Corey, Paul and Eoff, Brian and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent 
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}
2009 
PublicationImage
 Dixon, Daniel; Prasad, Manoj; Hammond, Tracy. iCanDraw?: A Methodology for Using Assistive Sketch Recognition to Improve a User's Drawing Ability. ACM Symposium on User Interface Software and Technology (UIST) Posters. 2 pages. Vancouver, Canada. ACM, October 4-7, 2009. Link
Show Abstract:
When asked to draw, most people are hesitant because they believe themselves unable to draw well and are unsure of what adjustments are needed when drawing to make their sketch look right. This poster presents work on an application, iCanDraw?, that guides a user in drawing a human face through assistive sketch recognition. The major contributions are a methodology for an application to process and guide from a reference image as well as nine design principles for assistive sketch recognition.

Show BibTex
@inproceedings{dixon2009icandraw,
 title = {i{C}an{D}raw?: A Methodology for Using Assistive Sketch Recognition to Improve a User's Drawing Ability},
 author = {Dixon, Daniel and Prasad, Manoj and Hammond, Tracy},
 booktitle = {ACM Symposium on User Interface Software and Technology (UIST) Posters},
 year = {2009}, 
 address = {Vancouver, Canada},
 month = {10},
 organization = {ACM},
 note = {2 pages}
}
2009 
PublicationImage
 Eoff, Brian David; Hammond, Tracy. Who Dotted That 'i'?: Context Free User Differentiation through Pressure and Tilt Pen Data. Proceedings of Graphics Interface (GI). pp. 149-156. Kelowna, Canada. Canadian Information Processing Society, May 25-27, 2009. Link
Show Abstract:
With the proliferation of tablet PCs and multi-touch computers, collaborative input on a single sketched surface is becoming more and more prevalent. The ability to identify which user draws a specific stroke on a shared surface is widely useful in a) security/forensics research, by effectively identifying a forgery, b) sketch recognition, by providing the ability to employ user-dependent recognition algorithms on a multi-user system, and c) multi-user collaborative systems, by effectively discriminating whose stroke is whose in a complicated diagram. To ensure an adaptive user interface, we cannot expect nor require that users will self-identify nor restrict themselves to a single pen. Instead, we prefer a system that can automatically determine a stroke’s owner, even when strokes by different users are drawn with the same pen, in close proximity, and near in timing. We present the results of an experiment that shows that the creator of an individual pen stroke can be determined with high accuracy, without supra-stroke context (such as timing, pen ID, or location), and based solely on the physical mechanics of how these strokes are drawn (specifically, pen tilt, pressure, and speed). Results from free-form drawing data, including text and doodles, but not signature data, show that our methods differentiate a single stroke (such as that of a dot of an ‘i’) between two users at an accuracy of 97.5% and between ten users at an accuracy of 83.5%.
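
In the spirit of the approach described above, the sketch below extracts simple per-stroke statistics of pressure, tilt, and speed and trains an off-the-shelf classifier; the feature set, packet layout, and the use of scikit-learn's random forest are assumptions standing in for whatever model the authors actually used.

# Assumed pen-packet layout and feature set; scikit-learn's random forest
# stands in for the paper's actual classifier.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def stroke_vector(samples):
    """samples: array-like of (x, y, pressure, tilt_x, tilt_y, t) pen packets."""
    s = np.asarray(samples, dtype=float)
    dxy = np.diff(s[:, :2], axis=0)
    dt = np.diff(s[:, 5]) + 1e-9
    speed = np.linalg.norm(dxy, axis=1) / dt
    feats = []
    for col in (s[:, 2], s[:, 3], s[:, 4], speed):      # pressure, tilts, speed
        feats += [col.mean(), col.std(), col.min(), col.max()]
    return np.array(feats)

def train_user_model(strokes, user_ids):
    """Fit a per-stroke ownership classifier from labeled training strokes."""
    X = np.stack([stroke_vector(s) for s in strokes])
    return RandomForestClassifier(n_estimators=200).fit(X, user_ids)

def whose_stroke(model, stroke):
    return model.predict(stroke_vector(stroke).reshape(1, -1))[0]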

Show BibTex
@inproceedings{Eoff:2009:DIC:1555880.1555916,
 author = {Eoff, Brian David and Hammond, Tracy},
 title = {Who Dotted That 'I'?: Context Free User Differentiation Through Pressure and Tilt Pen Data},
 booktitle = {Proceedings of Graphics Interface 2009},
 series = {GI '09},
 year = {2009},
 isbn = {978-1-56881-470-4},
 location = {Kelowna, British Columbia, Canada},
 pages = {149--156},
 numpages = {8},
 url = {http://dl.acm.org/citation.cfm?id=1555880.1555916},
 acmid = {1555916},
 publisher = {Canadian Information Processing Society},
 address = {Toronto, Ont., Canada, Canada}
} 
2009 
PublicationImage
 Hammond, Tracy A; Davis, Randall. Recognizing Interspersed Sketches Quickly. Proceedings of Graphics Interface (GI). pp. 157-166. Kelowna, Canada. Canadian Information Processing Society, May 25-27, 2009. Link
Show Abstract:
Sketch recognition is the automated recognition of hand-drawn diagrams. When allowing users to sketch as they would naturally, users may draw shapes in an interspersed manner, starting a second shape before finishing the first. In order to provide freedom to draw interspersed shapes, an exponential combination of subshapes must be considered. Because of this, most sketch recognition systems either choose not to handle interspersing, or handle only a limited pre-defined amount of interspersing. Our goal is to eliminate such interspersing drawing constraints from the sketcher. This paper presents a high-level recognition algorithm that, while still exponential, allows for complete interspersing freedom, running in near real-time through early effective sub-tree pruning. At the core of the algorithm is an indexing technique that takes advantage of geometric sketch recognition techniques to index each shape for efficient access and fast pruning during recognition. We have stress-tested our algorithm to show that the system recognizes shapes in less than a second even with over a hundred candidate subshapes on screen.

Show BibTex
@inproceedings{Hammond:2009:RIS:1555880.1555917,
 author = {Hammond, Tracy A. and Davis, Randall},
 title = {Recognizing Interspersed Sketches Quickly},
 booktitle = {Proceedings of Graphics Interface 2009},
 series = {GI '09},
 year = {2009},
 isbn = {978-1-56881-470-4},
 location = {Kelowna, British Columbia, Canada},
 pages = {157--166},
 numpages = {10},
 url = {http://dl.acm.org/citation.cfm?id=1555880.1555917},
 acmid = {1555917},
 publisher = {Canadian Information Processing Society},
 address = {Toronto, Ont., Canada, Canada}
}
2009 
PublicationImage
 Hammond, Tracy Anne. IUI'09 Workshop Summary: Sketch Recognition. Proceedings of the 14th International Conference on Intelligent User Interfaces (IUI). pp. 501-502. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
This paper describes the IUI’09 workshop on Sketch Recognition.

Show BibTex
@inproceedings{Hammond:2009:IWS:1502650.1502736,
 author = {Hammond, Tracy Anne},
 title = {IUI'09 Workshop Summary: Sketch Recognition},
 booktitle = {Proceedings of the 14th International Conference on Intelligent User Interfaces},
 series = {IUI '09},
 year = {2009},
 isbn = {978-1-60558-168-2},
 location = {Sanibel Island, Florida, USA},
 pages = {501--502},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/1502650.1502736},
 doi = {10.1145/1502650.1502736},
 acmid = {1502736},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {cad, document processing, pen input computing, sketch recognition, sketch understanding, sketching}
} 
2009 
PublicationImage
 Hammond, Tracy; Paulson, Brandon; Eoff, Brian. Eurographics Tutorial on Sketch Recognition. Eurographics 2009-Tutorials. pp. 4 pages. Munich, Germany. The Eurographics Association, March 30 -April 3, 2009. Link
Show Abstract:
Sketch recognition is the automated understanding of hand-drawn diagrams. Despite the prevalence of keyboards and mice, hand-drawn diagrams still pervade education, design, and many other fields. This full-day tutorial explains why sketch recognition is important, the underlying algorithms, how sketch recognition can be used in traditional interfaces, and the field’s experiences with sketch recognition used in different domains.

Show BibTex
@inproceedings{hammond2009eurographics,
 title = {Eurographics Tutorial on Sketch Recognition},
 author = {Hammond, Tracy and Paulson, Brandon and Eoff, Brian},
 booktitle = {Eurographics 2009-Tutorials},
 year = {2009},
 address = {Munich, Germany},
 month = 3,
 organization = {The Eurographics Association},
 note = {4 pages}
}
2009 
PublicationImage
 Kaster, Brandon L; Jacobson, Emily R; Hammond, Tracy A. SSSOUSA: Automatically Generating Secure and Searchable Data Collection Studies. International Workshop on Visual Languages and Computing (VLC). pp. 6 pages. Redwood City, CA. DMS, September 10-12, 2009. Link
Show Abstract:
SOUSA is a Sketch-based Online User Study Application developed to aid in the creation of a universal, standardized set of sketch data. This paper describes a Secure and Searchable interface created for SOUSA (SSSOUSA) to make sketch data collection more efficient and practical for researchers and more accessible to a general audience. The expected contribution of our work will be an increase in participation of researchers and practitioners in the field of sketch recognition. We ultimately hope to develop a large, robust repository of sketch data. A motivating factor behind our work is to allow sketch recognition researchers to focus on higher-level tasks, rather than data collection. Features of our interface include a standardized collection mechanism and set of sketch data, which will allow new sketch recognition algorithms to be compared more easily with existing models. Our new interface will allow researchers to download and search their own, as well as other publicly available, data gathered from collection and verification studies. This new interface will be hosted by the Sketch Recognition Laboratory at Texas A&M University, providing researchers a single, unified solution for sketch data collection and management.

Show BibTex
@inproceedings{kaster2009sssousa,
 title = {SSSOUSA: Automatically Generating Secure and Searchable Data Collection Studies},
 author = {Kaster, Brandon L and Jacobson, Emily R and Hammond, Tracy A},
 booktitle = {International workshop on visual languages and computing. Redwood City, CA, USA: VLC},
 year = {2009},
 address = {Redwood City, CA},
 month = 9,
 organization = {DMS},
 note = {6 pages}
}
2009 
PublicationImage
 Paulson, Brandon; Hammond, Tracy. Towards a Framework for Truly Natural Low-level Sketch Recognition. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User Interfaces (IUI). pp. 4 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
Although stroke-based systems may be considered the state-of-the-art in low-level sketch recognition, they still contain constraints and intricacies that may be invisible to most novice users. In this paper, we identify some common assumptions and problems of stroke-based systems and propose a plan for the development of a new low-level framework to deal with these issues. The broader impact of this framework will be the development of sketch recognition systems which place fewer (and hopefully no) drawing constraints on users and will allow for more natural sketching, starting at the lowest and most fundamental level.

Show BibTex
@inproceedings{paulson2009IUI,
 title = {Towards a Framework for Truly Natural Low-level Sketch Recognition},
 author = {Paulson, Brandon and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent 
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}
2009 
PublicationImage
 Peschel, Joshua M; Paulson, Brandon; Hammond, Tracy. A Surfaceless Pen-Based Interface. Proceedings of the Seventh ACM Conference on Creativity and Cognition. pp. 433-434. Berkeley, CA. ACM, October 27-30, 2009. Link
Show Abstract:
Freehand drawing on a computer screen allows users to provide input through a natural mode of human interaction. With this freedom of expression, however, there exists a paradoxical limitation: the user is bound through the existing interface to the fixed drawing surface. In this work, we overcome this limitation by presenting a surfaceless pen-based interface with an application in the field of sketch recognition. A pilot study was conducted to examine the usability of the surfaceless pen-based interface. Results indicated that learning to use the device is relatively straightforward, but that interaction difficulty increases in a directly proportional manner with drawing complexity.

Show BibTex
@inproceedings{Peschel:2009:SPI:1640233.1640338,
 author = {Peschel, Joshua M. and Paulson, Brandon and Hammond, Tracy},
 title = {A Surfaceless Pen-based Interface},
 booktitle = {Proceedings of the Seventh ACM Conference on Creativity and Cognition},
 series = {C\&\#38;C '09},
 year = {2009},
 isbn = {978-1-60558-865-0},
 location = {Berkeley, California, USA},
 pages = {433--434},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/1640233.1640338},
 doi = {10.1145/1640233.1640338},
 acmid = {1640338},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {pen-based technology, surfaceless user interface}
}
2009 
PublicationImage
 Rajan, Pankaj; Hammond, Tracy. Applying Online Sketch Recognition Algorithms to a Scanned-In Sketch. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User Interfaces Posters (IUI). pp. 3 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
The goal of our research is to combine the power of stroke-based sketch recognition with the flexibility and ease of use of a piece of paper. In this paper we present preliminary results of our algorithm integrated with an online sketch recognition system built with LADDER. We also present a comparison of our paper-based interface with a tablet-based sketching interface.

Show BibTex
@inproceedings{rajan2009IUI,
 title = {Applying Online Sketch Recognition Algorithms to a Scanned-In Sketch},
 author = {Rajan, Pankaj and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent 
   User Interfaces Posters (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {3 pages}
}
2009 
PublicationImage
 Shahzad, Nabeel; Paulson, Brandon; Hammond, Tracy. Urdu Qaeda: Recognition System for Isolated Urdu Characters. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User Interfaces (IUI). pp. 4 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
This paper presents an online system for recognizing isolated, hand-sketched Urdu characters drawn on a Tablet PC. Attributes of Urdu characters are analyzed to define a set of features which are then trained and classified using a weighted, linear classifier. As a proof of concept, we have integrated our recognition algorithm into an application used to help people learn the Urdu language. Preliminary results obtained from our studies showed an accuracy of 92.8% for native Urdu writers.

Show BibTex
@inproceedings{Shahzad2009IUI,
 title = {Urdu Qaeda: Recognition System for Isolated Urdu Characters},
 author = {Shahzad, Nabeel and Paulson, Brandon and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent 
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}
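The abstract above mentions a weighted, linear classifier over character features. As a hedged illustration (not the authors' feature set or training procedure), a minimal linear classifier of that general kind can be written as:

# A generic weighted linear classifier: each class c has a weight vector w_c and
# bias b_c, and a feature vector f is labeled argmax_c (w_c . f + b_c).
# The least-squares training below is an assumption for illustration only.
import numpy as np

class LinearSymbolClassifier:
    def fit(self, features, labels):
        X = np.hstack([np.asarray(features, float), np.ones((len(features), 1))])
        self.classes_ = sorted(set(labels))
        Y = np.array([[1.0 if y == c else 0.0 for c in self.classes_] for y in labels])
        self.W_, *_ = np.linalg.lstsq(X, Y, rcond=None)
        return self

    def predict(self, feature_vector):
        x = np.append(np.asarray(feature_vector, float), 1.0)
        return self.classes_[int(np.argmax(x @ self.W_))]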
2009 
PublicationImage
 Taele, Paul; Hammond, Tracy. Hashigo: A Next-Generation Sketch Interactive System for Japanese Kanji. Proceedings of the Twenty-First Innovative Applications of Artificial Intelligence Conference (IAAI). pp. 153-158. Pasadena, CA. AAAI, July 14-16, 2009. Link
Show Abstract:
Language students can increase their effectiveness in learning written Japanese by mastering the visual structure and written technique of Japanese kanji. Yet, existing kanji handwriting recognition systems do not assess the written technique sufficiently to discourage students from developing bad learning habits. In this paper, we describe our work on Hashigo, a kanji sketch interactive system which achieves human instructor-level critique and feedback on both the visual structure and written technique of students’ sketched kanji. This type of automated critique and feedback allows students to target and correct specific deficiencies in their sketches that, if left untreated, are detrimental to effective long-term kanji learning.

Show BibTex
@inproceedings{taele2009hashigo,
 title = {Hashigo: A Next-Generation Sketch Interactive System for Japanese Kanji},
 author = {Taele, Paul and Hammond, Tracy},
 booktitle = {Proceedings of the Twenty-First Innovative Applications of Artificial Intelligence Conference (IAAI)},
 year = {2009},
 address = {Pasadena, CA},
 month = 7,
 organization = {AAAI},
 pages = {153--158}
}
2009 
PublicationImage
 Taele, Paul; Peschel, Joshua; Hammond, Tracy. A Sketch Interactive Approach to Computer-Assisted Biology Instruction. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User Interfaces Posters (IUI). pp. 2 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
Existing computer-assisted instructional (CAI) techniques for introductory biology are presently restrictive in scope, due to their focus on utilizing drills that aim for rote memorization instead of providing interaction that aids in intuitive understanding. In this paper, we discuss a prototype system for assessing learner understanding of introductory cell biology concepts using sketch-based interaction and recognition techniques.

Show BibTex
@inproceedings{taele2009interactive,
 title = {A Sketch Interactive Approach to Computer-Assisted Biology Instruction},
 author = {Taele, Paul and Peschel, Joshua and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User
   Interfaces Posters (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {2 pages}
}
2009 
PublicationImage
 Wolin, Aaron; Eoff, Brian; Hammond, Tracy. Search Your Mobile Sketch: Improving the Ratio of Interaction to Information on Mobile Devices. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User Interfaces (IUI). pp. 4 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
A mobile device’s small interaction space and undersized keyboard can sometimes make textual input difficult and impractical. Many mobile devices are predisposed for sketching as they come with a stylus or touch-screen capabilities, and sketched icons are a natural way to label objects on such a device. In this paper we present a sketch recognition overlay in Google Maps that allows users to search for location markers based on simple graphics and hand-drawn symbols.

Show BibTex
@inproceedings{wolin2009mobilesketch,
 title = {Search Your Mobile Sketch: Improving the Ratio of Interaction to Information on Mobile Devices},
 author = {Wolin, Aaron and Eoff, Brian and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent 
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}
2009 
PublicationImage
 Wolin, Aaron; Paulson, Brandon; Hammond, Tracy. Sort, Merge, Repeat: An Algorithm for Effectively Finding Corners in Hand-Sketched Strokes. Proceedings of the 6th Eurographics Symposium on Sketch-Based Interfaces and Modeling (SBIM). pp. 93-99. New Orleans, LA. ACM, August 1-2, 2009. Link
Show Abstract:
Free-sketch recognition systems attempt to recognize freely-drawn sketches without placing stylistic constraints on the users. Such systems often recognize shapes by using geometric primitives that describe the shape’s appearance rather than how it was drawn. A free-sketch recognition system necessarily allows users to draw several primitives using a single stroke. Corner finding, or vertex detection, is used to segment these strokes into their underlying primitives (lines and arcs), which in turn can be passed to the geometric recognizers. In this paper, we present a new multi-pass corner finding algorithm called MergeCF that is based on continually merging smaller stroke segments with similar, larger stroke segments in order to eliminate false positive corners. We compare MergeCF to two benchmark corner finders with substantial improvements in corner detection.

Show BibTex
@inproceedings{Wolin:2009:SMR:1572741.1572758,
 author = {Wolin, A. and Paulson, B. and Hammond, T.},
 title = {Sort, Merge, Repeat: An Algorithm for Effectively Finding Corners in Hand-sketched Strokes},
 booktitle = {Proceedings of the 6th Eurographics Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '09},
 year = {2009},
 isbn = {978-1-60558-602-1},
 location = {New Orleans, Louisiana},
 pages = {93--99},
 numpages = {7},
 url = {http://doi.acm.org/10.1145/1572741.1572758},
 doi = {10.1145/1572741.1572758},
 acmid = {1572758},
 publisher = {ACM},
 address = {New York, NY, USA},
}
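A much-simplified Python sketch of the merging idea behind MergeCF described above: candidate corners bounding short segments are tentatively removed, and the removal is kept only when the merged segment still fits a single line well. The fit test, thresholds, and the omission of arc handling are assumptions, not the published algorithm.

# Illustrative corner merging: drop a corner if its two neighboring segments
# still look like one line after the merge (arcs are ignored in this sketch).
import math

def _seg_len(points, i, j):
    return sum(math.dist(points[k], points[k + 1]) for k in range(i, j))

def _line_fit_error(points, i, j):
    # Mean perpendicular distance of points[i..j] to the chord from i to j.
    (x1, y1), (x2, y2) = points[i], points[j]
    chord = math.hypot(x2 - x1, y2 - y1) or 1e-9
    errs = [abs((x2 - x1) * (y1 - y) - (x1 - x) * (y2 - y1)) / chord
            for (x, y) in points[i:j + 1]]
    return sum(errs) / len(errs)

def merge_corners(points, corners, max_error=2.0):
    # corners: sorted stroke indices, including both endpoints.
    corners = list(corners)
    changed = True
    while changed and len(corners) > 2:
        changed = False
        # Try interior corners, shortest adjacent segment first.
        interior = sorted(range(1, len(corners) - 1),
                          key=lambda k: min(_seg_len(points, corners[k - 1], corners[k]),
                                            _seg_len(points, corners[k], corners[k + 1])))
        for k in interior:
            if _line_fit_error(points, corners[k - 1], corners[k + 1]) < max_error:
                del corners[k]          # merged segment still fits a line: corner was spurious
                changed = True
                break
    return corners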
2009 
PublicationImage
 Zhu, Yuxiang; Johnston, Joshua; Hammond, Tracy. RingEdit: A Control Point Based Editing Approach in Sketch Recognition Systems. Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent User Interfaces (IUI). pp. 6 pages. Sanibel, FL. ACM, February 8-11, 2009. Link
Show Abstract:
Editing a sketch should be one of the essential features provided by sketch recognition systems to allow people to modify what they have drawn, without having to delete and redraw shapes. This paper introduces a control point based editing approach we call RingEdit. RingEdit differs from other sketch editors in that the user actually draws their own control points on the sketch, rather than relying on control points generated by the recognition system. It provides modes that allow moving, rotating, scaling, and bending on both the shape level and stroke level. RingEdit shows great editing capabilities.

Show BibTex
@inproceedings{zhu2009IUI,
 title = {RingEdit: A Control Point Based Editing Approach in Sketch Recognition Systems},
 author = {Zhu, Yuxiang and Johnston, Joshua and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference of Intelligent 
   User Interfaces Posters (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {6 pages}
}
2009 
PublicationImage
 Zomeren, Maarten van; Peschel, Joshua M; Mann, Timothy; Knezek, Gabe; Doebbler, James; Davis, Jeremy; Hammond, Tracy A; Oomes, Augustinus HJ; Murphy, Robin R. Human-Robot Interaction Observations from a Proto-Study Using SUAVs for Structural Inspection. Proceedings of the 4th ACM/IEEE international Conference on Human Robot Interaction (HRI). pp. 235-236. San Diego, CA. ACM, March 11-13, 2009. Link
Show Abstract:

Show BibTex
@inproceedings{Zomeren:2009:HIO:1514095.1514153,
 author = {Zomeren, Maarten van and Peschel, Joshua M. and Mann, Timothy and Knezek, Gabe and 
   Doebbler, James and Davis, Jeremy and Hammond, Tracy A. and Oomes, Augustinus H.J. and Murphy, Robin R.},
 title = {Human-robot Interaction Observations from a Proto-study Using SUAVs for Structural Inspection},
 booktitle = {Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction},
 series = {HRI '09},
 year = {2009},
 isbn = {978-1-60558-404-1},
 location = {La Jolla, California, USA},
 pages = {235--236},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/1514095.1514153},
 doi = {10.1145/1514095.1514153},
 acmid = {1514153},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {(s)UAV, human robot interaction, interface, rescue robotics}
}
2008 
PublicationImage
 Berque, D.; Evans, E.; Hammond, T.; Mock, K.; Payton, M.; Sweeny, D. Tablet PCs in K-12 Education: No More Blank Slates. International Society for Technology in Education. ISTE, 2008. Link
Show Abstract:

Show BibTex
@book{hammond2008Tablet,
 author = {Berque, D. and Evans, E. and Hammond, T. and Mock, K. and Payton, M. and Sweeny, D}, 
 title = {Tablet PCs in K-12 Education: No More Blank Slates},
 publisher = {International Society for Technology in Education, ISTE},
 year = {2008},
 isbn = {1564842428}
}
2008 
PublicationImage
 Choi, Heeyoul; Hammond, Tracy. Sketch Recognition Based on Manifold Learning. Proceedings of the Twenty-Third AAAI Conference on Artificial Intelligence (AAAI) Student Abstracts. pp. 1786-1787. Chicago, IL. AAAI, July 13-17, 2008. Link
Show Abstract:
Current feature-based methods for sketch recognition systems rely on human-selected features. Certain machine learning techniques have been found to be good nonlinear feature extractors. In this paper, we apply a manifold learning method, kernel Isomap, with a new algorithm for multi-stroke sketch recognition, which significantly outperforms the standard feature-based techniques.

Show BibTex
@inproceedings{Choi:2008:SRB:1620270.1620353,
 author = {Choi, Heeyoul and Hammond, Tracy},
 title = {Sketch Recognition Based on Manifold Learning},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1786--1787},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620353},
 acmid = {1620353},
 publisher = {AAAI Press}
} 
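As a hedged illustration of the manifold-learning pipeline described above, the sketch below uses scikit-learn's standard Isomap as a stand-in for the kernel Isomap variant in the paper; the feature extraction and the k-NN classifier on the embedded points are assumptions.

# Stand-in pipeline: embed per-sketch feature vectors with Isomap, then classify
# in the low-dimensional space with k-nearest neighbors.
import numpy as np
from sklearn.manifold import Isomap
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline

def train_manifold_recognizer(feature_vectors, labels):
    model = make_pipeline(
        Isomap(n_neighbors=8, n_components=5),    # nonlinear feature extraction
        KNeighborsClassifier(n_neighbors=3))       # classification on the manifold
    model.fit(np.asarray(feature_vectors, float), labels)
    return model   # model.predict(new_feature_vectors) labels unseen sketches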
2008 
PublicationImage
 Choi, Heeyoul; Paulson, Brandon; Hammond, Tracy. Gesture Recognition Based on Manifold Learning. Structural, Syntactic, and Statistical Pattern Recognition (SSPR), Lecture Notes in Computer Science. Volume 5342, pp. 247-256. Springer Berlin Heidelberg, 2008. Link
Show Abstract:
Current feature-based gesture recognition systems use human-chosen features to perform recognition. Effective features for classification can also be automatically learned and chosen by the computer. In other recognition domains, such as face recognition, manifold learning methods have been found to be good nonlinear feature extractors. Few manifold learning algorithms, however, have been applied to gesture recognition. Current manifold learning techniques focus only on spatial information, making them undesirable for use in the domain of gesture recognition where stroke timing data can provide helpful insight into the recognition of hand-drawn symbols. In this paper, we develop a new algorithm for multi-stroke gesture recognition, which integrates timing data into a manifold learning algorithm based on a kernel Isomap. Experimental results show it to perform better than traditional human-chosen feature-based systems.

Show BibTex
@inproceedings{Choi:2008:GRB:1485797.1485832,
 author = {Choi, Heeyoul and Paulson, Brandon and Hammond, Tracy},
 title = {Gesture Recognition Based on Manifold Learning},
 booktitle = {Proceedings of the 2008 Joint IAPR International Workshop on Structural, Syntactic, and Statistical 
   Pattern Recognition},
 series = {SSPR \& SPR '08},
 year = {2008},
 isbn = {978-3-540-89688-3},
 location = {Orlando, Florida},
 pages = {247--256},
 numpages = {10},
 url = {http://dx.doi.org/10.1007/978-3-540-89689-0_29},
 doi = {10.1007/978-3-540-89689-0_29},
 acmid = {1485832},
 publisher = {Springer-Verlag},
 address = {Berlin, Heidelberg},
 keywords = {Kernel Isomap, Manifold Learning, Sketch Recognition}
} 
2008 
PublicationImage
 Corey, Paul; Hammond, Tracy. GLADDER: Combining Gesture and Geometric Sketch Recognition. Proceedings of the Twenty-Third AAAI Conference on Artificial Intelligence (AAAI) Student Abstracts. pp. 1788-1789. Chicago, IL. AAAI, July 13-17, 2008. Link
Show Abstract:
Sketch recognition systems usually recognize strokes either as stylistic gestures or geometric shapes. Both techniques have their advantages. This paper presents a method for integrating gesture-based and geometric recognition techniques, significantly outperforming either technique on its own.

Show BibTex
@inproceedings{Corey:2008:GCG:1620270.1620354,
 author = {Corey, Paul and Hammond, Tracy},
 title = {GLADDER: Combining Gesture and Geometric Sketch Recognition},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1788--1789},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620354},
 acmid = {1620354},
 publisher = {AAAI Press}
} 
2008 
PublicationImage
 Dahmen, Katie; Hammond, Tracy. Distinguishing between Sketched Scribble Look Alikes. Proceedings of the Twenty-Third AAAI Conference on Artificial Intelligence (AAAI) Student Abstracts. pp. 1790-1791. Chicago, IL. AAAI, July 13-17, 2008. Link
Show Abstract:
In hand-sketched drawings, nearly identical strokes may have different meanings to a user. For instance, a scribble could signify either that a shape should be filled in or that it should be deleted. This work describes a method for determining user intention in drawing scribbles in the context of a pen-based computer sketch. Our study shows that given two strokes, a circle and a scribble, two features (bounding ratio and density) can quickly and effectively determine a user’s intention.

Show BibTex
@inproceedings{Dahmen:2008:DSS:1620270.1620355,
 author = {Dahmen, Katie and Hammond, Tracy},
 title = {Distinguishing Between Sketched Scribble Look Alikes},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1790--1791},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620355},
 acmid = {1620355},
 publisher = {AAAI Press}
}
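The abstract above names bounding ratio and density as the two features that separate a scribble from a circle. The exact definitions and the threshold below are assumptions made only to illustrate the idea that a back-and-forth scribble packs far more ink into its bounding box than a single-pass circle does.

# Illustrative only: assumed definitions of the two features named in the abstract.
import math

def scribble_features(points):
    xs, ys = zip(*points)
    w = (max(xs) - min(xs)) or 1e-9
    h = (max(ys) - min(ys)) or 1e-9
    path_len = sum(math.dist(points[i], points[i + 1]) for i in range(len(points) - 1))
    density = path_len / (w * h)             # ink length per unit of bounding-box area
    bounding_ratio = min(w, h) / max(w, h)   # how square-shaped the bounding box is
    return density, bounding_ratio

def looks_like_scribble(points, density_threshold=0.15):
    density, _ = scribble_features(points)
    return density > density_threshold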
2008 
PublicationImage
 Eoff, Brian; Hammond, Tracy. User Identification by Means of Sketched Stroke Features. Proceedings of the Twenty-Third AAAI Conference on Artificial Intelligence (AAAI) Student Abstracts. pp. 1794-1795. Chicago, IL. AAAI, July 13-17, 2008. Link
Show Abstract:
We present preliminary results of using physical features of a user’s sketching style, such as pen tilt and pressure, to identify a user from their sketched strokes.

Show BibTex
@inproceedings{Eoff:2008:UIM:1620270.1620357,
 author = {Eoff, Brian David and Hammond, Tracy},
 title = {User Identification by Means of Sketched Stroke Features},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1794--1795},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620357},
 acmid = {1620357},
 publisher = {AAAI Press}
}
2008 
PublicationImage
 Hammond, Tracy. Workshop - Integrating Sketch Recognition Technologies Into Your Classroom. 38th Annual Frontiers in Education Conference. pp. W2C-1. Saratoga Springs, NY. IEEE, October 22-25, 2008. Link
Show Abstract:
Graphical diagrams are an important part of the educational process, as students draw diagrams in fields as varied as business, math, computer science, engineering, music, and many others. Hand-sketched student diagrams aid in active learning and creative processes. However, correcting hand-sketched diagrams takes a significant amount of teacher time, so such diagrams are often left out of the testing process. Automatically correcting these diagrams can provide immediate student and instructor feedback while significantly reducing instructor time. This workshop will introduce the audience to sketch recognition tools that are available for use in their classroom for active learning, immediate feedback, and automated assessment.

Show BibTex
@INPROCEEDINGS{Hammond4720505,
 author = {T. Hammond},
 booktitle = {Frontiers in Education Conference, 2008. FIE 2008. 38th Annual},
 title = {Workshop - integrating sketch recognition technologies into your classroom},
 year = {2008},
 pages = {W2C-1--W2C-1},
 keywords = {CAD; computer aided instruction; diagrams; active learning; automated assessment; graphical diagrams; 
    hand-sketched student diagrams; immediate feedback; sketch recognition technologies; pen-input; 
    sketch recognition; tablet computers},
 doi = {10.1109/FIE.2008.4720505},
 ISSN = {0190-5848},
 month = {Oct}
}
2008 
PublicationImage
 Hammond, Tracy; Eoff, Brian; Paulson, Brandon; Wolin, Aaron; Dahmen, Katie; Johnston, Joshua; Rajan, Pankaj. Free-Sketch Recognition: Putting the CHI in Sketching. CHI'08 Extended Abstracts on Human Factors in Computing Systems (CHI). pp. 3027-3032. Florence, Italy. ACM, April 5-10, 2008. Link
Show Abstract:
Sketch recognition techniques have generally fallen into two camps. Gesture-based techniques, such as those used by the Palm Pilot’s Graffiti, can provide high accuracy, but require the user to learn a particular drawing style in order for shapes to be recognized. Free-sketch recognition allows users to draw shapes as they would naturally, but most current techniques have low accuracies or require significant domain-level tweaking to make them usable. Our goal is to recognize free-hand sketches with high accuracy by developing generalized techniques that work for a variety of domains, including design and education. This is a work-in-progress, but we have made significant advancements toward our over-arching goal.

Show BibTex
@inproceedings{Hammond:2008:FRP:1358628.1358802,
 author = {Hammond, Tracy and Eoff, Brian and Paulson, Brandon and Wolin, Aaron and Dahmen, 
   Katie and Johnston, Joshua and Rajan, Pankaj},
 title = {Free-sketch Recognition: Putting the Chi in Sketching},
 booktitle = {CHI '08 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '08},
 year = {2008},
 isbn = {978-1-60558-012-8},
 location = {Florence, Italy},
 pages = {3027--3032},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/1358628.1358802},
 doi = {10.1145/1358628.1358802},
 acmid = {1358802},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {free-sketch, ladder, multimodal interaction, paleosketch, pen input, shortstraw, 
   sketch recognition, tablet pc}
} 
2008 
PublicationImage
 Paulson, Brandon; Eoff, Brian; Wolin, Aaron; Johnston, Joshua; Hammond, Tracy. Sketch-Based Educational Games: Drawing Kids Away from Traditional Interfaces. Proceedings of the 7th international conference on Interaction Design and Children (IDC). pp. 133-136. Chicago, IL. ACM, July 11-13, 2008. Link
Show Abstract:
Computer-based games and technologies can be significant aids for helping children learn. However, most computer-based games simply address the learning styles of visual and auditory learners. Sketch-based interfaces, in contrast, can also address the needs of those children who learn better through tactile and kinesthetic approaches. Furthermore, sketch recognition can allow for automatic feedback to aid children without the explicit need for a teacher to be present. In this paper, we present various sketch-based tools and games that promote tactile learning and entertainment for children.

Show BibTex
@inproceedings{Paulson:2008:SEG:1463689.1463739,
 author = {Paulson, Brandon and Eoff, Brian and Wolin, Aaron and Johnston, Joshua and Hammond, Tracy},
 title = {Sketch-based Educational Games: Drawing Kids Away from Traditional Interfaces},
 booktitle = {Proceedings of the 7th International Conference on Interaction Design and Children},
 series = {IDC '08},
 year = {2008},
 isbn = {978-1-59593-994-4},
 location = {Chicago, Illinois},
 pages = {133--136},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/1463689.1463739},
 doi = {10.1145/1463689.1463739},
 acmid = {1463739},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {LADDER, PaleoSketch, automated feedback, educational games, sketch recognition}
}
2008 
PublicationImage
 Paulson, Brandon; Hammond, Tracy. Office Activity Recognition Using Hand Posture Cues. Proceedings of the 22nd British HCI Group Annual Conference on People and Computers: Culture, Creativity, Interaction. Volume 2, pp. 75-78. Liverpool, England. British Computer Society, September 1-5, 2008. Link
Show Abstract:
Activity recognition plays a key role in providing information for context-aware applications. When attempting to model activities, some researchers have looked towards Activity Theory, which theorizes that activities have objectives and are accomplished through tools and objects. The goal of this paper is to determine if hand posture can be used as a cue to determine the types of interactions a user has with objects in a desk/office environment. Furthermore, we wish to determine if hand posture is user-independent across all users when interacting with the same objects in a natural manner. Our initial experiments indicate that a) hand posture can be used to determine object interaction, with accuracy rates above 94% for a user-dependent system, and b) hand posture is dependent upon the individual user when users are allowed to interact with objects as they would naturally.

Show BibTex
@inproceedings{Paulson:2008:OAR:1531826.1531845,
 author = {Paulson, Brandon and Hammond, Tracy},
 title = {Office Activity Recognition Using Hand Posture Cues},
 booktitle = {Proceedings of the 22nd British HCI Group Annual Conference on People and Computers: Culture, 
   Creativity, Interaction - Volume 2},
 series = {BCS-HCI '08},
 year = {2008},
 isbn = {978-1-906124-06-9},
 location = {Liverpool, United Kingdom},
 pages = {75--78},
 numpages = {4},
 url = {http://dl.acm.org/citation.cfm?id=1531826.1531845},
 acmid = {1531845},
 publisher = {British Computer Society},
 address = {Swinton, UK},
 keywords = {CyberGlove, activity recognition, context-aware, hand posture, office environment, wearable computing}
} 
2008 
PublicationImage
 Paulson, Brandon; Hammond, Tracy. PaleoSketch: Accurate Primitive Sketch Recognition and Beautification. Proceedings of the 13th International Conference on Intelligent User Interfaces (IUI). pp. 10 pages. Canary Islands, Spain. ACM, January 13-16, 2008. Link
Show Abstract:
Sketching is a natural form of human communication and has become an increasingly popular tool for interacting with user interfaces. In order to facilitate the integration of sketching into traditional user interfaces, we must first develop accurate ways of recognizing users’ intentions while providing feedback to catch recognition problems early in the sketching process. One approach to sketch recognition has been to recognize low-level primitives and then hierarchically construct higher-level shapes based on geometric constraints defined by the user; however, current low-level recognizers only handle a small number of primitive shapes. We propose a new low-level recognition and beautification system that can recognize eight primitive shapes, as well as combinations of these primitives, with recognition rates at 98.56%. Our system also automatically generates beautified versions of these shapes to provide feedback early in the sketching process. In addition to looking at geometric perception, much of our recognition success can be attributed to two new features, along with a new ranking algorithm, which have proven to be significant in distinguishing polylines from curved segments.

Show BibTex
@inproceedings{Paulson:2008:PAP:1378773.1378775,
 author = {Paulson, Brandon and Hammond, Tracy},
 title = {PaleoSketch: Accurate Primitive Sketch Recognition and Beautification},
 booktitle = {Proceedings of the 13th International Conference on Intelligent User Interfaces},
 series = {IUI '08},
 year = {2008},
 isbn = {978-1-59593-987-6},
 location = {Gran Canaria, Spain},
 pages = {1--10},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/1378773.1378775},
 doi = {10.1145/1378773.1378775},
 acmid = {1378775},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {low-level processing, pen-based interfaces, shape beautification, sketch recognition}
} 
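The two features named above can be sketched roughly as follows; the resampling and smoothing steps the paper applies are omitted, so treat this as an approximation of the definitions rather than the reference implementation. NDDE measures how much of the stroke lies between its direction extremes, and DCR compares the largest direction change to the average one; polylines tend toward low NDDE and high DCR, curves toward the opposite.

# Approximate NDDE and DCR computations for a resampled stroke of (x, y) points.
import math

def _directions(points):
    # Direction of travel between consecutive points.
    return [math.atan2(y2 - y1, x2 - x1)
            for (x1, y1), (x2, y2) in zip(points, points[1:])]

def _arc_length(points, i, j):
    lo, hi = sorted((i, j))
    return sum(math.dist(points[k], points[k + 1]) for k in range(lo, hi))

def ndde(points):
    # Normalized distance between direction extremes: stroke length between the
    # points of highest and lowest direction, over total stroke length.
    d = _directions(points)
    total = _arc_length(points, 0, len(points) - 1) or 1e-9
    return _arc_length(points, d.index(max(d)), d.index(min(d))) / total

def dcr(points):
    # Direction change ratio: maximum direction change over average direction change.
    # Assumes the stroke has been resampled to more than a handful of points.
    d = _directions(points)
    changes = [abs(b - a) for a, b in zip(d, d[1:])]
    return max(changes) / (sum(changes) / len(changes) + 1e-9)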
2008 
PublicationImage
 Paulson, Brandon; Hammond, Tracy. Marqs: Retrieving Sketches Learned from a Single Example Using a Dual-Classifier. Journal on Multimodal User Interfaces. Volume 2, Number 1, pp. 3-11. Springer-Verlag, July, 2008. Link
Show Abstract:
Mouse and keyboard interfaces handle traditional text-based queries, and standard search engines provide for effective text-based search. However, everyday documents are filled with not only text, but photos, cartoons, diagrams, and sketches. These images can often be easier to recall than the surrounding text. In an effort to make human computer interaction handle more forms of human-human interaction, sketching has recently become an important means of interacting with computer systems. We propose extending the traditional monomodal model of text-based search to include the capabilities of sketch-based search. Our goal is to create a sketch-based search that can find documents from a single query sketch. We imagine an important use for this technology would be to allow users to search a computerized laboratory notebook for a previously drawn sketch. Because such a sketch will have initially been drawn only a single time, it is important that the search-by-sketch system (1) recognize a wide range of shapes that are not necessarily geometric nor drawn in the same way each time, (2) recognize a query example from only one initial training example, and (3) learn from successful queries to improve accuracy over time. We present here such an algorithm. To test the algorithm, we implemented a proof-of-concept system: MARQS, a system that uses sketches to query existing media albums. Preliminary results show that the system yielded an average search rank of 1.51, indicating that the correct sketch is presented as either the top or second search result on average.

Show BibTex
@article{Paulson2008,
 author = {Paulson, Brandon and Hammond, Tracy},
 title = {{MARQS}: retrieving sketches learned from a single example using a dual-classifier},
 journal = {Journal on Multimodal User Interfaces},
 year = {2008},
 volume = {2},
 number = {1},
 pages = {3--11},
 issn = {1783-8738},
 doi = {10.1007/s12193-008-0006-0},
 url = {http://dx.doi.org/10.1007/s12193-008-0006-0}
}
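The dual-classifier behavior described above can be illustrated, under assumptions (the abstract does not specify the two classifiers or the switching rule), as falling back to 1-nearest-neighbor template matching while only single examples exist and switching to a trained model once successful queries have accumulated enough examples:

# Illustrative dual-classifier behaviour inferred from the abstract; the threshold
# and both models are assumptions, not the MARQS implementation.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier

class DualSketchClassifier:
    def __init__(self, min_examples=5):
        self.min_examples = min_examples
        self.X, self.y = [], []

    def add_example(self, feature_vector, label):
        # Called for the initial sketch and again after each successful query.
        self.X.append(list(feature_vector))
        self.y.append(label)

    def predict(self, feature_vector):
        smallest_class = min(self.y.count(c) for c in set(self.y))
        if len(set(self.y)) > 1 and smallest_class >= self.min_examples:
            model = LogisticRegression(max_iter=1000)   # enough data: trained classifier
        else:
            model = KNeighborsClassifier(n_neighbors=1)  # sparse data: template matching
        model.fit(np.asarray(self.X, float), self.y)
        return model.predict([list(feature_vector)])[0]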
2008 
PublicationImage
 Paulson, Brandon; Rajan, Pankaj; Davalos, Pedro; Gutierrez-Osuna, Ricardo; Hammond, Tracy. What!?! No Rubine Features?: Using Geometric-Based Features to Produce Normalized Confidence Values for Sketch Recognition. HCC Workshop: Sketch Tools for Diagramming (VL/HCC). pp. 57-63. Herrsching am Ammersee, Germany. September 15-19, 2008. Link
Show Abstract:
As pen-based interfaces become more popular in today’s applications, the need for algorithms to accurately recognize hand-drawn sketches and shapes has increased. In many cases, complex shapes can be constructed hierarchically as a combination of smaller primitive shapes meeting certain geometric constraints. However, in order to construct higher level shapes, it is imperative to accurately recognize the lower-level primitives. Two approaches have become widespread in the sketch recognition field for recognizing lower-level primitives: gesture-based recognition and geometric-based recognition. Our goal is to use a hybrid approach that combines features from both traditional gesture-based recognition systems and geometric-based recognition systems. In this paper, we show that we can produce a system with high recognition rates while providing the added benefit of being able to produce normalized confidence values for alternative interpretations; something most geometric-based recognizers lack. More significantly, results from feature subset selection indicate that geometric features aid the recognition process more than gesture-based features when given naturally sketched data.

Show BibTex
@inproceedings{paulson2008HCC,
 title = {What!?! No Rubine Features?: Using Geometric-Based Features to Produce Normalized Confidence Values 
   for Sketch Recognition},
 author = {Paulson, Brandon and Rajan, Pankaj and Davalos, Pedro and Gutierrez-Osuna, Ricardo and Hammond, Tracy},
 booktitle = {HCC Workshop: Sketch Tools for Diagramming (VL/HCC)},
 year = {2008},
 address = {Herrsching am Ammersee, Germany},
 month = 9,
 organization = {VL/HCC},
 pages = {57--63}
}
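One common way to turn per-interpretation classifier scores into the normalized confidence values discussed above is a softmax; the snippet below is an illustrative stand-in, not the classifier used in the paper.

# Softmax over raw scores: confidences sum to 1, so alternative interpretations
# of a stroke can be ranked against each other.
import math

def normalized_confidences(scores):
    # scores: dict mapping each candidate interpretation to a raw classifier score.
    m = max(scores.values())
    exps = {label: math.exp(s - m) for label, s in scores.items()}
    total = sum(exps.values())
    return {label: e / total for label, e in exps.items()}

# Example: ranking alternative primitive interpretations of a single stroke.
print(normalized_confidences({"line": 2.1, "arc": 1.3, "ellipse": -0.4}))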
2008 
PublicationImage
 Paulson, Brandon; Wolin, Aaron; Johnston, Joshua; Hammond, Tracy. SOUSA: Sketch-based Online User Study Applet. Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling (SBIM). pp. 8 pages. Annecy, France. Eurographics Association, June 11-13, 2008. Link
Show Abstract:
Although existing domain-specific datasets are readily available, most sketch recognition researchers are forced to collect new data for their particular domain. Creating tools to collect and label sketched data can take time, and, if every researcher creates their own toolset, much time is wasted that could be better spent on advanced research. Additionally, it is often the case that other researchers have performed collection studies and collected the same types of sketch data, resulting in large duplications of effort. We propose, and have built, a general-purpose sketch collection and verification tool that allows researchers to design custom user studies through an online applet residing on our group’s web page. By hosting such a tool through our site, we hope to provide researchers with a quick and easy way of collecting data. Additionally, our tool serves to create a universal repository of sketch data that can be made readily available to other sketch recognition researchers.

Show BibTex
@inproceedings{Paulson:2008:SSO:2386301.2386316,
 author = {Paulson, B. and Wolin, A. and Johnston, J. and Hammond, T.},
 title = {{SOUSA}: Sketch-based Online User Study Applet},
 booktitle = {Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling},
 series = {SBM'08},
 year = {2008},
 isbn = {978-3-905674-07-1},
 location = {Annecy, France},
 pages = {81--88},
 numpages = {8},
 url = {http://dx.doi.org/10.2312/SBM/SBM08/081-088},
 doi = {10.2312/SBM/SBM08/081-088},
 acmid = {2386316},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
}
2008 
PublicationImage
 Peschel, Joshua M; Hammond, Tracy Anne. STRAT: A Sketched-Truss Recognition and Analysis Tool. 2008 International Workshop on Visual Languages and Computing (VLC) at the 14th International Conference on distributed Multimedia Systems (DMS). pp. 282-287. Boston, MA. Knowledge Systems Institute, September 4-6, 2008. Link
Show Abstract:
The statically-determinate, pin-connected truss is a basic structural element used by engineers to create larger and more complex systems. Truss analysis and design are topics that virtually all students who study engineering mechanics are required to master, many of whom may experience difficulty with initial understanding. The mathematics used to analyze truss systems typically requires lengthy hand calculations or the assistance of proprietary computer-aided design (CAD) programs. To expedite work in this domain, we propose: STRAT (Sketched-Truss Recognition and Analysis Tool), a freehand sketch recognition system for solving truss problems. The STRAT system allows users to rapidly determine all of the unknown forces in a truss, using only a hand-drawn sketch of the truss itself. The focus of this article covers the design methodology and implementation of the STRAT system. Results from a preliminary user study are also presented.

Show BibTex
@inproceedings{peschel2008STRAT,
 title = {STRAT: A Sketched-Truss Recognition and Analysis Tool},
 author = {Peschel, Joshua M and Hammond, Tracy Anne},
 booktitle = {2008 International Workshop on Visual Languages and Computing (VLC) at the 14th International 
   Conference on distributed Multimedia Systems (DMS)},
 year = {2008},
 address = {Boston, MA},
 month = 9,
 organization = {Knowledge Systems Institute},
 pages = {282--287}
}
2008 
PublicationImage
 Plimmer, Beryl; Hammond, Tracy. Getting Started with Sketch Tools. Fifth International Conference on the Theory and Application of Diagrams (Diagrams 2008), Lecture Notes in Artificial Intelligence. pp. 9-12. Herrsching am Ammersee, Germany. Springer Berlin Heidelberg, September 19-21, 2008. Link
Show Abstract:

Show BibTex
@inproceedings{Plimmer:2008:GSS:1432522.1432529,
 author = {Plimmer, Beryl and Hammond, Tracy},
 title = {Getting Started with Sketch Tools},
 booktitle = {Proceedings of the 5th International Conference on Diagrammatic Representation and Inference},
 series = {Diagrams '08},
 year = {2008},
 isbn = {978-3-540-87729-5},
 location = {Herrsching, Germany},
 pages = {9--12},
 numpages = {4},
 url = {http://dx.doi.org/10.1007/978-3-540-87730-1_5},
 doi = {10.1007/978-3-540-87730-1_5},
 acmid = {1432529},
 publisher = {Springer-Verlag},
 address = {Berlin, Heidelberg}
} 
2008 
PublicationImage
 Plimmer, Beryl; Hammond, Tracy. Workshop on Sketch Tools for Diagramming. Proceedings of the 2008 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC). pp. 4. Herrsching am Ammersee, Germany. SRL, September 15-19, 2008. Link
Show Abstract:

Show BibTex
@inproceedings{Plimmer:2008:WST:1549823.1550035,
 author = {Plimmer, Beryl and Hammond, Tracy},
 title = {Workshop on Sketch Tools for Diagramming},
 booktitle = {Proceedings of the 2008 IEEE Symposium on Visual Languages and Human-Centric Computing},
 series = {VLHCC '08},
 year = {2008},
 isbn = {978-1-4244-2528-0},
 pages = {4--},
 url = {http://dx.doi.org/10.1109/VLHCC.2008.4639047},
 doi = {10.1109/VLHCC.2008.4639047},
 acmid = {1550035},
 publisher = {IEEE Computer Society},
 address = {Washington, DC, USA}
} 
2008 
PublicationImage
 Rajan, Pankaj; Hammond, Tracy. From Paper to Machine: Extracting Strokes from Images for Use in Sketch Recognition. Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling (SBIM). pp. 8 pages. Annecy, France. Eurographics Association, June 11-13, 2008. Link
Show Abstract:
Sketching is a way of conveying ideas to people of diverse backgrounds and culture without any linguistic medium. With the advent of inexpensive tablet PCs, online sketches have become more common, allowing for stroke-based sketch recognition techniques, more powerful editing techniques, and automatic simulation of recognized diagrams. Online sketches provide significantly more information than paper sketches, but they still do not provide the flexibility, naturalness, and simplicity of a simple piece of paper. Recognition methods exist for paper sketches, but they tend to be domain specific and don’t benefit from the advances of stroke-based sketch recognition. Our goal is to combine the power of stroke-based sketch recognition with the flexibility and ease of use of a piece of paper. In this paper we will present a stroke-tracing algorithm that can be used to extract stroke data from the pixelated image of the sketch drawn on paper. The presented method both handles overlapping strokes and also attempts to capture sequencing information, which is helpful in many sketch recognition techniques. We present preliminary results of our algorithm on several paper-drawn hand-sketched scanned-in pixelated images.

Show BibTex
@inproceedings{Rajan:2008:PME:2386301.2386309,
 author = {Rajan, Pankaj and Hammond, T.},
 title = {From Paper to Machine: Extracting Strokes from Images for Use in Sketch Recognition},
 booktitle = {Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling},
 series = {SBM'08},
 year = {2008},
 isbn = {978-3-905674-07-1},
 location = {Annecy, France},
 pages = {41--48},
 numpages = {8},
 url = {http://dx.doi.org/10.2312/SBM/SBM08/041-048},
 doi = {10.2312/SBM/SBM08/041-048},
 acmid = {2386309},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
} 
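A much-simplified stand-in for the stroke-tracing idea described above: thin the scanned ink to a one-pixel skeleton (scikit-image) and walk the skeleton from an endpoint to recover an ordered point sequence. Overlapping strokes and the stroke-order heuristics the paper addresses are deliberately not handled here.

# Simplified stroke tracing from a binary scan; branching and overlapping
# strokes are ignored in this sketch.
import numpy as np
from skimage.morphology import skeletonize

def trace_single_stroke(ink_mask):
    # ink_mask: 2-D boolean array, True where the scanned image contains ink.
    skel = skeletonize(ink_mask)
    pixels = {tuple(p) for p in np.argwhere(skel)}
    if not pixels:
        return []

    def neighbors(p):
        r, c = p
        return [(r + dr, c + dc) for dr in (-1, 0, 1) for dc in (-1, 0, 1)
                if (dr or dc) and (r + dr, c + dc) in pixels]

    # Prefer to start at an endpoint (a skeleton pixel with exactly one neighbor).
    start = next((p for p in pixels if len(neighbors(p)) == 1), next(iter(pixels)))
    stroke, visited = [start], {start}
    while True:
        unvisited = [n for n in neighbors(stroke[-1]) if n not in visited]
        if not unvisited:
            break
        stroke.append(unvisited[0])
        visited.add(unvisited[0])
    return stroke   # ordered (row, col) points approximating the pen path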
2008 
PublicationImage
 Taele, Paul; Hammond, Tracy. Using a Geometric-Based Sketch Recognition Approach to Sketch Chinese Radicals. Proceedings of the Twenty-Third AAAI Conference on Artificial Intelligence (AAAI) Student Abstracts. pp. 1832-1833. Chicago, IL. AAAI, July 13-17, 2008. Link
Show Abstract:
Unlike English, where unfamiliar words can be queried for their meaning by typing out their letters, the analogous operation in Chinese is far from trivial due to the nature of its written language. One approach for querying Chinese characters involves referencing their dictionary components called radicals. This is advantageous since users would not need to know their pronunciation nor their stroke order, a requirement in other querying approaches. Currently though, sketching a character’s radical for querying is an unsupported capability in existing systems. Using the geometric-based LADDER sketching language combined with the Sezgin low-level recognizer, we were able to construct an application which can first recognize handwritten sketches of Chinese radicals, and then output candidate Chinese characters which contain that radical. Thus, we were able to demonstrate that a geometric-based sketch recognition approach can be used to easily build applications for recognizing symbols related to Chinese characters while having reasonable recognition rates. Unlike current image-based recognition systems, our system also maintains stroke order information of characters. Since stroke order is important in written Chinese, our system can be easily expanded for use in Chinese language education by providing visual feedback to students on correct stroke order.

Show BibTex
@inproceedings{Taele:2008:UGS:1620270.1620376,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Using a Geometric-based Sketch Recognition Approach to Sketch Chinese Radicals},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1832--1833},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620376},
 acmid = {1620376},
 publisher = {AAAI Press}
} 
2008 
PublicationImage
 Taele, Paul; Hammond, Tracy. Chinese Characters as Sketch Diagrams Using a Geometric-Based Approach. Proceedings of the 2008 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC) Workshop on Sketch Tools for Diagramming. pp. 74-82. Herrsching am Ammersee, Germany. SRL, September 15-19, 2008. Link
Show Abstract:
Knowledge of over a thousand Chinese characters is necessary to effectively communicate in written Chinese and Japanese, so writing patterns such as stroke order and direction are heavily emphasized to students for efficient memorization. Pedagogical methods for Chinese characters can greatly benefit from sketch diagramming tools, since they can automate the task of critiquing students' writing technique. Falling costs and greater advances in pen-based computing devices even allow language programs to afford deploying these systems for augmenting their existing curriculum. While current vision-based techniques for sketching Chinese characters could be adopted for their high visual recognition rates, they do not directly support technique recognition and are unable to provide feedback for critiquing technique. A geometric-based approach can accomplish this task, though its visual recognition rates have largely been untested. In this paper, we analyze the feasibility of a geometric-based approach in visual recognition, as well as discuss its suitability for use in a learning tool for teaching Chinese characters.

Show BibTex
@inproceedings{taele2008HCC,
 title = {Chinese Characters as Sketch Diagrams Using a Geometric-Based Approach},
 author = {Taele, Paul and Hammond, Tracy},
 booktitle = {Proceedings of the 2008 IEEE Symposium on Visual Languages and Human-Centric Computing 
   (VL/HCC) Workshop on Sketch Tools for Diagramming},
 year = {2008},
 address = {Herrsching am Ammersee, Germany},
 month = 9,
 organization = {VL/HCC},
 pages = {74--82}
}
2008 
PublicationImage
 Taele, Paul; Hammond, Tracy Anne. A Geometric-Based Sketch Recognition Approach for Handwritten Mandarin Phonetic Symbols I. 2008 International Workshop on Visual Languages and Computing (VLC) at the 14th International Conference on distributed Multimedia Systems (DMS). pp. 6 pages. Boston, MA. Knowledge Systems Institute, September 4-6, 2008. Link
Show Abstract:
Inputting written Chinese, unlike written English, is a non-trivial operation using a standard keyboard. To accommodate this operation, numerous existing phonetic systems using the Roman alphabet were adopted as a means of input while still making use of a Western keyboard. With the growing prevalence of computing devices capable of pen-based input, naturally sketching written Chinese using a phonetic system becomes possible, and is also generally faster and simpler than sketching entire Chinese characters. One method for sketching Chinese characters for computing devices capable of pen-based input involves using an existing non-alphabetic phonetic system called the Mandarin Phonetic Symbols I (MPS1). The benefits of inputting Chinese characters by its corresponding MPS1 symbols – unlike letters from its alphabetic-based counterpart – is that it retains the phonemic components of the corresponding Chinese characters. The work in the paper describes our geometric-based MPS1 recognition system, a system designed particularly for novice users of MPS1 symbols that gives reasonable vision-based recognition rates and provides useful feedback for symbols drawn with incorrect sketching technique such as stroke order.

Show BibTex
@inproceedings{taele2008geometric,
 title = {A Geometric-Based Sketch Recognition Approach for Handwritten Mandarin Phonetic Symbols {I}},
 author = {Taele, Paul and Hammond, Tracy Anne},
 booktitle = {2008 International Workshop on Visual Languages and Computing (VLC) at the 14th International 
   Conference on distributed Multimedia Systems (DMS)},
 year = {2008},
 address = {Boston, MA},
 month = 9,
 organization = {Knowledge Systems Institute},
 note = {6 pages}
}
2008 
PublicationImage
 Wolin, Aaron; Eoff, Brian; Hammond, Tracy. ShortStraw: A Simple and Effective Corner Finder for Polylines. Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling (SBIM). pp. 33-40. Annecy, France. Eurographics Association, June 11-13, 2008. Link
Show Abstract:

Show BibTex
@inproceedings{Wolin:2008:SSE:2386301.2386308,
 author = {Wolin, A. and Eoff, B. and Hammond, T.},
 title = {ShortStraw: A Simple and Effective Corner Finder for Polylines},
 booktitle = {Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling},
 series = {SBM'08},
 year = {2008},
 isbn = {978-3-905674-07-1},
 location = {Annecy, France},
 pages = {33--40},
 numpages = {8},
 url = {http://dx.doi.org/10.2312/SBM/SBM08/033-040},
 doi = {10.2312/SBM/SBM08/033-040},
 acmid = {2386308},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
} 
2008 
PublicationImage
 Wolin, Aaron; Paulson, Brandon; Hammond, Tracy. Eliminating False Positives during Corner Finding by Merging Similar Segments. Proceedings of the Twenty-Third AAAI Conference on Artificial Intelligence (AAAI) Student Abstracts. pp. 1836-1837. Chicago, IL. AAAI, July 13-17, 2008. Link
Show Abstract:
We present a new corner finding algorithm based on merging similar stroke segments together in order to eliminate false positive corners. We compare our system to two benchmark corner finders with substantial improvements in both polyline and complex fits.

Show BibTex
@inproceedings{Wolin:2008:EFP:1620270.1620378,
 author = {Wolin, Aaron and Paulson, Brandon and Hammond, Tracy},
 title = {Eliminating False Positives During Corner Finding by Merging Similar Segments},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1836--1837},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620378},
 acmid = {1620378},
 publisher = {AAAI Press}
}
2007 
PublicationImage
 Hammond, Tracy. Sketch Recognition at Texas A&M University. Brown Workshop on Pen-Centric Computing. pp. 6 pages. Providence, RI. Brown University, March 25, 2007. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2007Brown,
 title = {Sketch Recognition at Texas A\&M University},
 author = {Hammond, Tracy},
 booktitle = {Brown Workshop on Pen-Centric Computing},
 year = 2007,
 address = {Providence, RI},
 month = 3,
 organization = {Brown University},
 note = {6 pages}
}
2007 
PublicationImage
 Hammond, Tracy. Enabling Instructors to Develop Sketch Recognition Applications for the Classroom. 37th Annual Frontiers In Education Conference (FIE). pp. S3J11-S3J16. Milwaukee, WI. IEEE, October 10-13, 2007. Link
Show Abstract:
Instructors and students sketch graphical diagrams in a variety of classes from pre-K through higher education. Hand sketching the diagrams can engage students’ creative processes as they watch the diagrams being created in real-time. Animations can help a student’s functional understanding. However, hand-sketched diagrams currently remain static and uninterpreted, and animations currently have to be canned, pre-made diagrams. Sketch recognition systems recognize hand-drawn diagrams, but they take a lot of time and effort to build and require expertise in sketch recognition programming. To simplify the creation of sketch recognition systems, we have built LADDER, a language to describe how shapes in a domain are drawn, displayed, and edited for use in sketch recognition, and GUILD, a system to automatically generate user interfaces from LADDER descriptions. The goal of this work is to facilitate the development of sketch recognition systems so that non-experts in sketch recognition, such as teachers, can develop sketch systems for their classrooms. The research is continuously being improved, but thus far, over twenty people have built sketch recognition systems using these technologies.

Show BibTex
@INPROCEEDINGS{hammond2007FIE,
 author = {T. Hammond},
 booktitle = {Frontiers In Education Conference - Global Engineering: Knowledge Without Borders, Opportunities Without
   Passports, 2007. FIE '07. 37th Annual},
 title = {Enabling instructors to develop sketch recognition applications for the classroom},
 year = {2007},
 pages = {S3J-11--S3J-16},
 keywords = {computer aided instruction; computer animation; image recognition; user interfaces; GUILD; LADDER; 
   graphical diagrams; hand sketching; sketch recognition; user interfaces; Animation; Application software; Automata; 
   Computer science; Computer science education; Design automation; Educational institutions; Feedback; Programming 
   profession; Watches; active learning; pen-input; sketch recognition},
 doi = {10.1109/FIE.2007.4417930},
 ISSN = {0190-5848},
 month = {Oct}
}
2007 
PublicationImage
 Hammond, Tracy. Simplifying Sketch Recognition UI Development. Grace Hopper Celebration of Women in Computing. pp. 5 pages. Orlando, FL. Grace Hopper, October 17-20, 2007. Link
Show Abstract:
Sketch recognition systems are time-consuming to build and require signal-processing expertise if they are to handle the intricacies of each domain. Our goal is to enable user interface designers, who may not have expertise in sketch recognition, to be able to build sketch systems. We have built GUILD to automatically generate sketch recognition UIs from computer-generated or hand-typed LADDER descriptions.

Show BibTex
@inproceedings{hammond2007GHC,
 title = {Simplifying Sketch Recognition {UI} Development},
 author = {Hammond, Tracy},
 booktitle = {Grace Hopper Celebration of Women in Computing},
 year = 2007,
 address = {Orlando, FL},
 month = 10,
 organization = {GHC},
 note = {5 pages}
}
2007 
PublicationImage
 Hammond, Tracy; O’Sullivan, Barry. Recognizing Free-Form Hand-Sketched Constraint Network Diagrams by Combining Geometry and Context. Proceedings of the Eurographics Ireland. pp. 67-74. Dublin, Ireland. Eurographics, December 17, 2007. Link
Show Abstract:
Constraint satisfaction problems (CSPs) are ubiquitous in many real-world contexts. However, modeling a problem as a CSP can be very challenging, usually requiring considerable expertise. In many application domains there can often be a domain-specific way of drawing a graphical representation of a problem. Our objective is to develop sketch recognition technology that can recognize hand-drawn representations of problems, and automatically generate constraint satisfaction models of them. This paper describes a sketch recognition system that recognizes and solves a simplified set of hand-drawn constraint problems. Shapes are recognized using a combination of geometric and contextual rules, allowing shapes to be drawn freely, without requiring a specific drawing style.

Show BibTex
@inproceedings{hammond2007EG,
 title = {Recognizing Free-Form Hand-Sketched Constraint Network Diagrams by Combining Geometry and Context},
 author = {Hammond, Tracy and O'Sullivan, Barry},
 booktitle = {Proceedings of the Eurographics Ireland},
 year = 2007,
 pages = {67--74},
 address = {Dublin, Ireland},
 month = 12,
 organization = {Eurographics}
}
2007 
PublicationImage
 Paulson, Brandon; Hammond, Tracy. A System for Recognizing and Beautifying Low-Level Sketch Shapes Using NDDE and DCR. ACM Symposium on User Interface Software and Technology (UIST). pp. 2 pages. Newport Rhode Island. ACM, October 7-10, 2007. Link
Show Abstract:
Sketching has been identified as a natural means for human interaction and thus has become commonly incorporated into various user interfaces. Current low-level sketch recognizers have produced good accuracy but recognize only a small set of basic shapes. We propose a low-level sketch recognition and beautification system that uses a hierarchical approach that is capable of recognizing eight primitive shapes, along with complex fits, with preliminary recognition rates around 98.8%. These accuracy rates are comparable to current state-of-the-art recognition systems, which recognize fewer primitives. Furthermore, we introduce two new metrics, normalized distance between direction extremes (NDDE) and direction change ratio (DCR), which aid in distinguishing between polylines and other low-level primitives.

Show BibTex
@inproceedings{paulson2007uist,
 title = {A System for Recognizing and Beautifying Low-Level Sketch Shapes Using NDDE and DCR},
 author = {Paulson, Brandon and Hammond, Tracy},
 booktitle = {ACM Symposium on User Interface Software and Technology (UIST)},
 year = 2007,
 address = {Newport Rhode Island},
 month = 10,
 organization = {ACM},
 note = {2 pages}
}
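As a reading aid for the abstract above, here is a minimal sketch (in Python, invented for this page and not the authors' implementation) of one plausible way to compute NDDE and DCR for a stroke given as a list of (x, y) points; the exact normalizations in the published recognizer may differ.

import math

def directions(points):
    # Direction (angle) of each consecutive segment along the stroke.
    return [math.atan2(y2 - y1, x2 - x1)
            for (x1, y1), (x2, y2) in zip(points, points[1:])]

def arc_length(points, i, j):
    # Stroke length accumulated between point indices i and j (i <= j).
    return sum(math.dist(points[k], points[k + 1]) for k in range(i, j))

def ndde(points):
    # One plausible reading of NDDE: the stroke length between the points with
    # the highest and lowest segment direction, normalized by total stroke length.
    dirs = directions(points)
    a, b = sorted((max(range(len(dirs)), key=dirs.__getitem__),
                   min(range(len(dirs)), key=dirs.__getitem__)))
    total = arc_length(points, 0, len(points) - 1)
    return arc_length(points, a, b) / total if total else 0.0

def dcr(points):
    # One plausible reading of DCR: the maximum change in direction divided by
    # the average change in direction along the stroke.
    dirs = directions(points)
    changes = [abs(d2 - d1) for d1, d2 in zip(dirs, dirs[1:])]
    avg = sum(changes) / len(changes)
    return max(changes) / avg if avg else 0.0

# Toy strokes: a smooth arc keeps DCR near 1 and NDDE high, while a sharp corner
# makes DCR spike -- the property used to separate polylines from curved primitives.
arc = [(math.cos(t / 40.0), math.sin(t / 40.0)) for t in range(40)]
corner = [(x / 10.0, 0.0) for x in range(10)] + [(1.0, y / 10.0) for y in range(1, 10)]
print(round(ndde(arc), 2), round(dcr(arc), 2), round(dcr(corner), 2))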
2006 
PublicationImage
 Hammond, Tracy; Davis, Randall. Interactive Learning of Structural Shape Descriptions from Automatically Generated Near-Miss Examples. Proceedings of the 11th International Conference on Intelligent User Interfaces. pp. 210-217. Sydney, Australia. ACM, January 29 - February 1, 2006. Link
Show Abstract:
Sketch interfaces provide more natural interaction than the traditional mouse and palette tool, but can be time consuming to build if they have to be built anew for each new domain. A shape description language, such as the LADDER language we created, can significantly reduce the time necessary to create a sketch interface by enabling automatic generation of the interface from a domain description. However, structural shape descriptions, whether written by users or created automatically by the computer, are frequently over- or under-constrained. We present a technique to debug over- and under-constrained shapes using a novel form of active learning that generates its own suspected near-miss examples. Using this technique we implemented a graphical debugging tool for use by sketch interface developers.

Show BibTex
@inproceedings{Hammond:2006:ILS:1111449.1111495,
 author = {Hammond, Tracy and Davis, Randall},
 title = {Interactive Learning of Structural Shape Descriptions from Automatically Generated Near-miss Examples},
 booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
 series = {IUI '06},
 year = {2006},
 isbn = {1-59593-287-9},
 location = {Sydney, Australia},
 pages = {210--217},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/1111449.1111495},
 doi = {10.1145/1111449.1111495},
 acmid = {1111495},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {active learning, ladder, near-miss, shape description, sketch recognition, structural description, user interfaces}
} 
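The near-miss generation idea in the abstract above can be pictured in a few lines. The sketch below is an invented toy, not the paper's algorithm: it assumes a shape description is simply a list of named constraint tests and that a domain-specific perturb helper (hypothetical here) can nudge a drawn example.

from dataclasses import dataclass
from typing import Callable, Dict, List, Tuple

@dataclass
class Constraint:
    # A named structural constraint evaluated against a drawn example
    # (the example is represented here as a plain dict of shape parts).
    name: str
    test: Callable[[Dict], bool]

def suspected_near_misses(description: List[Constraint], example: Dict,
                          perturb: Callable[[Dict, str], Dict]) -> List[Tuple[str, Dict]]:
    # For each constraint, ask the (hypothetical) domain-specific perturb helper
    # for a variant of the example aimed at breaking just that constraint. Keep
    # only true near-misses: variants in which exactly one constraint fails.
    candidates = []
    for target in description:
        variant = perturb(example, target.name)
        failed = [c.name for c in description if not c.test(variant)]
        if failed == [target.name]:
            candidates.append((target.name, variant))
    return candidates

Because each kept candidate violates exactly one constraint, the developer's yes/no judgment on that drawing points directly at the constraint that is over- (or under-) constraining the description, which is the kind of debugging loop the abstract describes.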
2005 
PublicationImage
 Hammond, Tracy; Davis, Randall. LADDER, A Sketching Language for User Interface Developers. Computers & Graphics. Volume 29, Number 4, pp. 518-532. Elsevier, 2005. Link
Show Abstract:
Sketch recognition systems are currently being developed for many domains, but can be time consuming to build if they are to handle the intricacies of each domain. In order to aid sketch-based user interface developers, we have developed tools to simplify the development of a new sketch recognition interface. We created LADDER, a language to describe how sketched diagrams in a domain are drawn, displayed, and edited. We then automatically transform LADDER structural descriptions into domain specific shape recognizers, editing recognizers, and shape exhibitors for use in conjunction with a domain independent sketch recognition system, creating a sketch recognition system for that domain. We have tested our framework by writing several domain descriptions and automatically generating a domain specific sketch recognition system from each description.

Show BibTex
@article{hammond2005ladder,
 title = {LADDER, a sketching language for user interface developers},
 author = {Hammond, Tracy and Davis, Randall},
 journal = {Computers \& Graphics},
 volume = {29},
 number = {4},
 pages = {518--532},
 year = {2005},
 publisher = {Elsevier}
}
2004 
PublicationImage
 Hammond, Tracy. Automatically Generating Sketch Interfaces from Shape Descriptions. Proceedings of the 4th Annual MIT Student Oxygen Workshop. pp. 4 pages. Gloucester, Massachusetts. MIT, July, 2004. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2004automatically,
 title = {Automatically generating sketch interfaces from shape descriptions},
 author = {Tracy Hammond},
 booktitle = {Proceedings of the 4th Annual MIT Student Oxygen Workshop},
 year = {2004},
 publisher = {MIT},
 note = {4 pages}
}
2004 
PublicationImage
 Hammond, Tracy; Davis, Randall. Automatically Transforming Symbolic Shape Descriptions for Use in Sketch Recognition. Proceedings of the Nineteenth National Conference on Artificial Intelligence (AAAI). pp. 450-456. San Jose, CA. AAAI, July 25-29, 2004. Link
Show Abstract:
Sketch recognition systems are currently being developed for many domains, but can be time consuming to build if they are to handle the intricacies of each domain. This paper presents the first translator that takes symbolic shape descriptions (written in the LADDER sketch language) and automatically transforms them into shape recognizers, editing recognizers, and shape exhibitors for use in conjunction with a domain independent sketch recognition system. This transformation allows us to build a single domain independent recognition system that can be customized for multiple domains. We have tested our framework by writing several domain descriptions and automatically creating a domain specific sketch recognition system for each domain.

Show BibTex
@inproceedings{Hammond:2004:ATS:1597148.1597222,
 author = {Hammond, Tracy and Davis, Randall},
 title = {Automatically Transforming Symbolic Shape Descriptions for Use in Sketch Recognition},
 booktitle = {Proceedings of the 19th National Conference on Artificial Intelligence},
 series = {AAAI'04},
 year = {2004},
 isbn = {0-262-51183-5},
 location = {San Jose, California},
 pages = {450--456},
 numpages = {7},
 url = {http://dl.acm.org/citation.cfm?id=1597148.1597222},
 acmid = {1597222},
 publisher = {AAAI Press}
} 
2004 
PublicationImage
 Hammond, Tracy; Davis, Randall. Shady: A Shape Description Debugger for Use in Sketch Recognition. AAAI Fall Symposium on Making Pen-Based Interaction Intelligent and Natural (AAAI). pp. 7 pages. Arlington, VA. AAAI, October 22-24, 2004. Link
Show Abstract:
Sketch recognition systems are currently being developed for many domains, but can be time consuming to build if they are to handle the intricacies of each domain. LADDER is a language for describing how domain shapes are drawn, displayed, and edited in a sketch recognition system for that domain. LADDER shape descriptions can be automatically translated into JAVA code to be compiled with a multi-domain sketch recognition system to create a domain specific sketch interface. In this paper we present SHADY, a graphical tool to aid in the creation and debugging of LADDER shape descriptions. SHADY allows sketch interface developers to enter new shape descriptions or debug previously created descriptions, finding both syntactic and conceptual bugs. SHADY checks to see whether a shape description is over-constrained by allowing the developer to draw sample shapes and then indicating which constraints are not met. This paper also describes work in progress on debugging under-constrained descriptions by automatically generating near-miss shapes.

Show BibTex
@inproceedings{hammond2004shady,
 title = {Shady: A Shape Description Debugger for Use in Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {AAAI Fall Symposium on Making Pen-Based Interaction Intelligent and Natural (AAAI)},
 year = 2004,
 address = {Arlington, VA},
 month = 10,
 organization = {AAAI},
 isbn = {978-1-57735-217-4},
 note = {7 pages}
}
2004 
PublicationImage
 Hammond, Tracy; Davis, Randall. Debugging Shape Definitions for Use in Sketch Recognition. MIT Lab Abstract. pp. 2 pages. Cambridge, MA. MIT, September, 2004. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2004Debug,
 title = {Debugging Shape Definitions for Use in Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract},
 year = 2004,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT}, 
 note = {2 pages}
}
2004 
PublicationImage
 Hammond, Tracy; Davis, Randall. LADDER: A Sketch Recognition Language. MIT Lab Abstract. pp. 2 pages. Cambridge, MA. MIT, September, 2004. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2004Ladder,
 title = {LADDER: A Sketch Recognition Language},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract},
 year = 2004,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}
2004 
PublicationImage
 Hammond, Tracy; Davis, Randall. Testing Shape Descriptions by Automatically Translating Them for Use in Sketch Recognition. MIT Lab Abstract. pp. 2 pages. Cambridge, MA. MIT, September, 2004. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2004testing,
 title = {Testing Shape Descriptions by Automatically Translating Them for Use in Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract},
 year = 2004,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}
2003 
PublicationImage
 Hammond, Tracy; Davis, Randall. LADDER: A Language to Describe Drawing, Display, and Editing in Sketch Recognition. Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI). pp. 461-467. Acapulco, Mexico. AAAI, 2003. Link
Show Abstract:
We have created LADDER, the first language to describe how sketched diagrams in a domain are drawn, displayed, and edited. The difficulty in creating such a language is choosing a set of predefined entities that is broad enough to support a wide range of domains, while remaining narrow enough to be comprehensible. The language consists of predefined shapes, constraints, editing behaviors, and display methods, as well as a syntax for specifying a domain description sketch grammar and extending the language, ensuring that shapes and shape groups from many domains can be described. The language allows shapes to be built hierarchically (e.g., an arrow is built out of three lines), and includes the concept of “abstract shapes”, analogous to abstract classes in an object oriented language. Shape groups describe how multiple domain shapes interact and can provide the sketch recognition system with information to be used in top-down recognition. Shape groups can also be used to describe “chain-reaction” editing commands that affect multiple shapes at once. To test that recognition is feasible using this language, we have built a simple domain-independent sketch recognition system that parses the domain descriptions and generates the code necessary to recognize the shapes.

Show BibTex
@inproceedings{Hammond:2003:LLD:1630659.1630728,
 author = {Hammond, Tracy and Davis, Randall},
 title = {LADDER: A Language to Describe Drawing, Display, and Editing in Sketch Recognition},
 booktitle = {Proceedings of the 18th International Joint Conference on Artificial Intelligence},
 series = {IJCAI'03},
 year = {2003},
 location = {Acapulco, Mexico},
 pages = {461--467},
 numpages = {7},
 url = {http://dl.acm.org/citation.cfm?id=1630659.1630728},
 acmid = {1630728},
 publisher = {Morgan Kaufmann Publishers Inc.},
 address = {San Francisco, CA, USA}
}
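To make the hierarchical flavor of such descriptions concrete, here is a hypothetical encoding (written as a Python literal purely for illustration; it is not LADDER syntax, and the field and constraint names are invented) of the arrow example mentioned in the abstract: an arrow built from three lines plus constraints, editing behaviors, and display directives.

# Hypothetical, illustration-only encoding of a hierarchical shape description.
arrow_description = {
    "shape": "Arrow",
    "components": {          # an arrow is built out of three lines
        "shaft": "Line",
        "head1": "Line",
        "head2": "Line",
    },
    "constraints": [          # geometric relations among the components
        ("coincident", "shaft.p2", "head1.p1"),
        ("coincident", "shaft.p2", "head2.p1"),
        ("shorter", "head1", "shaft"),
        ("shorter", "head2", "shaft"),
        ("acuteAngle", "head1", "shaft"),
        ("acuteAngle", "head2", "shaft"),
    ],
    "editing": [("drag", "shaft.p1", "moveWholeShape")],   # example editing behavior
    "display": [("showAs", "cleanedStrokes")],             # example display directive
}

A generator in the spirit of the abstract would walk a structure like this and emit a recognizer that searches already-recognized lines for a grouping satisfying every listed constraint, then attaches the described editing and display behavior to the result.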
2002 
PublicationImage
 Hammond, Tracy; Davis, Randall. Tahuti: A Geometrical Sketch Recognition System for UML Class Diagrams. Technical Report SS-02-08: Papers from the 2002 Association for the Advancement of Artificial Intelligence (AAAI) Spring Symposium on Sketch Understanding. pp. 8 pages. Menlo Park, California. AAAI, July 28-August 1, 2002. Link
Show Abstract:
We have created and tested Tahuti, a dual-view sketch recognition environment for class diagrams in UML. The system is based on a multi-layer recognition framework which recognizes multi-stroke objects by their geometrical properties allowing users the freedom to draw naturally as they would on paper rather than requiring the user to draw the objects in a pre-defined manner. Users can draw and edit while viewing either their original strokes or the interpreted version of their strokes engendering user-autonomy in sketching. The experiments showed that users preferred Tahuti to a paint program and to Rational Rose™ because it combined the ease of drawing found in a paint program with the ease of editing available in a UML editor.

Show BibTex
@inproceedings{hammond2002Tahuti,
 title = {Tahuti: A Geometrical Sketch Recognition System for UML Class Diagrams},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {Technical Report SS-02-08: Papers from the 2002 Association for the Advancement of Artificial Intelligence
   (AAAI) Spring Symposium on Sketch Understanding},
 year = {2002},
 address = {Menlo Park, CA},
 month = 7,
 organization = {AAAI},
 note = {8 pages}
}
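As a rough picture of recognizing a multi-stroke object from its pooled geometry rather than its drawing order, the toy test below (invented for this page; it is not Tahuti's multi-layer framework) collects the points of all strokes and asks whether they hug the bounding box, as a rectangle-like UML class box would, or its inscribed ellipse.

def rectangle_or_ellipse(strokes):
    # strokes: list of strokes, each a list of (x, y) points; assumes a
    # non-degenerate 2-D drawing. Pool every point, ignoring stroke count/order.
    points = [p for stroke in strokes for p in stroke]
    xs, ys = zip(*points)
    x0, x1, y0, y1 = min(xs), max(xs), min(ys), max(ys)
    cx, cy, rx, ry = (x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0) / 2, (y1 - y0) / 2
    # Mean distance to the nearest bounding-box edge, normalized by the box size.
    rect_err = sum(min(abs(x - x0), abs(x - x1), abs(y - y0), abs(y - y1))
                   for x, y in points) / (len(points) * max(rx, ry))
    # Mean deviation of the inscribed-ellipse equation from 1 (0 = on the ellipse).
    ellipse_err = sum(abs(((x - cx) / rx) ** 2 + ((y - cy) / ry) ** 2 - 1)
                      for x, y in points) / len(points)
    return "rectangle" if rect_err <= ellipse_err else "ellipse"

Because only the pooled geometry matters, the answer is the same no matter how many strokes the user drew or in what order, which is the drawing freedom the abstract emphasizes.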
2002 
PublicationImage
 Hammond, Tracy; Davis, Randall. A Domain Description Language for Sketch Recognition. MIT Lab Abstract. Artificial Intelligence Laboratory. pp. 2 pages. Cambridge, MA. MIT, September, 2002. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2002domain,
 title = {A Domain Description Language for Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}
2002 
PublicationImage
 Hammond, Tracy; Gajos, Krzysztof; Davis, Randall; Shrobe, Howard. An Agent-Based System for Capturing and Indexing Software Design Meetings. Proceedings of International Workshop on Agents In Design, WAID. Number 2, pp. 18 pages. Cambridge, MA. MIT, September, 2002. Link
Show Abstract:
We present an agent-based system for capturing and indexing software design meetings. During these meetings, designers design object-oriented software tools, including new agent-based technologies for the Intelligent Room, by sketching UML-type designs on a white-board. To capture the design meeting history, the Design Meeting Agent requests available audio, video, and screen capture services from the environment and uses them to capture the entire design meeting. However, finding a particular moment of the design history video and audio records can be cumbersome without a proper indexing scheme. To detect, index, and timestamp significant events in the design process, the Tahuti Agent, also started by the Design Meeting Agent, records, recognizes, and understands the UML-type sketches drawn during the meeting. These timestamps can be mapped to particular moments in the captured video and audio, aiding in the retrieval of the captured information. Metaglue, a multi-agent system, provides the computational glue necessary to bind the distributed components of the system together. It also provides necessary tools for seamless multi-modal interaction between the varied agents and the users.

Show BibTex
@inproceedings{hammond2002agent,
 title = {An Agent-Based System for Capturing and Indexing Software Design Meetings},
 author = {Hammond, Tracy and Gajos, Krzysztof and Davis, Randall and Shrobe, Howard},
 booktitle = {Proceedings of International Workshop on Agents In Design, WAID},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {18 pages}
}
2002 
PublicationImage
 Hammond, Tracy; Gajos, Krzysztof; Davis, Randall; Shrobe, Howard. Sketch Recognition in Software Design. MIT Lab Abstract. Artificial Intelligence Laboratory. pp. 2 pages. Cambridge, MA. MIT, September, 2002. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2002sketch,
 title = {Sketch Recognition in Software Design},
 author = {Hammond, Tracy and Gajos, Krzysztof and Davis, Randall and Shrobe, Howard},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}
2002 
PublicationImage
 Hammond, Tracy; Hammond, Jan. Gender-Based Underrepresentation in Computer Science and Related Disciplines. Frontiers in Education. FIE 2002. 32nd Annual. Number 2, pp. 6 pages. Cambridge, MA. IEEE, 2002. Link
Show Abstract:
Traditionally, biological determinism served as an a priori explanation for inadequate performance in minority groups. Concurrent with this thinking, women were deemed to be naturally deficient in math, hence their large-scale absence from math-related disciplines. Lacking empirical support for nature-based arguments, current research relies on social determinism to test gender-based disparities in the pursuit of math. Although this latter model seems closer to reality, as evidenced by research results, this paper suggests that future studies must examine the issue from a choice-based paradigm. With work roles no longer based on gender, questions regarding women in math disciplines must be examined within choice-based models rather than those that emphasize environmentally determined criteria. We propose an integrated research model that includes choice as a critical causal variable.

Show BibTex
@inproceedings{hammond2002FIE,
 title = {Gender-Based Underrepresentation in Computer Science and Related Disciplines},
 author = {Hammond, Tracy and Hammond, Jan},
 booktitle = {Frontiers in Education. FIE 2002. 32nd Annual},
 year = {2002},
 number = {2},
 address = {Cambridge, MA},
 month = 7,
 organization = {IEEE},
 isbn = {0-7803-7444-4},
 note = {6 pages}
}
2002 
PublicationImage
 Hammond, Tracy; Oshiro, Kalani; Davis, Randall. Natural Editing and Recognition of UML Class Diagrams. MIT Lab Abstract. Artificial Intelligence Laboratory. pp. 2 pages. Cambridge, MA. MIT, September, 2002. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2002natural,
 title = {Natural Editing and Recognition of UML Class Diagrams},
 author = {Hammond, Tracy and Oshiro, Kalani and Davis, Randall},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}
2002 
PublicationImage
 Hammond, Tracy; Sezgin, Metin; Veselova, Olya; Adler, Aaron; Oltmans, Michael; Alvarado, Christine; Hitchcock, Rebecca. Multi-Domain Sketch Recognition. Proceedings of the 2nd Annual MIT Student Oxygen Workshop. pp. 2 pages. Cambridge, MA. MIT, Artificial Intelligence Laboratory, September, 2002. Link
Show Abstract:
In this paper, we describe a new framework for multi-domain sketch recognition which is being developed by the Design Rationale Group at the MIT AI laboratory. The framework uses a blackboard architecture for recognition in which the knowledge sources are a combination of domain-independent and domain-specific recognizers. Domain-specific recognizers are automatically generated from the domain description which is written using the domain description language syntax. Domain descriptions can be automatically generated by a system that learns shape descriptions from a drawn example.

Show BibTex
@inproceedings{hammond2002SOWMulti,
 title = {Multi-{D}omain Sketch Recognition},
 author = {Hammond, Tracy and Sezgin, Metin and Veselova, Olya and Adler, Aaron and Oltmans, Michael and Alvarado, Christine and Hitchcock, Rebecca},
 booktitle = {Proceedings of the 2nd Annual MIT Student Oxygen Workshop},
 year = {2002},
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}
2001 
PublicationImage
 Alvarado, Christine; Sezgin, Tevfik Metin; Scott, Dana; Hammond, Tracy; Kasheff, Zardosht; Oltmans, Michael; Davis, Randall. A Framework for Multi-Domain Sketch Recognition. MIT Lab Abstract. MIT Artificial Intelligence Laboratory. pp. 3 pages. Cambridge, MA. MIT, September, 2001. Link
Show Abstract:

Show BibTex
@inproceedings{alvarado2001framework,
 title = {A Framework for Multi-Domain Sketch Recognition},
 author = {Alvarado, Christine and Sezgin, Tevfik Metin and Scott, Dana and Hammond, Tracy and Kasheff, Zardosht
   and Oltmans, Michael and Davis, Randall},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2001,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {3 pages}
}
2001 
PublicationImage
 Hammond, Tracy. Natural Sketch Recognition in UML Class Diagrams. Proceedings of the MIT Student Oxygen Workshop. pp. 3 pages. Gloucester, Massachusetts. MIT Artificial Intelligence Laboratory, July 16, 2001. Link
Show Abstract:

Show BibTex
@inproceedings{hammond2001natural,
 title = {Natural Sketch Recognition in UML Class Diagrams},
 author = {Tracy Hammond},
 booktitle = {Proceedings of the MIT Student Oxygen Workshop},
 year = {2001},
 address = {Gloucester, MA},
 month = 7,
 organization = {MIT},
 note = {3 pages}
}
2001 
PublicationImage
 Parthasarthy, Raghavan; Hammond, Tracy. Technical Innovation: Options for Developing Countries. The Hindu. pp. 3 pages. India. The Hindu, May 24, 2001. Link
Show Abstract:

Show BibTex
@article{Raghavan2001Hindu,
 author = {Parthasarthy, Raghavan and Hammond, Tracy},
 title = {Technical Innovation: Options for Developing Countries},
 journal = {The Hindu},
 year = {2001},
 month = {5},
 note = {3 pages},
 address = {India}
}


Show All BibTex


@article{alamudun2017MedPhy,
  title={Fractal Analysis of Visual Search Activity for Mass Detection During Mammographic Screening},
  author={Alamudun, Folami and Yoon, Hong-Jun and Hudson, Kathleen B and Morin-Ducote, Garnetta and Hammond, Tracy and Tourassi, Georgia D},
  journal={Medical Physics},
  volume={},
  pages={},
  year={2017},
  publisher={Wiley}
}


@inproceedings{Cherian2017pervasive,
 author = {Cherian, Josh and Rajanna, Vijay and Goldberg, Daniel and Hammond, Tracy},
 title = {Did you Remember To Brush? : A Noninvasive Wearable Approach to Recognizing Brushing Teeth for Elderly Care},
 booktitle = {11th EAI International Conference on Pervasive Computing Technologies for Healthcare},
 series = {ICDC},
 year = {2017}, 
 isbn = {},
 location = {New York, USA},
 pages = {}
}

@inproceedings{rajanna2017CHI, 
author = {Rajanna, Vijay and Taele, Paul and Polsley, Seth and Hammond, Tracy}, 
title = {A Gaze Gesture-Based User Authentication System to Counter Shoulder-Surfing Attacks}, 
booktitle = {Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems}, 
series = {CHI EA '17}, 
year = {2017}, 
isbn = {978-1-4503-4656-6/17/05}, 
location = {Denver, Colorado, USA}, 
url = {http://doi.acm.org/10.1145/3027063.3053070}, 
doi = {10.1145/3027063.3053070}, 
acmid = {3053070}, 
publisher = {ACM}, 
address = {New York, NY, USA}, 
 } 

@inproceedings{Brooks2016CPTTE,
 author = {Brooks, Randy and Hammond, Tracy and Koh, Jung-In},
 title = {Score Improvement Distribution When Using Sketch Recognition Software (Mechanix) as a Tutor: Assessment of High School Classroom Pilot},
 booktitle = {10th Conference on Pen and Touch Technology in Education. CPTTE 2016},
 series = {CPTTE},
 year = {2016}, 
 isbn = {},
 location = {Brown University},
 pages = {2}
}

@inproceedings{FolamiSPIE2016,
 author = {Alamudun, Folami and Yoon, Hong-Jun and Hammond, Tracy and Hudson, Kathy and Morin-Ducote, 
   Garnetta and Tourassi, Georgia},
 title = { Shapelet analysis of pupil dilation for modeling visuo-cognitive behavior in screening mammography },
 booktitle = {Proc. SPIE},
 series = {SPIE},
 year = {2016},
 volume = {9787},
 location = {San Diego, CA},
 doi = {10.1117/12.2217670},
 pages = {97870M-97870M-13},
 URL = { http://dx.doi.org/10.1117/12.2217670},
 eprint = {}
}

@book{Hammond:2016,
 author = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron},
 title = {Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
 year = {2016},
 isbn = {9783319311913},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated},
} 

@inproceedings{Hilton2016icdc,
 author = {Hilton, Ethan C. and Williford, Blake and Li, W. and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 title = {Consistently evaluating sketching ability in engineering curriculum},
 booktitle = {4th ICDC, International Conference on Design and Creativity},
 series = {ICDC},
 year = {2016}, 
 isbn = {},
 location = {Atlanta, GA},
 pages = {9}
}

@inproceedings{Purnendu2016SAP,
 author = {Kaul, Purnendu and Rajanna, Vijay and Hammond, Tracy},
 title = {Exploring Users' Perceived Activities in a Sketch-based Intelligent Tutoring System Through Eye Movement Data},
 booktitle = {ACM Symposium on Applied Perception (SAP '16)},
 series = {SAP},
 year = {2016}, 
 isbn = {},
 location = {Anaheim, CA},
 pages = {1}
}

@inproceedings{Kim2016SBIM,
 author = {Kim, Hong-Hoe and Taele, Paul and Seo, Jinsil and Liew, Jeffrey and Hammond, Tracy},
 title = {A Novel Sketch-Based Interface for Improving Children’s Fine Motor Skills and School Readiness},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
  series = {SAP},
 year = {2016},
 location = {Lisbon, Portugal},
 pages = {1-10}
}

@incollection{LaraGarduno2016,
author={Lara-Garduno, Raniero
and Leslie, Nancy
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={SmartStrokes: Digitizing Paper-Based Neuropsychological Tests},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={163--175},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_11},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_11}
}

@incollection{Polsley2016,
author={Polsley, Seth
and Ray, Jaideep
and Nelligan, Trevor
and Helms, Michael
and Linsey, Julie
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={Leveraging Trends in Student Interaction to Enhance the Effectiveness of Sketch-Based Educational Software},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={103--114},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_7},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_7}
}

@inproceedings{RajannaETRA2016,
 author = {Rajanna, Vijay and Hammond, Tracy},
 title = {GAWSCHI: Gaze-augmented, Wearable-supplemented Computer-human Interaction},
 booktitle = {Proceedings of the Ninth Biennial ACM Symposium on Eye Tracking Research \& Applications},
 series = {ETRA '16},
 year = {2016},
 isbn = {978-1-4503-4125-7},
 location = {Charleston, South Carolina},
 pages = {233--236},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2857491.2857499},
 doi = {10.1145/2857491.2857499},
 acmid = {2857499},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {eye tracking, foot-operated device, gaze interaction, midas touch, multi-modal interaction, 
   quasi-mouse, wearable devices},
}

@inproceedings{RajannaIUI2016,
 author = {Rajanna, Vijay Dandur},
 title = {Gaze and Foot Input: Toward a Rich and Assistive Interaction Modality},
 booktitle = {Companion Publication of the 21st International Conference on Intelligent User Interfaces},
 series = {IUI '16 Companion},
 year = {2016},
 isbn = {978-1-4503-4140-0},
 location = {Sonoma, California, USA},
 pages = {126--129},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2876456.2876462},
 doi = {10.1145/2876456.2876462},
 acmid = {2876462},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {authentication, eye tracking, foot input, gaze and foot interaction, tabletop interaction},
} 

@inproceedings{Rjanna2016Access,
 author = {Rajanna, Vijay and Hammond, Tracy},
 title = {Gaze Typing Through Foot-Operated Wearable Device},
 booktitle = {The 18th International ACM SIGACCESS Conference on Computers and Accessibility (ASSETS '16)},
 series = {SIG},
 year = {2016}, 
 isbn = {},
 location = {Reno, Nevada},
 pages = {2}
}

@Article{Rajanna2015,
 author = {Rajanna, Vijay and Vo, Patrick and Barth, Jerry and Mjelde, Matthew
 and Grey, Trevor and Oduola, Cassandra and Hammond, Tracy},
 title = {KinoHaptics: An Automated, Wearable, Haptic Assisted, Physio-therapeutic System for Post-surgery 
   Rehabilitation and Self-care},
 journal = {Journal of Medical Systems},
 year = {2015},
 volume = {40},
 number = {3},
 pages = {1--12},
 issn = {1573-689X},
 doi = {10.1007/s10916-015-0391-3},
 url = {http://dx.doi.org/10.1007/s10916-015-0391-3}
}

@article{guo2016resumatcher,
  title={R{\'e}suMatcher: A personalized r{\'e}sum{\'e}-job matching system},
  author={Guo, Shiqiang and Alamudun, Folami and Hammond, Tracy},
  journal={Expert Systems with Applications},
  volume={60},
  pages={169--182},
  year={2016},
  publisher={Elsevier}
}

@article{SmithermanJournal2016Casper,
author = {Seth Smitherman and Daniel Goldberg and Tracy Hammond and Jennifer Horney},
title = {Developing a Survey to Assess the Prevalence of Risk Factors for Neglected Tropical Diseases in Texas Using the CASPER Method},
journal = {Journal of Health Security},
year = {2016}
}

@incollection{Barreto2016,
author={Barreto, Laura
and Taele, Paul
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={A Stylus-Driven Intelligent Tutoring System for Music Education Instruction},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={141--161},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_10},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_10}
}

@incollection{Taele2016,
author={Taele, Paul
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={An Intelligent Sketch-Based Educational Interface for Learning Complex Written East Asian Phonetic Symbols},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={129--140},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_9},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_9}
}



@incollection{Valentine2016,
author={Valentine, Stephanie
and Leyva-McMurtry, Angelica
and Borgos-Rodriguez, Katya
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={The Digital Sash: A Sketch-Based Badge System in a Social Network for Children},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={179--189},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_12},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_12}
}

@incollection{ValentineWIPTTE2016,
author={Valentine, Stephanie
and Conrad, Hannah
and Oduola, Cassandra
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={WIPTTE 2015 High School Contest},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={345--364},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_25},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_25}
}


@incollection{Williford2016,
author={Williford, Blake
and Taele, Paul
and Nelligan, Trevor
and Li, Wayne
and Linsey, Julie
and Hammond, Tracy},
editor={Hammond, Tracy
and Valentine, Stephanie
and Adler, Aaron},
title={PerSketchTivity: An Intelligent Pen-Based Educational Application for Design Sketching Instruction},
booktitle={Revolutionizing Education with Digital Ink: The Impact of Pen and Touch Technology on Education},
year={2016},
publisher={Springer International Publishing},
address={Cham},
pages={115--127},
isbn={978-3-319-31193-7},
doi={10.1007/978-3-319-31193-7_8},
url={http://dx.doi.org/10.1007/978-3-319-31193-7_8}
}

@incollection{Hammond2015Intro,
 title = {Introduction}, 
 author = {Hammond, Tracy and Payton, Mark and Adler, Aaron and Valentine, Stephanie},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {v--xix},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}

@book{Hammond:2015:IPT:2815658,
 author = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark},
 title = {The Impact of Pen and Touch Technology on Education},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
} 

@incollection{Kim2015EasySketch,
 title = {EasySketch: A Sketch-Based Fine Motor Skill Recognizing Educational Interface for Children 
   Emerging Technology Research Strand}, 
 author = {Kim, Hong-Hoe and Valentine, Stephanie and Taele, Paul and Hammond, Tracy},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {35--46},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}

@INPROCEEDINGS{GreenASEE2015,
 author = {Green, Matthew G. and Caldwell, Benjamin W. and Helms, Michael and Linsey, Julie S. and Hammond, Tracy Anne},
 title = {Using Natural Sketch Recognition Software to Provide Instant Feedback on Statics Homework (Truss
   Free Body Diagrams): Assessment of a Classroom Pilot},
 booktitle = {2015 ASEE Annual Conference and Exposition},
 year = {2015},
 month = {June},
 address = {Seattle, Washington},
 publisher = {ASEE Conferences},
 note = {https://peer.asee.org/25007},
 number = {10.18260/p.25007}
}

@inproceedings{Nelligan:2015:MSE:2732158.2732194,
 author = {Nelligan, Trevor and Polsley, Seth and Ray, Jaideep and Helms, Michael and Linsey, Julie and Hammond, Tracy},
 title = {Mechanix: A Sketch-Based Educational Interface},
 booktitle = {Proceedings of the 20th International Conference on Intelligent User Interfaces Companion},
 series = {IUI Companion '15},
 year = {2015},
 isbn = {978-1-4503-3308-5},
 location = {Atlanta, Georgia, USA},
 pages = {53--56},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2732158.2732194},
 doi = {10.1145/2732158.2732194},
 acmid = {2732194},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {computer-assisted instruction (cai), graphical user interfaces (gui), guides, interaction styles, user-centered design}
} 

@inproceedings{Rajanna:2015,
 author = {Rajanna, Vijay and Alamudun, Folami and Goldberg, Daniel and Hammond, Tracy},
 title = {Let Me Relax: Toward Automated Sedentary State Recognition and Ubiquitous Mental Wellness Solutions},
 booktitle = {Proceedings of the 5th EAI International Conference on Wireless Mobile Communication and Healthcare},
 series = {MOBIHEALTH'15},
 year = {2015}, 
 isbn = {978-1-63190-088-4},
 location = {London, Great Britain},
 pages = {28--33},
 numpages = {6},
 url = {http://dx.doi.org/10.4108/eai.14-10-2015.2261900},
 doi = {10.4108/eai.14-10-2015.2261900},
 acmid = {2897461}, 
 publisher = {ICST (Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering)},
 address = {ICST, Brussels, Belgium, Belgium},
 keywords = {anxiety, cognitive reappraisal, intervention techniques, mental wellness, personal health assistant, relaxation, 
   sedentary state recognition, stress, ubiquitous computing},
}

@incollection{raymond2015vision,
 title = {A Vision for Education: Transforming How Formal Systems are Taught Within Mass Lectures by 
   Using Pen Technology to Create a Personalized Learning Environment},
 author = {Raymond, Dwayne and Liew, Jeffrey and Hammond, Tracy A},
 booktitle = {The Impact of Pen and Touch Technology on Education},
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark},
 pages = {355--363},
 year = {2015},
 publisher = {Springer International Publishing Switzerland},
 doi = {10.1007/978-3-319-15594-4}
}

@inproceedings{VangavoluCHitaly2015,
 author = {Vangavolu, Sriharish and Wood, Hayden and Newman, Joseph and Polsley, Seth and Hammond, Tracy},
 title = {Frontier: A Directed Graph System for Web Navigation},
 booktitle = {Proceedings of the 11th Biannual Conference on Italian SIGCHI Chapter},
 series = {CHItaly 2015},
 year = {2015},
 isbn = {978-1-4503-3684-0},
 location = {Rome, Italy},
 pages = {82--85},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2808435.2808465},
 doi = {10.1145/2808435.2808465},
 acmid = {2808465},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {contextual History, graph visualization, session management, web navigation}
} 

@inproceedings{TaeleAAAI2015,
 author = {Taele, Paul and Barreto, Laura and Hammond, Tracy},
 title = {Maestoso: An Intelligent Educational Sketching Tool for Learning Music Theory},
 booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence},
 series = {AAAI'15},
 year = {2015},
 isbn = {0-262-51129-0},
 location = {Austin, Texas},
 pages = {3999--4005},
 numpages = {7},
 url = {http://dl.acm.org/citation.cfm?id=2888116.2888271},
 acmid = {2888271},
 publisher = {AAAI Press}
}

@inproceedings{Taele2015SG,
 author = {Taele, Paul and Hammond, Tracy},
 title = {InvisiShapes: A Recognition System for Sketched 3D Primitives in Continuous Interaction Spaces},
 booktitle = {Proceedings of the 2015 International Symposium on Smart Graphics, Chengdu, China},
 series = {SG},
 year = {2015}, 
 isbn = {},
 location = {Chengdu, China},
 pages = {12}
}

@incollection{Taele2015Enhancing,
 title = {Enhancing Instruction of Written East Asian Languages with Sketch Recognition-Based Intelligent Language
   Workbook Interfaces}, 
 author = {Taele, Paul and Hammond, Tracy},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {119--126},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}

@incollection{Valentine2015Mechanix,
 title = {Mechanix: A Sketch-Based Tutoring System that Automatically Corrects Hand-Sketched Statics Homework}, 
 author = {Valentine, Stephanie and Lara-Garduno, Raniero and Linsey, Julie and Hammond, Tracy},
 booktitle = {The Impact of Pen and Touch Technology on Education}, 
 editor = {Hammond, Tracy and Valentine, Stephanie and Adler, Aaron and Payton, Mark}, 
 pages = {91--105},
 year = {2015},
 isbn = {3319155938, 9783319155937},
 edition = {1st},
 publisher = {Springer Publishing Company, Incorporated}
}

@article{Atilola2014AIE,
 author = {Atilola, Olufunmilola and Valentine, Stephanie and Kim, Hong-Hoe and Turner, David and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 title = {Mechanix: A natural sketch interface tool for teaching truss analysis and free-body diagrams},
 journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing},
 volume = {28},
 issue = {Special Issue 02},
 month = {5},
 year = {2014},
 issn = {1469-1760},
 pages = {169--192},
 numpages = {24},
 doi = {10.1017/S0890060414000079},
 URL = {http://journals.cambridge.org/article_S0890060414000079}
}

@incollection{Hammond2015,
 author = {Hammond, Tracy},
 editor = {Gero, S. John},
 chapter = {Dialectical Creativity: Sketch-Negate-Create},
 title = {Studying Visual and Spatial Reasoning for Design Creativity},
 year = {2015},
 publisher = {Springer Netherlands},
 address = {Dordrecht},
 pages = {91--108},
 isbn = {978-94-017-9297-4},
 doi = {10.1007/978-94-017-9297-4_6},
 url = {http://dx.doi.org/10.1007/978-94-017-9297-4_6}
}

@article{Hammond2014AIE,
 author = {Hammond, Tracy and Linsey, Julie},
 title = {Design Computing and Cognition (DCC'12)},
 journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing},
 volume = {28},
 issue = {Special Issue 02},
 month = {5},
 year = {2014},
 issn = {1469-1760},
 pages = {113--114},
 numpages = {2},
 doi = {10.1017/S089006041400002X},
 URL = {http://journals.cambridge.org/article_S089006041400002X}
}

@inproceedings{hong2014development,
 title = {Development of iCanFit: A mobile device application to promote physical activity and access to health information
   among older cancer survivors},
 author = {Hong, Yan and Vollmer Dahlke, Deborah and Ory, Marcia and Goldberg, Daniel and Cargill, Jessica and
   Kellstedt, Debra and Pulczinski, Jairus and Hammond, Tracy and Hernandez, Edgar},
 booktitle = {142nd APHA Annual Meeting and Exposition (November 15-November 19, 2014)},
 year = {2014},
 organization = {APHA},
 note = {1 page}
}

@inproceedings{Kim2014Developing,
 title = {Developing Intelligent Sketch-Based Applications for Children’s Fine Motor Sketching Skill Development},
 author = {Kim, Hong-hoe and Taele, Paul and Valentine, Stephanie and Hammond, Tracy},
 booktitle = {2014 International Conference on Intelligent User Interfaces (IUI) Workshop on Sketch:
   Pen and Touch Recognition},
 year = 2014,
 address = {Haifa, Israel},
 month = 2,
 organization = {IUI},
 publisher = {ACM}
}

@inproceedings{Kim2014easysketch,
 title = {EasySketch: A Sketch-Based Fine Motor Skill Recognizing Educational Interface for Children Emerging
   Technology Research Strand},
 author = {Kim, Hong-hoe and Valentine, Stephanie and Taele, Paul and Hammond, Tracy},
 booktitle = {Workshop on the Impact of Pen & Touch Technology on Education (WIPTTE)},
 year = 2014,
 address = {College Station, TX},
 month = 3,
 organization = {WIPTTE}
}

@INPROCEEDINGS{Prasad2014Model,
 author = {Prasad, Manoj and Russell, Murat I. and Hammond, Tracy Anne},
 booktitle = {Haptics Symposium (HAPTICS), 2014 IEEE},
 title = {A user centric model to design tactile codes with shapes and waveforms},
 year = {2014},
 pages = {597--602},
 keywords = {actuators; display instrumentation; haptic interfaces; user centred design; actuators; auditory mediums; 
   graph model; shape clustering algorithm; tactile code design; tactile codes; tactile information; tactile medium; 
   user centric model; vibrotactile displays; visual mediums; Arrays; Carbon; Clustering algorithms; Heating; Shape; 
   Time factors; Usability},
 doi = {10.1109/HAPTICS.2014.6775523},
 month = {Feb}
}

@article{Prasad:2014:DVC,
 author = {Prasad, Manoj and Russell, Murat and Hammond, Tracy A.},
 title = {Designing Vibrotactile Codes to Communicate Verb Phrases},
 journal = {ACM Trans. Multimedia Comput. Commun. Appl.},
 issue_date = {September 2014},
 volume = {11},
 number = {1s},
 month = oct,
 year = {2014},
 issn = {1551-6857},
 pages = {11:1--11:21},
 articleno = {11},
 numpages = {21},
 url = {http://doi.acm.org/10.1145/2637289},
 doi = {10.1145/2637289},
 acmid = {2637289},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {Tactile interface, Vibrotactile pattern perception, communication, graph model, perception model,
   tactile code, user centric design}
}

@inproceedings{Prasad:2014:HTH:2611222.2557404,
 author = {Prasad, Manoj and Taele, Paul and Goldberg, Daniel and Hammond, Tracy A.},
 title = {HaptiMoto: Turn-by-turn Haptic Route Guidance Interface for Motorcyclists},
 booktitle = {Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
 series = {CHI '14},
 year = {2014},
 isbn = {978-1-4503-2473-1},
 location = {Toronto, Ontario, Canada},
 pages = {3597--3606},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/2556288.2557404},
 doi = {10.1145/2556288.2557404},
 acmid = {2557404},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {advanced traveler information system, route guidance, tactile interface, vibro-tactile}
} 

@INPROCEEDINGS{Prasad2014Haptigo,
 author = {Prasad, Manoj and Taele, Paul and Olubeko, Ayobami and Hammond, Tracy},
 booktitle = {Haptics Symposium (HAPTICS), 2014 IEEE},
 title = {HaptiGo: A navigational tap on the shoulder},
 year = {2014},
 pages = {339--345},
 keywords = {haptic interfaces; pedestrians; tactile sensors; HaptiGo; cognitive loads; complex inter-personal 
   interactions; environmental awareness; lightweight haptic vest; navigational intelligence; navigational tap on
   the shoulder; obstacle detection capability; optimally-placed vibrotactile sensors; pedestrian navigation; 
   small form factor interaction cues; Belts; Haptic interfaces; Mobile communication; Navigation; Tactile 
   sensors; Vibrations},
 doi = {10.1109/HAPTICS.2014.6775478},
 month = {Feb}
}

@inproceedings{Rajanna:2014:SUL:2676629.2676636,
 author = {Rajanna, Vijay and Lara-Garduno, Raniero and Behera, Dev Jyoti and Madanagopal, Karthic and 
   Goldberg, Daniel and Hammond, Tracy},
 title = {Step Up Life: A Context Aware Health Assistant},
 booktitle = {Proceedings of the Third ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health},
 series = {HealthGIS '14},
 year = {2014},
 isbn = {978-1-4503-3136-4},
 location = {Dallas, Texas},
 pages = {21--30},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/2676629.2676636},
 doi = {10.1145/2676629.2676636},
 acmid = {2676636},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {context aware systems, environmental monitoring, geographic information systems, healthgis, 
   individual health, personal health assistant, public health, sensors}
} 

@inproceedings{TaeleIUI2014,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Developing Sketch Recognition and Interaction Techniques for Intelligent Surfaceless Sketching User Interfaces},
 booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces},
 series = {IUI Companion '14},
 year = {2014},
 isbn = {978-1-4503-2729-9},
 location = {Haifa, Israel},
 pages = {53--56},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2559184.2559185},
 doi = {10.1145/2559184.2559185},
 acmid = {2559185},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {natural user interfaces, sketch recognition, surfaceless interaction}
}

@inproceedings{Atilola2013ASEE,
 title = {Mechanix: Evaluating the Effectiveness of a Sketch Recognition Truss Tutoring Program Against Other Truss Programs},
 author = {Atilola, Olufunmilola and McTigue, Erin M. and Hammond, Tracy and Linsey, Julie},
 booktitle = {120th American Society for Engineering Education Annual Conference & Exposition (ASEE). June 23-26},
 year = 2013,
 address = {Atlanta, GA},
 month = 6,
 organization = {ASEE},
 note = {15 pages}
}

@inproceedings{Bartley:2013:WWC:2535708.2535718,
 author = {Bartley, Joey and Forsyth, Jonathon and Pendse, Prachi and Xin, Da and Brown, Garrett and Hagseth,
   Paul and Agrawal, Ashish and Goldberg, Daniel W. and Hammond, Tracy},
 title = {World of Workout: A Contextual Mobile RPG to Encourage Long Term Fitness},
 booktitle = {Proceedings of the Second ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health},
 series = {HealthGIS '13},
 year = {2013},
 isbn = {978-1-4503-2529-5},
 location = {Orlando, Florida},
 pages = {60--67},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2535708.2535718},
 doi = {10.1145/2535708.2535718},
 acmid = {2535718},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {GIS, activity recognition, exergaming, fitness, gaming, health, location, motivation, pattern recognition,
   smartphones, wearable computing}
} 

@inproceedings{CummingsCHI2013,
 author = {Cummings, Danielle and Prasad, Manoj and Lucchese, George and Aikens, Christopher and Hammond, Tracy A.},
 title = {Multi-modal Location-aware System for Paratrooper Team Coordination},
 booktitle = {CHI '13 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '13},
 year = {2013},
 isbn = {978-1-4503-1952-2},
 location = {Paris, France},
 pages = {2385--2388},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2468356.2468779},
 doi = {10.1145/2468356.2468779},
 acmid = {2468779},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {ad-hoc networks, location-aware, military applications, user-centered design}
}

@inproceedings{DamarajuIUI2013,
 author = {Damaraju, Sashikanth and Seo, Jinsil Hwaryoung and Hammond, Tracy and Kerne, Andruid},
 title = {Multi-tap Sliders: Advancing Touch Interaction for Parameter Adjustment},
 booktitle = {Proceedings of the 2013 International Conference on Intelligent User Interfaces},
 series = {IUI '13},
 year = {2013},
 isbn = {978-1-4503-1965-2},
 location = {Santa Monica, California, USA},
 pages = {445--452},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2449396.2449453},
 doi = {10.1145/2449396.2449453},
 acmid = {2449453},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {exploratory interfaces, multi-touch, parametric spaces}
}

@inproceedings{Goldberg:2013:EFS:2535708.2535716,
 author = {Goldberg, Daniel W. and Cockburn, Myles G. and Hammond, Tracy A. and Jacquez, Geoffrey M. and
   Janies, Daniel and Knoblock, Craig and Kuhn, Werner and Pultar, Edward and Raubal, Martin},
 title = {Envisioning a Future for a Spatial-health CyberGIS Marketplace},
 booktitle = {Proceedings of the Second ACM SIGSPATIAL International Workshop on the Use of GIS in Public Health},
 series = {HealthGIS '13},
 year = {2013},
 isbn = {978-1-4503-2529-5},
 location = {Orlando, Florida},
 pages = {27--30},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2535708.2535716},
 doi = {10.1145/2535708.2535716},
 acmid = {2535716},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {HealthGIS, data integration, environmental monitoring, health interventions, individual health,
   online analytics, public health, sensors, streaming data}
}

@article{Hammond2013AIE,
 author = {Hammond, Tracy and Linsey, Julie},
 title = {AI EDAM Special Issue, May 2014, Vol. 28, No. 2},
 journal = {Artificial Intelligence for Engineering Design, Analysis and Manufacturing},
 volume = {27},
 issue = {01},
 month = {2},
 year = {2013},
 issn = {1469-1760},
 pages = {83--84},
 numpages = {2},
 doi = {10.1017/S089006041200039X},
 URL = {http://journals.cambridge.org/article_S089006041200039X}
}

@inproceedings{Kim:2013:KSD:2487381.2487389,
 author = {Kim, Hong-hoe and Taele, Paul and Valentine, Stephanie and McTigue, Erin and Hammond, Tracy},
 title = {KimCHI: A Sketch-based Developmental Skill Classifier to Enhance Pen-driven Educational Interfaces for Children},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '13},
 year = {2013},
 isbn = {978-1-4503-2205-8},
 location = {Anaheim, California},
 pages = {33--42},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/2487381.2487389},
 doi = {10.1145/2487381.2487389},
 acmid = {2487389},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {EasySketch, KimCHI: children's developmental skill classifier, age and gender difference}
}

@inproceedings{Taele:2013:ASS:2540128.2540630,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Adapting Surface Sketch Recognition Techniques for Surfaceless Sketches},
 booktitle = {Proceedings of the Twenty-Third International Joint Conference on Artificial Intelligence},
 series = {IJCAI '13},
 year = {2013},
 isbn = {978-1-57735-633-2},
 location = {Beijing, China},
 pages = {3243--3244},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=2540128.2540630},
 acmid = {2540630},
 publisher = {AAAI Press}
} 

@article{Valentine2013Mechanix,
 author = {Valentine, Stephanie and Vides, Francisco and Lucchese, George and Turner, David and Kim, Hong-hoe
   and Li, Wenzhe and Linsey, Julie and Hammond, Tracy},
 title = {Mechanix: A Sketch-Based Tutoring and Grading System for Free-Body Diagrams},
 journal = {AI Magazine},
 year = {2013},
 volume = {34},
 number = {1},
 pages = {55--66}
}

@inproceedings{Atilola2012ASEE,
 title = {Automatic Identification of Student Misconceptions and Errors for Truss Analysis},
 author = {Atilola, Olufunmilola and Vides, Francisco and McTigue, Erin M and Linsey, Julie S and Hammond, Tracy Anne},
 booktitle = {119th American Society for Engineering Education Annual Conference & Exposition (ASEE). June 10–13},
 year = 2012,
 address = {San Antonio, TX},
 month = 6,
 organization = {ASEE},
 note = {13 pages}
}

@inproceedings{Cummings:2012:SII:2212776.2223664,
 author = {Cummings, Danielle and Fymat, Stephane and Hammond, Tracy},
 title = {Sketch-based Interface for Interaction with Unmanned Air Vehicles},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {1511--1516},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223664},
 doi = {10.1145/2212776.2223664},
 acmid = {2223664},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {multimodal interaction, sketch recognition, unmanned air system}
} 

@inproceedings{Cummings:2012:GML:2379256.2379286,
 author = {Cummings, Danielle and Lucchese, George and Prasad, Manoj and Aikens, Chris and Ho, Jimmy and Hammond, Tracy},
 title = {GeoTrooper: A Mobile Location-aware System for Team Coordination},
 booktitle = {Proceedings of the 13th International Conference of the NZ Chapter of the ACM's Special Interest
   Group on Human-Computer Interaction},
 series = {CHINZ '12},
 year = {2012},
 isbn = {978-1-4503-1474-9},
 location = {Dunedin, New Zealand},
 pages = {102--102},
 numpages = {1},
 url = {http://doi.acm.org/10.1145/2379256.2379286},
 doi = {10.1145/2379256.2379286},
 acmid = {2379286},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {location-based system, military applications, mobile-computing}
}

@inproceedings{Cummings:2012:HAI:2379256.2379265,
 author = {Cummings, Danielle and Lucchese, George and Prasad, Manoj and Aikens, Chris and Ho, Jimmy and Hammond, Tracy},
 title = {Haptic and AR Interface for Paratrooper Coordination},
 booktitle = {Proceedings of the 13th International Conference of the NZ Chapter of the ACM's Special Interest
   Group on Human-Computer Interaction},
 series = {CHINZ '12},
 year = {2012},
 isbn = {978-1-4503-1474-9},
 location = {Dunedin, New Zealand},
 pages = {52--55},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/2379256.2379265},
 doi = {10.1145/2379256.2379265},
 acmid = {2379265},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {location-based system, military applications, mobile-computing}
}

@inproceedings{Cummmings:2012:RSS:2331067.2331071,
 author = {Cummings, D. and Fymat, S. and Hammond, T.},
 title = {RedDog: A Smart Sketch Interface for Autonomous Aerial Systems},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '12},
 year = {2012},
 isbn = {978-3-905674-42-2},
 location = {Annecy, France},
 pages = {21--28},
 numpages = {8},
 url = {http://dl.acm.org/citation.cfm?id=2331067.2331071},
 acmid = {2331071},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
}

@inproceedings{Cummmings:2012:IDB:2331067.2331082,
 author = {Cummings, D. and Vides, F. and Hammond, T.},
 title = {I Don't Believe My Eyes!: Geometric Sketch Recognition for a Computer Art Tutorial},
 booktitle = {Proceedings of the International Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '12},
 year = {2012},
 isbn = {978-3-905674-42-2},
 location = {Annecy, France},
 pages = {97--106},
 numpages = {10},
 url = {http://dl.acm.org/citation.cfm?id=2331067.2331082},
 acmid = {2331082},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
}

@inproceedings{Li:2012:USG:2212776.2223778,
 author = {Li, Wenzhe and Hammond, Tracy},
 title = {Using Scribble Gestures to Enhance Editing Behaviors of Sketch Recognition Systems},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {2213--2218},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223778},
 doi = {10.1145/2212776.2223778},
 acmid = {2223778},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {editing, gesture recognition, pen-input computing, sketch recognition}
} 

@inproceedings{Lucchese:2012:GCT:2212776.2223730,
 author = {Lucchese, George and Field, Martin and Ho, Jimmy and Gutierrez-Osuna, Ricardo and Hammond, Tracy},
 title = {GestureCommander: Continuous Touch-based Gesture Prediction},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {1925--1930},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223730},
 doi = {10.1145/2212776.2223730},
 acmid = {2223730},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {gesture recognition, mobile computing}
} 

@inproceedings{Prasad:2012:OST:2212776.2212809,
 author = {Prasad, Manoj and Hammond, Tracy},
 title = {Observational Study on Teaching Artifacts Created Using Tablet PC},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {301--316},
 numpages = {16},
 url = {http://doi.acm.org/10.1145/2212776.2212809},
 doi = {10.1145/2212776.2212809},
 acmid = {2212809},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {ethnography, multimodal, pen enabled technologies, sketch recognition, sketching, tablet pc}
} 

@inproceedings{Taele:2012:IAE:2212776.2223749,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Initial Approaches for Extending Sketch Recognition to Beyond-surface Environments},
 booktitle = {CHI '12 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '12},
 year = {2012},
 isbn = {978-1-4503-1016-1},
 location = {Austin, Texas, USA},
 pages = {2039--2044},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/2212776.2223749},
 doi = {10.1145/2212776.2223749},
 acmid = {2223749},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {beyond-surface interactions, low-level processing, sketch recognition}
} 

@inproceedings{valentine2012mechanix,
 title = {Mechanix: A Sketch-Based Tutoring System for Statics Courses},
 author = {Valentine, Stephanie and Vides, Francisco and Lucchese, George and Turner, David and
   Kim, Hong-hoe and Li, Wenzhe and Linsey, Julie and Hammond, Tracy},
 booktitle = {Proceedings of the Twenty-Fourth Innovative Applications of Artificial Intelligence Conference (IAAI)},
 year = {2012},
 address = {Toronto, Canada},
 month = {July},
 organization = {AAAI},
 pages = {2253--2260}
}

@INPROCEEDINGS{Vides:2012:CHI-EIST,
 author = {Vides, F. and Taele, P. and Kim, H. and Ho, J. and Hammond, T.},
 title = {Intelligent Feedback for Kids Using Sketch Recognition},
 booktitle = {ACM SIGCHI 2012 Conference on Human Factors in Computing Systems Workshop on Educational
   Interfaces, Software, and Technology},
 year = {2012},
 publisher = {ACM}
}

@INPROCEEDINGS{atilola2011asme,
 author = {Atilola, Olufunmilola and Field, Martin and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 booktitle= {American Society of Mechanical Engineers (ASME) 2011 International Design Engineering Technical 
   Conferences and Computers and Information in Engineering Conference, Volume 7: 5th International Conference
   on Micro- and Nanosystems; 8th International Conference on Design and Design Education; 21st Reliability, 
   Stress Analysis, and Failure Prevention Conference},
 title = {Mechanix: A Sketch Recognition Truss Tutoring System},
 year = {2011},
 volume= {7},
 pages = {645--654},
 month = {August 28--30},
 address = {Washington, DC},
 publisher = {ASME}
}

@inproceedings{Atilola:2011:ENS:2192607.2193253,
 author = {Atilola, Olufunmilola and Field, Martin and McTigue, Erin and Hammond, Tracy and Linsey, Julie},
 title = {Evaluation of a Natural Sketch Interface for Truss FBDs and Analysis},
 booktitle = {Proceedings of the 2011 Frontiers in Education Conference},
 series = {FIE '11},
 year = {2011},
 isbn = {978-1-61284-468-8},
 pages = {S2E-1--S2E-6},
 url = {http://dx.doi.org/10.1109/FIE.2011.6142959},
 doi = {10.1109/FIE.2011.6142959},
 acmid = {2193253},
 publisher = {IEEE Computer Society},
 address = {Washington, DC, USA}
} 

@inproceedings{Field:2011:SRA:2283696.2283803,
 author = {Field, Martin and Valentine, Stephanie and Linsey, Julie and Hammond, Tracy},
 title = {Sketch Recognition Algorithms for Comparing Complex and Unpredictable Shapes},
 booktitle = {Proceedings of the Twenty-Second International Joint Conference on Artificial Intelligence - Volume Three},
 series = {IJCAI'11},
 year = {2011},
 isbn = {978-1-57735-515-1},
 location = {Barcelona, Catalonia, Spain},
 pages = {2436--2441},
 numpages = {6},
 url = {http://dx.doi.org/10.5591/978-1-57735-516-8/IJCAI11-406},
 doi = {10.5591/978-1-57735-516-8/IJCAI11-406},
 acmid = {2283803},
 publisher = {AAAI Press}
} 

@inproceedings{Hammond:2011:IWS:1943403.1943503,
author = {Hammond, Tracy Anne and Adler, Aaron},
title = {{IUI} 2011 Workshop: Sketch Recognition},
booktitle = {Proceedings of the 16th International Conference on Intelligent User Interfaces},
series = {{IUI} '11},
year = {2011},
isbn = {978-1-4503-0419-1},
location = {Palo Alto, CA, USA},
pages = {465--466},
numpages = {2},
url = {http://doi.acm.org/10.1145/1943403.1943503},
doi = {10.1145/1943403.1943503},
acmid = {1943503},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {CAD, design, document processing, intelligent user interfaces, pen-input computing, sketch
   recognition, sketch understanding, sketching, tablet PCs}
} 

@article{Hammond:2011:RSM:2030365.2030369,
 author = {Hammond, Tracy and Paulson, Brandon},
 title = {Recognizing Sketched Multistroke Primitives},
 journal = {ACM Trans. Interact. Intell. Syst.},
 issue_date = {October 2011},
 volume = {1},
 number = {1},
 month = oct,
 year = {2011},
 issn = {2160-6455},
 pages = {4:1--4:34},
 articleno = {4},
 numpages = {34},
 url = {http://doi.acm.org/10.1145/2030365.2030369},
 doi = {10.1145/2030365.2030369},
 acmid = {2030369},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {Sketch recognition, intelligent user interfaces, neural networks, primitive recognition}
} 

@inproceedings{Kebodeaux:2011:DPM:2021164.2021179,
 author = {Kebodeaux, Kourtney and Field, Martin and Hammond, Tracy},
 title = {Defining Precise Measurements with Sketched Annotations},
 booktitle = {Proceedings of the Eighth Eurographics Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '11},
 year = {2011},
 isbn = {978-1-4503-0906-6},
 location = {Vancouver, British Columbia, Canada},
 pages = {79--86},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2021164.2021179},
 doi = {10.1145/2021164.2021179},
 acmid = {2021179},
 publisher = {ACM},
 address = {New York, NY, USA}
} 

@inproceedings{WLi2011,
 author = {Wenzhe Li and Tracy Hammond},
 title = {Recognizing Text Through Sound Alone},
 booktitle = {AAAI Conference on Artificial Intelligence},
 year = {2011},
 abstract = {This paper presents an acoustic sound recognizer to recognize what people are writing on a table or wall by
   utilizing the sound signal information generated from a key, pen, or fingernail moving along a textured surface. Sketching
   provides a natural modality to interact with text, and sound is an effective modality for distinguishing text. However,
   limited research has been conducted in this area. Our system uses a dynamic time-warping approach to recognize 26
   hand-sketched characters (A-Z) solely through their acoustic signal. Our initial prototype system is user-dependent and
   relies on fixed stroke ordering. Our algorithm relied mainly on two features: mean amplitude and MFCCs (Mel-frequency
   cepstral coefficients). Our results showed over 80% recognition accuracy.},
 url = {http://www.aaai.org/ocs/index.php/AAAI/AAAI11/paper/view/3791}
}
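
The WLi2011 abstract above describes nearest-template matching with dynamic time warping (DTW) over acoustic features. The short Python sketch below illustrates that general idea only, using MFCC frames (the paper also uses mean amplitude, which is omitted here); the file names, the 13-coefficient MFCC setting, the Euclidean frame distance, and the use of librosa are assumptions for illustration, not the authors' implementation.

import numpy as np
import librosa  # assumed here only for audio loading and MFCC extraction

def mfcc_frames(path, n_mfcc=13):
    """Load an audio clip and return its MFCC frames as a (time, n_mfcc) array."""
    y, sr = librosa.load(path, sr=None)
    return librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc).T

def dtw_distance(a, b):
    """Classic O(len(a) * len(b)) DTW cost with Euclidean distance between frames."""
    n, m = len(a), len(b)
    cost = np.full((n + 1, m + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d = np.linalg.norm(a[i - 1] - b[j - 1])
            cost[i, j] = d + min(cost[i - 1, j], cost[i, j - 1], cost[i - 1, j - 1])
    return cost[n, m]

def classify(query_path, templates):
    """templates maps a character label to the path of one recorded example (user-dependent)."""
    q = mfcc_frames(query_path)
    return min(templates, key=lambda label: dtw_distance(q, mfcc_frames(templates[label])))

# Hypothetical usage: one template recording per letter A-Z, fixed stroke ordering assumed.
# templates = {chr(c): "templates/%s.wav" % chr(c) for c in range(ord("A"), ord("Z") + 1)}
# print(classify("query.wav", templates))
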

@INPROCEEDINGS{lupfer2011ACM,
 author = {Lupfer, Nic and Field, Martin and Kerne, Andruid and Hammond, Tracy},
 booktitle = {Proceedings of the 2011 ACM International Conference on Intelligent User Interfaces},
 title = {sketchy: Morphing User Sketches for Artistic Assistance},
 year = {2011},
 month = {February 13-16},
 address = {Palo Alto, CA},
 publisher = {ACM}
}

@article{Paulson:2011:OID:1897345.1897540,
 author = {Paulson, Brandon and Cummings, Danielle and Hammond, Tracy},
 title = {Object Interaction Detection Using Hand Posture Cues in an Office Setting},
 journal = {Int. J. Hum.-Comput. Stud.},
 issue_date = {January, 2011},
 volume = {69},
 number = {1-2},
 month = jan,
 year = {2011},
 issn = {1071-5819},
 pages = {19--29},
 numpages = {11},
 url = {http://dx.doi.org/10.1016/j.ijhcs.2010.09.003},
 doi = {10.1016/j.ijhcs.2010.09.003},
 acmid = {1897540},
 publisher = {Academic Press, Inc.},
 address = {Duluth, MN, USA},
 keywords = {Activity recognition, Context-aware, Cyberglove, Glove-based interaction, Hand gesture, Hand posture, Haptics}
}

@INPROCEEDINGS{valentine2011shape,
 author = {Valentine, Stephanie and Field, Martin and Smith, A and Hammond, T},
 title = {A Shape Comparison Technique for Use in Sketch-Based Tutoring Systems},
 booktitle = {Proceedings of the 2011 Intelligent User Interfaces Workshop on Sketch Recognition (Palo Alto, CA, USA, 2011)},
 year = {2011},
 month = {February 13},
 address = {Palo Alto, CA},
 publisher = {ASEE Conferences},
 note = {4 pages}
}

@inproceedings{Wolin:2011:CCM:2021164.2021185,
 author = {Wolin, Aaron and Field, Martin and Hammond, Tracy},
 title = {Combining Corners from Multiple Segmenters},
 booktitle = {Proceedings of the Eighth Eurographics Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '11},
 year = {2011},
 isbn = {978-1-4503-0906-6},
 location = {Vancouver, British Columbia, Canada},
 pages = {117--124},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/2021164.2021185},
 doi = {10.1145/2021164.2021185},
 acmid = {2021185},
 publisher = {ACM},
 address = {New York, NY, USA}
} 

@article{Costagliola:2010:EJS:1752259.1752502,
 author = {Costagliola, Gennaro and Hammond, Tracy and Plimmer, Beryl},
 title = {Editorial: JVLC Special Issue on Sketch Computation},
 journal = {J. Vis. Lang. Comput.},
 issue_date = {April, 2010},
 volume = {21},
 number = {2},
 month = apr,
 year = {2010},
 issn = {1045-926X},
 pages = {67--68},
 numpages = {2},
 url = {http://dx.doi.org/10.1016/j.jvlc.2010.01.003},
 doi = {10.1016/j.jvlc.2010.01.003},
 acmid = {1752502},
 publisher = {Academic Press, Inc.},
 address = {Orlando, FL, USA}
} 

@INPROCEEDINGS{david2010coske,
 author = {David, Jessica and Eoff, Brian and Hammond, Tracy},
 booktitle = {Computer Supported Cooperative Work Posters (CSCW)},
 title = {CoSke-An Exploration in Collaborative Sketching},
 year = {2010},
 pages = {471--472},
 month = {February 6--10},
 address = {Savannah, GA}
}

@inproceedings{Dixon:2010:IUS:1753326.1753459,
 author = {Dixon, Daniel and Prasad, Manoj and Hammond, Tracy},
 title = {iCanDraw: Using Sketch Recognition and Corrective Feedback to Assist a User in Drawing Human Faces},
 booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
 series = {CHI '10},
 year = {2010},
 isbn = {978-1-60558-929-9},
 location = {Atlanta, Georgia, USA},
 pages = {897--906},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/1753326.1753459},
 doi = {10.1145/1753326.1753459},
 acmid = {1753459},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {assistive and corrective feedback, computer-aided instruction, pen-input computing, sketch recognition}
} 

@inproceedings{Hammond2010IAAI,
 author = {Hammond, Tracy and Logsdon, Drew and Paulson, Brandon and Johnston, Joshua  and Peschel, Joshua 
   and Wolin, Aaron  and Taele, Paul},
 title = {A Sketch Recognition System for Recognizing Free-Hand Course of Action Diagrams},
 booktitle = {Innovative Applications of Artificial Intelligence},
 year = {2010},
 pages = {1781--1786},
 month = {July 11--15},
 address = {Atlanta, GA},
 keywords = {course of action diagrams; sketch recognition; pen input computing},
 url = {http://www.aaai.org/ocs/index.php/IAAI/IAAI10/paper/view/1581}
}

@inproceedings{Hammond:2010:CPL:1858171.1858197,
 author = {Hammond, Tracy and Davis, Randall},
 title = {Creating the Perception-based LADDER Sketch Recognition Language},
 booktitle = {Proceedings of the 8th ACM Conference on Designing Interactive Systems},
 series = {DIS '10},
 year = {2010},
 isbn = {978-1-4503-0103-9},
 location = {Aarhus, Denmark},
 pages = {141--150},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/1858171.1858197},
 doi = {10.1145/1858171.1858197},
 acmid = {1858197},
 publisher = {ACM},
 address = {New York, NY, USA}
} 

@inproceedings{Hammond:2010:SDS:1753846.1754184,
 author = {Hammond, Tracy and Lank, Edward and Adler, Aaron},
 title = {SkCHI: Designing Sketch Recognition Interfaces},
 booktitle = {CHI '10 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '10},
 year = {2010},
 isbn = {978-1-60558-930-5},
 location = {Atlanta, Georgia, USA},
 pages = {4501--4504},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/1753846.1754184},
 doi = {10.1145/1753846.1754184},
 acmid = {1754184},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {pen computing, pen-input computing, sketch computing, sketch interfaces, sketch recognition, tablet pc}
} 

@inproceedings{Hammond:2010:SRI:1753846.1754128, 
 author = {Hammond, Tracy and Logsdon, Drew and Peschel, Joshua and Johnston, Joshua and Taele, Paul and 
   Wolin, Aaron and Paulson, Brandon},
 title = {A Sketch Recognition Interface That Recognizes Hundreds of Shapes in Course-of-action Diagrams},
 booktitle = {CHI '10 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '10},
 year = {2010},
 isbn = {978-1-60558-930-5},
 location = {Atlanta, Georgia, USA},
 pages = {4213--4218},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/1753846.1754128},
 doi = {10.1145/1753846.1754128},
 acmid = {1754128},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {course-of-action diagrams, pen-based input, sketch recognition}
}

@inproceedings{Hammond:2010:ALD:1894345.1894383,
 author = {Hammond, Tracy and Prasad, Manoj and Dixon, Daniel},
 title = {Art 101: Learning to Draw Through Sketch Recognition},
 booktitle = {Proceedings of the 10th International Conference on Smart Graphics},
 series = {SG'10},
 year = {2010},
 isbn = {3-642-13543-9, 978-3-642-13543-9},
 location = {Banff, Canada},
 pages = {277--280},
 numpages = {4},
 url = {http://dl.acm.org/citation.cfm?id=1894345.1894383},
 acmid = {1894383},
 publisher = {Springer-Verlag},
 address = {Berlin, Heidelberg},
 keywords = {face recognition, sketch recognition, spatial cognition}
} 

@inproceedings{Johnston:2010:CCV:1923363.1923376,
 author = {Johnston, J. and Hammond, T.},
 title = {Computing Confidence Values for Geometric Constraints for Use in Sketch Recognition},
 booktitle = {Proceedings of the Seventh Sketch-Based Interfaces and Modeling Symposium},
 series = {SBIM '10},
 year = {2010},
 isbn = {978-3-905674-25-5},
 location = {Annecy, France},
 pages = {71--78},
 numpages = {8},
 url = {http://dl.acm.org/citation.cfm?id=1923363.1923376},
 acmid = {1923376},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
}

@INPROCEEDINGS{miller2010wiiolin,
 author = {Miller, Jace and Hammond, Tracy},
 booktitle = {Proceedings of the 2010 Conference on New Interfaces for Musical Expression (NIME)},
 title = {Wiiolin: A Virtual Instrument Using the Wii Remote},
 year = {2010},
 pages = {497--500},
 month = {June 15--18},
 address = {Sydney, Australia}
}

@inproceedings{rajan2010evaluation,
 title = {Evaluation of Paper-Pen based Sketching Interface},
 author = {Rajan, Pankaj and Taele, Paul and Hammond, Tracy},
 booktitle = {Proceedings of the 16th International Conference on Distributed Multimedia Systems (DMS)},
 pages = {321--326},
 year = {2010}
}

@INPROCEEDINGS{taele2010No,
 author = {Taele, Paul and Dixon, Daniel and Hammond, Tracy},
 booktitle = {SkCHI: Designing Sketch Recognition Interfaces, A CHI 2010 Workshop},
 title = {Telling the User, “No”: Sketch Recognition for Improving Sketch Technique},
 year = {2010},
 month = {April 10},
 address = {Atlanta, GA},
 note = {4 pages}
}

@article{Taele:2010:LSR:1752259.1752505,
 author = {Taele, Paul and Hammond, Tracy},
 title = {LAMPS: A Sketch Recognition-based Teaching Tool for Mandarin Phonetic Symbols I},
 journal = {J. Vis. Lang. Comput.},
 issue_date = {April, 2010},
 volume = {21},
 number = {2},
 month = apr,
 year = {2010},
 issn = {1045-926X},
 pages = {109--120},
 numpages = {12},
 url = {http://dx.doi.org/10.1016/j.jvlc.2009.12.004},
 doi = {10.1016/j.jvlc.2009.12.004},
 acmid = {1752505},
 publisher = {Academic Press, Inc.},
 address = {Orlando, FL, USA},
 keywords = {Bopomofo, Chinese, Sketch recognition}
} 

@inproceedings{Bhat:2009:UED:1661445.1661669,
 author = {Bhat, Akshay and Hammond, Tracy},
 title = {Using Entropy to Distinguish Shape Versus Text in Hand-drawn Diagrams},
 booktitle = {Proceedings of the 21st International Joint Conference on Artificial Intelligence},
 series = {IJCAI'09},
 year = {2009},
 location = {Pasadena, California, USA},
 pages = {1395--1400},
 numpages = {6},
 url = {http://dl.acm.org/citation.cfm?id=1661445.1661669},
 acmid = {1661669},
 publisher = {Morgan Kaufmann Publishers Inc.},
 address = {San Francisco, CA, USA}
}

@inproceedings{corey2009IUI,
 title = {Sketch Off: A Sketch Recognition Competition},
 author = {Corey, Paul and Eoff, Brian and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}

@inproceedings{dixon2009icandraw,
 title = {i{C}an{D}raw?: A Methodology for Using Assistive Sketch Recognition to Improve a User's Drawing Ability},
 author = {Dixon, Daniel and Prasad, Manoj and Hammond, Tracy},
 booktitle = {ACM Symposium on User Interface Software and Technology (UIST) Posters},
 year = {2009}, 
 address = {Vancouver, Canada},
 month = {10},
 organization = {ACM},
 note = {2 pages}
}

@inproceedings{Eoff:2009:DIC:1555880.1555916,
 author = {Eoff, Brian David and Hammond, Tracy},
 title = {Who Dotted That 'I'?: Context Free User Differentiation Through Pressure and Tilt Pen Data},
 booktitle = {Proceedings of Graphics Interface 2009},
 series = {GI '09},
 year = {2009},
 isbn = {978-1-56881-470-4},
 location = {Kelowna, British Columbia, Canada},
 pages = {149--156},
 numpages = {8},
 url = {http://dl.acm.org/citation.cfm?id=1555880.1555916},
 acmid = {1555916},
 publisher = {Canadian Information Processing Society},
 address = {Toronto, Ontario, Canada}
} 

@inproceedings{Hammond:2009:RIS:1555880.1555917,
 author = {Hammond, Tracy A. and Davis, Randall},
 title = {Recognizing Interspersed Sketches Quickly},
 booktitle = {Proceedings of Graphics Interface 2009},
 series = {GI '09},
 year = {2009},
 isbn = {978-1-56881-470-4},
 location = {Kelowna, British Columbia, Canada},
 pages = {157--166},
 numpages = {10},
 url = {http://dl.acm.org/citation.cfm?id=1555880.1555917},
 acmid = {1555917},
 publisher = {Canadian Information Processing Society},
 address = {Toronto, Ontario, Canada}
}

@inproceedings{Hammond:2009:IWS:1502650.1502736,
 author = {Hammond, Tracy Anne},
 title = {IUI'09 Workshop Summary: Sketch Recognition},
 booktitle = {Proceedings of the 14th International Conference on Intelligent User Interfaces},
 series = {IUI '09},
 year = {2009},
 isbn = {978-1-60558-168-2},
 location = {Sanibel Island, Florida, USA},
 pages = {501--502},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/1502650.1502736},
 doi = {10.1145/1502650.1502736},
 acmid = {1502736},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {cad, document processing, pen input computing, sketch recognition, sketch understanding, sketching}
} 

@inproceedings{hammond2009eurographics,
 title = {Eurographics Tutorial on Sketch Recognition},
 author = {Hammond, Tracy and Paulson, Brandon and Eoff, Brian},
 booktitle = {Eurographics 2009-Tutorials},
 year = {2009},
 address = {Munich, Germany},
 month = 3,
 organization = {The Eurographics Association},
 note = {4 pages}
}

@inproceedings{kaster2009sssousa,
 title = {SSSOUSA: Automatically Generating Secure and Searchable Data Collection Studies},
 author = {Kaster, Brandon L and Jacobson, Emily R and Hammond, Tracy A},
 booktitle = {International Workshop on Visual Languages and Computing (VLC)},
 year = {2009},
 address = {Redwood City, CA},
 month = 9,
 organization = {DMS},
 note = {6 pages}
}

@inproceedings{paulson2009IUI,
 title = {Towards a Framework for Truly Natural Low-level Sketch Recognition},
 author = {Paulson, Brandon and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}

@inproceedings{Peschel:2009:SPI:1640233.1640338,
 author = {Peschel, Joshua M. and Paulson, Brandon and Hammond, Tracy},
 title = {A Surfaceless Pen-based Interface},
 booktitle = {Proceedings of the Seventh ACM Conference on Creativity and Cognition},
 series = {C\&C '09},
 year = {2009},
 isbn = {978-1-60558-865-0},
 location = {Berkeley, California, USA},
 pages = {433--434},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/1640233.1640338},
 doi = {10.1145/1640233.1640338},
 acmid = {1640338},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {pen-based technology, surfaceless user interface}
}

@inproceedings{rajan2009IUI,
 title = {Applying Online Sketch Recognition Algorithms to a Scanned-In Sketch},
 author = {Rajan, Pankaj and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent
   User Interfaces Posters (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {3 pages}
}

@inproceedings{Shahzad2009IUI,
 title = {Urdu Qaeda: Recognition System for Isolated Urdu Characters},
 author = {Shahzad, Nabeel and Paulson, Brandon and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}

@inproceedings{taele2009hashigo,
 title = {Hashigo: A Next-Generation Sketch Interactive System for Japanese Kanji},
 author = {Taele, Paul and Hammond, Tracy},
 booktitle = {Proceedings of the Twenty-First Innovative Applications of Artificial Intelligence Conference (IAAI)},
 year = {2009},
 address = {Pasadena, CA},
 month = 7,
 organization = {AAAI},
 pages = {153--158}
}

@inproceedings{taele2009interactive,
 title = {A Sketch Interactive Approach to Computer-Assisted Biology Instruction},
 author = {Taele, Paul and Peschel, Joshua and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent User
   Interfaces Posters (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {2 pages}
}

@inproceedings{wolin2009mobilesketch,
 title = {Search Your Mobile Sketch: Improving the Ratio of Interaction to Information on Mobile Devices},
 author = {Wolin, Aaron and Eoff, Brian and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent
   User Interfaces (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {4 pages}
}

@inproceedings{Wolin:2009:SMR:1572741.1572758,
 author = {Wolin, A. and Paulson, B. and Hammond, T.},
 title = {Sort, Merge, Repeat: An Algorithm for Effectively Finding Corners in Hand-sketched Strokes},
 booktitle = {Proceedings of the 6th Eurographics Symposium on Sketch-Based Interfaces and Modeling},
 series = {SBIM '09},
 year = {2009},
 isbn = {978-1-60558-602-1},
 location = {New Orleans, Louisiana},
 pages = {93--99},
 numpages = {7},
 url = {http://doi.acm.org/10.1145/1572741.1572758},
 doi = {10.1145/1572741.1572758},
 acmid = {1572758},
 publisher = {ACM},
 address = {New York, NY, USA},
}

@inproceedings{zhu2009IUI,
 title = {RingEdit: A Control Point Based Editing Approach in Sketch Recognition Systems},
 author = {Zhu, Yuxiang and Johnston, Joshua and Hammond, Tracy},
 booktitle = {Proceedings of the Workshop on Sketch Recognition at the 14th International Conference on Intelligent
   User Interfaces Posters (IUI)},
 year = {2009},
 address = {Sanibel, FL},
 month = 2,
 organization = {ACM},
 note = {6 pages}
}

@inproceedings{Zomeren:2009:HIO:1514095.1514153,
 author = {Zomeren, Maarten van and Peschel, Joshua M. and Mann, Timothy and Knezek, Gabe and 
   Doebbler, James and Davis, Jeremy and Hammond, Tracy A. and Oomes, Augustinus H.J. and Murphy, Robin R.},
 title = {Human-robot Interaction Observations from a Proto-study Using SUAVs for Structural Inspection},
 booktitle = {Proceedings of the 4th ACM/IEEE International Conference on Human Robot Interaction},
 series = {HRI '09},
 year = {2009},
 isbn = {978-1-60558-404-1},
 location = {La Jolla, California, USA},
 pages = {235--236},
 numpages = {2},
 url = {http://doi.acm.org/10.1145/1514095.1514153},
 doi = {10.1145/1514095.1514153},
 acmid = {1514153},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {(s)UAV, human robot interaction, interface, rescue robotics}
}

@book{hammond2008Tablet,
 author = {Berque, D. and Evans, E. and Hammond, T. and Mock, K. and Payton, M. and Sweeny, D.},
 title = {Tablet PCs in K-12 Education: No More Blank Slates},
 publisher = {International Society for Technology in Education, ISTE},
 year = {2008},
 isbn = {1564842428}
}

@inproceedings{Choi:2008:SRB:1620270.1620353,
 author = {Choi, Heeyoul and Hammond, Tracy},
 title = {Sketch Recognition Based on Manifold Learning},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1786--1787},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620353},
 acmid = {1620353},
 publisher = {AAAI Press}
} 

@inproceedings{Choi:2008:GRB:1485797.1485832,
 author = {Choi, Heeyoul and Paulson, Brandon and Hammond, Tracy},
 title = {Gesture Recognition Based on Manifold Learning},
 booktitle = {Proceedings of the 2008 Joint IAPR International Workshop on Structural, Syntactic, and Statistical 
   Pattern Recognition},
 series = {SSPR \& SPR '08},
 year = {2008},
 isbn = {978-3-540-89688-3},
 location = {Orlando, Florida},
 pages = {247--256},
 numpages = {10},
 url = {http://dx.doi.org/10.1007/978-3-540-89689-0_29},
 doi = {10.1007/978-3-540-89689-0_29},
 acmid = {1485832},
 publisher = {Springer-Verlag},
 address = {Berlin, Heidelberg},
 keywords = {Kernel Isomap, Manifold Learning, Sketch Recognition}
} 

@inproceedings{Corey:2008:GCG:1620270.1620354,
 author = {Corey, Paul and Hammond, Tracy},
 title = {GLADDER: Combining Gesture and Geometric Sketch Recognition},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1788--1789},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620354},
 acmid = {1620354},
 publisher = {AAAI Press}
} 

@inproceedings{Dahmen:2008:DSS:1620270.1620355,
 author = {Dahmen, Katie and Hammond, Tracy},
 title = {Distinguishing Between Sketched Scribble Look Alikes},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1790--1791},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620355},
 acmid = {1620355},
 publisher = {AAAI Press}
}

@inproceedings{Eoff:2008:UIM:1620270.1620357,
 author = {Eoff, Brian David and Hammond, Tracy},
 title = {User Identification by Means of Sketched Stroke Features},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1794--1795},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620357},
 acmid = {1620357},
 publisher = {AAAI Press}
}

@INPROCEEDINGS{Hammond4720505,
 author = {T. Hammond},
 booktitle = {Frontiers in Education Conference, 2008. FIE 2008. 38th Annual},
 title = {Workshop - integrating sketch recognition technologies into your classroom},
 year = {2008},
 pages = {W2C-1--W2C-1},
 keywords = {CAD; computer aided instruction; diagrams; active learning; automated assessment; graphical diagrams;
   hand-sketched student diagrams; immediate feedback; sketch recognition technologies; pen-input; sketch recognition;
   tablet computers},
 doi = {10.1109/FIE.2008.4720505},
 ISSN = {0190-5848},
 month = {Oct}
}

@inproceedings{Hammond:2008:FRP:1358628.1358802,
 author = {Hammond, Tracy and Eoff, Brian and Paulson, Brandon and Wolin, Aaron and Dahmen, 
   Katie and Johnston, Joshua and Rajan, Pankaj},
 title = {Free-sketch Recognition: Putting the Chi in Sketching},
 booktitle = {CHI '08 Extended Abstracts on Human Factors in Computing Systems},
 series = {CHI EA '08},
 year = {2008},
 isbn = {978-1-60558-012-8},
 location = {Florence, Italy},
 pages = {3027--3032},
 numpages = {6},
 url = {http://doi.acm.org/10.1145/1358628.1358802},
 doi = {10.1145/1358628.1358802},
 acmid = {1358802},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {free-sketch, ladder, multimodal interaction, paleosketch, pen input, shortstraw, 
   sketch recognition, tablet pc}
} 

@inproceedings{Paulson:2008:SEG:1463689.1463739,
 author = {Paulson, Brandon and Eoff, Brian and Wolin, Aaron and Johnston, Joshua and Hammond, Tracy},
 title = {Sketch-based Educational Games: Drawing Kids Away from Traditional Interfaces},
 booktitle = {Proceedings of the 7th International Conference on Interaction Design and Children},
 series = {IDC '08},
 year = {2008},
 isbn = {978-1-59593-994-4},
 location = {Chicago, Illinois},
 pages = {133--136},
 numpages = {4},
 url = {http://doi.acm.org/10.1145/1463689.1463739},
 doi = {10.1145/1463689.1463739},
 acmid = {1463739},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {LADDER, PaleoSketch, automated feedback, educational games, sketch recognition}
}

@inproceedings{Paulson:2008:OAR:1531826.1531845,
 author = {Paulson, Brandon and Hammond, Tracy},
 title = {Office Activity Recognition Using Hand Posture Cues},
 booktitle = {Proceedings of the 22nd British HCI Group Annual Conference on People and Computers: Culture,
   Creativity, Interaction - Volume 2},
 series = {BCS-HCI '08},
 year = {2008},
 isbn = {978-1-906124-06-9},
 location = {Liverpool, United Kingdom},
 pages = {75--78},
 numpages = {4},
 url = {http://dl.acm.org/citation.cfm?id=1531826.1531845},
 acmid = {1531845},
 publisher = {British Computer Society},
 address = {Swinton, UK},
 keywords = {CyberGlove, activity recognition, context-aware, hand posture, office environment, wearable computing}
} 

@inproceedings{Paulson:2008:PAP:1378773.1378775,
 author = {Paulson, Brandon and Hammond, Tracy},
 title = {PaleoSketch: Accurate Primitive Sketch Recognition and Beautification},
 booktitle = {Proceedings of the 13th International Conference on Intelligent User Interfaces},
 series = {IUI '08},
 year = {2008},
 isbn = {978-1-59593-987-6},
 location = {Gran Canaria, Spain},
 pages = {1--10},
 numpages = {10},
 url = {http://doi.acm.org/10.1145/1378773.1378775},
 doi = {10.1145/1378773.1378775},
 acmid = {1378775},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {low-level processing, pen-based interfaces, shape beautification, sketch recognition}
} 

@article{Paulson2008,
 author = {Paulson, Brandon and Hammond, Tracy},
 title = {{MARQS}: retrieving sketches learned from a single example using a dual-classifier},
 journal = {Journal on Multimodal User Interfaces},
 year = {2008},
 volume = {2},
 number = {1},
 pages = {3--11},
 issn = {1783-8738},
 doi = {10.1007/s12193-008-0006-0},
 url = {http://dx.doi.org/10.1007/s12193-008-0006-0}
}

@inproceedings{paulson2008HCC,
 title = {What!?! No Rubine Features?: Using Geometric-Based Features to Produce Normalized Confidence Values 
   for Sketch Recognition},
 author = {Paulson, Brandon and Rajan, Pankaj and Davalos, Pedro and Gutierrez-Osuna, Ricardo and Hammond, Tracy},
 booktitle = {HCC Workshop: Sketch Tools for Diagramming (VL/HCC)},
 year = {2008},
 address = {Herrsching am Ammersee, Germany},
 month = 9,
 organization = {VL/HCC},
 pages = {57--63}
}

@inproceedings{Paulson:2008:SSO:2386301.2386316,
 author = {Paulson, B. and Wolin, A. and Johnston, J. and Hammond, T.},
 title = {{SOUSA}: Sketch-based Online User Study Applet},
 booktitle = {Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling},
 series = {SBM'08},
 year = {2008},
 isbn = {978-3-905674-07-1},
 location = {Annecy, France},
 pages = {81--88},
 numpages = {8},
 url = {http://dx.doi.org/10.2312/SBM/SBM08/081-088},
 doi = {10.2312/SBM/SBM08/081-088},
 acmid = {2386316},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
}

@inproceedings{peschel2008STRAT,
 title = {STRAT: A Sketched-Truss Recognition and Analysis Tool},
 author = {Peschel, Joshua M and Hammond, Tracy Anne},
 booktitle = {2008 International Workshop on Visual Languages and Computing (VLC) at the 14th International 
   Conference on Distributed Multimedia Systems (DMS)},
 year = {2008},
 address = {Boston, MA},
 month = 9,
 organization = {Knowledge Systems Institute},
 pages = {282--287}
}

@inproceedings{Plimmer:2008:GSS:1432522.1432529,
 author = {Plimmer, Beryl and Hammond, Tracy},
 title = {Getting Started with Sketch Tools},
 booktitle = {Proceedings of the 5th International Conference on Diagrammatic Representation and Inference},
 series = {Diagrams '08},
 year = {2008},
 isbn = {978-3-540-87729-5},
 location = {Herrsching, Germany},
 pages = {9--12},
 numpages = {4},
 url = {http://dx.doi.org/10.1007/978-3-540-87730-1_5},
 doi = {10.1007/978-3-540-87730-1_5},
 acmid = {1432529},
 publisher = {Springer-Verlag},
 address = {Berlin, Heidelberg}
} 

@inproceedings{Plimmer:2008:WST:1549823.1550035,
 author = {Plimmer, Beryl and Hammond, Tracy},
 title = {Workshop on Sketch Tools for Diagramming},
 booktitle = {Proceedings of the 2008 IEEE Symposium on Visual Languages and Human-Centric Computing},
 series = {VLHCC '08},
 year = {2008},
 isbn = {978-1-4244-2528-0},
 pages = {4--},
 url = {http://dx.doi.org/10.1109/VLHCC.2008.4639047},
 doi = {10.1109/VLHCC.2008.4639047},
 acmid = {1550035},
 publisher = {IEEE Computer Society},
 address = {Washington, DC, USA}
} 

@inproceedings{Rajan:2008:PME:2386301.2386309,
 author = {Rajan, Pankaj and Hammond, T.},
 title = {From Paper to Machine: Extracting Strokes from Images for Use in Sketch Recognition},
 booktitle = {Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling},
 series = {SBM'08},
 year = {2008},
 isbn = {978-3-905674-07-1},
 location = {Annecy, France},
 pages = {41--48},
 numpages = {8},
 url = {http://dx.doi.org/10.2312/SBM/SBM08/041-048},
 doi = {10.2312/SBM/SBM08/041-048},
 acmid = {2386309},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
} 

@inproceedings{Taele:2008:UGS:1620270.1620376,
 author = {Taele, Paul and Hammond, Tracy},
 title = {Using a Geometric-based Sketch Recognition Approach to Sketch Chinese Radicals},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1832--1833},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620376},
 acmid = {1620376},
 publisher = {AAAI Press}
} 

@inproceedings{taele2008HCC,
 title = {Chinese Characters as Sketch Diagrams Using a Geometric-Based Approach},
 author = {Taele, Paul and Hammond, Tracy},
 booktitle = {Proceedings of the 2008 IEEE Symposium on Visual Languages and Human-Centric Computing 
   (VL/HCC) Workshop on Sketch Tools for Diagramming},
 year = {2008},
 address = {Herrsching am Ammersee, Germany},
 month = 9,
 organization = {VL/HCC},
 pages = {74--82}
}

@inproceedings{taele2008geometric,
 title = {A Geometric-Based Sketch Recognition Approach for Handwritten Mandarin Phonetic Symbols {I}},
 author = {Taele, Paul and Hammond, Tracy Anne},
 booktitle = {2008 International Workshop on Visual Languages and Computing (VLC) at the 14th International 
   Conference on Distributed Multimedia Systems (DMS)},
 year = {2008},
 address = {Boston, MA},
 month = 9,
 organization = {Knowledge Systems Institute},
 note = {6 pages}
}

@inproceedings{Wolin:2008:SSE:2386301.2386308,
 author = {Wolin, A. and Eoff, B. and Hammond, T.},
 title = {ShortStraw: A Simple and Effective Corner Finder for Polylines},
 booktitle = {Proceedings of the Fifth Eurographics Conference on Sketch-Based Interfaces and Modeling},
 series = {SBM'08},
 year = {2008},
 isbn = {978-3-905674-07-1},
 location = {Annecy, France},
 pages = {33--40},
 numpages = {8},
 url = {http://dx.doi.org/10.2312/SBM/SBM08/033-040},
 doi = {10.2312/SBM/SBM08/033-040},
 acmid = {2386308},
 publisher = {Eurographics Association},
 address = {Aire-la-Ville, Switzerland}
} 

@inproceedings{Wolin:2008:EFP:1620270.1620378,
 author = {Wolin, Aaron and Paulson, Brandon and Hammond, Tracy},
 title = {Eliminating False Positives During Corner Finding by Merging Similar Segments},
 booktitle = {Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 3},
 series = {AAAI'08},
 year = {2008},
 isbn = {978-1-57735-368-3},
 location = {Chicago, Illinois},
 pages = {1836--1837},
 numpages = {2},
 url = {http://dl.acm.org/citation.cfm?id=1620270.1620378},
 acmid = {1620378},
 publisher = {AAAI Press}
}

@inproceedings{hammond2007Brown,
 title = {Sketch Recognition at Texas A\&M University},
 author = {Hammond, Tracy},
 booktitle = {Brown Workshop on Pen-Centric Computing},
 year = 2007,
 address = {Providence, RI},
 month = 3,
 organization = {Eurographics},
 note = {6 pages}
}

@INPROCEEDINGS{hammond2007FIE,
 author = {T. Hammond},
 booktitle = {Frontiers In Education Conference - Global Engineering: Knowledge Without Borders, Opportunities Without
   Passports, 2007. FIE '07. 37th Annual},
 title = {Enabling instructors to develop sketch recognition applications for the classroom},
 year = {2007},
 pages = {S3J-11--S3J-16},
 keywords = {computer aided instruction; computer animation; image recognition; user interfaces; GUILD; LADDER;
   graphical diagrams; hand sketching; sketch recognition; Animation; Application software; Automata;
   Computer science; Computer science education; Design automation; Educational institutions; Feedback; Programming
   profession; Watches; active learning; pen-input},
 doi = {10.1109/FIE.2007.4417930},
 ISSN = {0190-5848},
 month = {Oct}
}

@inproceedings{hammond2007GHC,
 title = {Simplifying Sketch Recognition {UI} Development},
 author = {Hammond, Tracy},
 booktitle = {Grace Hopper Celebration of Women in Computing},
 year = 2007,
 address = {Orlando, FL},
 month = 10,
 organization = {GHC},
 note = {5 pages}
}

@inproceedings{hammond2007EG,
 title = {Recognizing Free-Form Hand-Sketched Constraint Network Diagrams by Combining Geometry and Context},
 author = {Hammond, Tracy and O'Sullivan, Barry},
 booktitle = {Proceedings of the Eurographics Ireland},
 year = 2007,
 pages = {67--74},
 address = {Dublin, Ireland},
 month = 12,
 organization = {Eurographics}
}

@inproceedings{paulson2007uist,
 title = {A System for Recognizing and Beautifying Low-Level Sketch Shapes Using NDDE and DCR},
 author = {Paulson, Brandon and Hammond, Tracy},
 booktitle = {ACM Symposium on User Interface Software and Technology (UIST)},
 year = 2007,
 address = {Newport, Rhode Island},
 month = 10,
 organization = {ACM},
 note = {2 pages}
}

@inproceedings{Hammond:2006:ILS:1111449.1111495,
 author = {Hammond, Tracy and Davis, Randall},
 title = {Interactive Learning of Structural Shape Descriptions from Automatically Generated Near-miss Examples},
 booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
 series = {IUI '06},
 year = {2006},
 isbn = {1-59593-287-9},
 location = {Sydney, Australia},
 pages = {210--217},
 numpages = {8},
 url = {http://doi.acm.org/10.1145/1111449.1111495},
 doi = {10.1145/1111449.1111495},
 acmid = {1111495},
 publisher = {ACM},
 address = {New York, NY, USA},
 keywords = {active learning, ladder, near-miss, shape description, sketch recognition, structural description, user interfaces}
} 

@article{hammond2005ladder,
 title = {LADDER, a sketching language for user interface developers},
 author = {Hammond, Tracy and Davis, Randall},
 journal = {Computers \& Graphics},
 volume = {29},
 number = {4},
 pages = {518--532},
 year = {2005},
 publisher = {Elsevier}
}

@inproceedings{hammond2004automatically,
 title = {Automatically generating sketch interfaces from shape descriptions},
 author = {Tracy Hammond},
 booktitle = {Proceedings of the 4th Annual MIT Student Oxygen Workshop},
 year = {2004},
 publisher = {MIT},
 note = {4 pages}
}

@inproceedings{Hammond:2004:ATS:1597148.1597222,
 author = {Hammond, Tracy and Davis, Randall},
 title = {Automatically Transforming Symbolic Shape Descriptions for Use in Sketch Recognition},
 booktitle = {Proceedings of the 19th National Conference on Artificial Intelligence},
 series = {AAAI'04},
 year = {2004},
 isbn = {0-262-51183-5},
 location = {San Jose, California},
 pages = {450--456},
 numpages = {7},
 url = {http://dl.acm.org/citation.cfm?id=1597148.1597222},
 acmid = {1597222},
 publisher = {AAAI Press}
} 

@inproceedings{hammond2004shady,
 title = {Shady: A Shape Description Debugger for Use in Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {AAAI Fall Symposium on Making Pen-Based Interaction Intelligent and Natural (AAAI)},
 year = 2004,
 address = {Arlington, VA},
 month = 10,
 organization = {AAAI},
 isbn = {978-1-57735-217-4},
 note = {7 pages}
}

@inproceedings{hammond2004Debug,
 title = {Debugging Shape Definitions for Use in Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract},
 year = 2004,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT}, 
 note = {2 pages}
}

@inproceedings{hammond2004Ladder,
 title = {LADDER: A Sketch Recognition Language},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract},
 year = 2004,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}

@inproceedings{hammond2004testing,
 title = {Testing Shape Descriptions by Automatically Translating Them for Use in Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract},
 year = 2004,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}

@inproceedings{Hammond:2003:LLD:1630659.1630728,
 author = {Hammond, Tracy and Davis, Randall},
 title = {LADDER: A Language to Describe Drawing, Display, and Editing in Sketch Recognition},
 booktitle = {Proceedings of the 18th International Joint Conference on Artificial Intelligence},
 series = {IJCAI'03},
 year = {2003},
 location = {Acapulco, Mexico},
 pages = {461--467},
 numpages = {7},
 url = {http://dl.acm.org/citation.cfm?id=1630659.1630728},
 acmid = {1630728},
 publisher = {Morgan Kaufmann Publishers Inc.},
 address = {San Francisco, CA, USA}
}

@inproceedings{hammond2002Tahuti,
 title = {Tahuti: A Geometrical Sketch Recognition System for UML Class Diagrams},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {Technical Report SS-02-08: Papers from the 2002 Association for the Advancement of Artificial Intelligence
   (AAAI) Spring Symposium on Sketch Understanding},
 year = {2002},
 address = {Menlo Park, CA},
 month = 7,
 organization = {AAAI},
 note = {8 pages}
}

@inproceedings{hammond2002domain,
 title = {A Domain Description Language for Sketch Recognition},
 author = {Hammond, Tracy and Davis, Randall},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}

@inproceedings{hammond2002agent,
 title = {An Agent-Based System for Capturing and Indexing Software Design Meetings},
 author = {Hammond, Tracy and Gajos, Krzysztof and Davis, Randall and Shrobe, Howard},
 booktitle = {Proceedings of International Workshop on Agents In Design, WAID},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {18 pages}
}

@inproceedings{hammond2002sketch,
 title = {Sketch Recognition in Software Design},
 author = {Hammond, Tracy and Gajos, Krzysztof and Davis, Randall and Shrobe, Howard},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}

@inproceedings{hammond2002FIE,
 title = {Gender-Based Underrepresentation in Computer Science and Related Disciplines},
 author = {Hammond, Tracy and Hammond, Jan},
 booktitle = {Frontiers in Education. FIE 2002. 32nd Annual},
 year = {2002},
 number = {2},
 address = {Cambridge, MA},
 month = 7,
 organization = {IEEE},
 isbn = {0-7803-7444-4},
 note = {6 pages}
}

@inproceedings{hammond2002natural,
 title = {Natural Editing and Recognition of UML Class Diagrams},
 author = {Hammond, Tracy and Oshiro, Kalani and Davis, Randall},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2002,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {2 pages}
}

@inproceedings{hammond2002SOWMulti,
 title = {Multi-{D}omain Sketch Recognition},
 author = {Tracy Hammond},
 booktitle = {Proceedings of the 2nd Annual MIT Student Oxygen Workshop},
 year = {2002},
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {4 pages}
}

@inproceedings{alvarado2001framework,
 title = {A Framework for Multi-Domain Sketch Recognition},
 author = {Alvarado, Christine and Sezgin, Tevfik Metin and Scott, Dana and Hammond, Tracy and Kasheff, Zardosht
   and Oltmans, Michael and Davis, Randall},
 booktitle = {MIT Lab Abstract. Artificial Intelligence Laboratory},
 year = 2001,
 address = {Cambridge, MA},
 month = 9,
 organization = {MIT},
 note = {3 pages}
}

@inproceedings{hammond2001natural,
 title = {Natural Sketch Recognition in UML Class Diagrams},
 author = {Tracy Hammond},
 booktitle = {Proceedings of the MIT Student Oxygen Workshop},
 year = {2001},
 address = {Gloucester, MA},
 month = 7,
 organization = {MIT},
 note = {3 pages}
}

@article{Raghavan2001Hindu,
 author = {Parthasarthy, Raghavan and Hammond, Tracy},
 title = {Technical Innovation: Options for Developing Countries},
 journal = {The Hindu},
 year = {2001},
 month = {5},
 note = {3 pages},
 address = {India}
}