Universität Stuttgart

Department of Visualization and Interactive Systems: Publications

BibTeX Bibliography

@inproceedings {INPROC-2014-81,
   author = {Stefan Schneegass and Frank Steimle and Andreas Bulling and Florian Alt and Albrecht Schmidt},
   title = {{SmudgeSafe: Geometric Image Transformations for Smudge-resistant User Authentication}},
   booktitle = {Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {775--786},
   type = {Conference Paper},
   month = {January},
   year = {2014},
   isbn = {978-1-4503-2968-2},
   doi = {10.1145/2632048.2636090},
   keywords = {finger smudge traces, graphical passwords, touch input},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.6.5 Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Touch-enabled user interfaces have become ubiquitous, such as on ATMs or portable devices. At the same time, authentication using touch input is problematic, since finger smudge traces may allow attackers to reconstruct passwords. We present SmudgeSafe, an authentication system that uses random geometric image transformations, such as translation, rotation, scaling, shearing, and flipping, to increase the security of cued-recall graphical passwords. We describe the design space of these transformations and report on two user studies: A lab-based security study involving 20 participants in attacking user-defined passwords, using high-quality pictures of real smudge traces captured on a mobile phone display; and an in-the-field usability study with 374 participants who generated more than 130,000 logins on a mobile phone implementation of SmudgeSafe. Results show that SmudgeSafe significantly increases security compared to authentication schemes based on PINs and lock patterns, and exhibits very high learnability, efficiency, and memorability.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-81&engl=0}
}
@inproceedings {INPROC-2010-66,
   author = {Bernhard Schmitz},
   title = {{The ViibraCane - A White Cane for Tactile Navigation Guidance}},
   booktitle = {Proceedings of the California State University, Northridge Center on Disabilities' 25th Annual International Technology and Persons with Disabilities Conference (CSUN 2010)},
   address = {San Diego},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Conference Paper},
   month = {March},
   year = {2010},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-66/INPROC-2010-66.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The ViibraCane, an extension to the navigation system TANIA, is a white cane that can be used as a tactile guiding device for navigation tasks.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-66&engl=0}
}
@inproceedings {INPROC-2010-64,
   author = {Bernhard Schmitz and Thomas Ertl},
   title = {{Making Digital Maps Accessible Using Vibrations}},
   booktitle = {Computers Helping People with Special Needs. 12th International Conference, ICCHP 2010, Proceedings},
   address = {Heidelberg},
   publisher = {Springer Berlin Heidelberg},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   volume = {6179},
   pages = {100--107},
   type = {Conference Paper},
   month = {July},
   year = {2010},
   doi = {10.1007/978-3-642-14097-6_18},
   isbn = {978-3-642-14096-9},
   keywords = {Haptic Maps; Vibrating Maps; Blind Users},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-64/INPROC-2010-64.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {In order to allow blind and deafblind people to use and explore electronically available maps, we have developed a system that displays maps in a tactile way using a standard rumble gamepad. The system is intended for both on-site and off-site use and therefore includes mechanisms for getting overviews of larger regions as well as for the exploration of small areas.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-64&engl=0}
}
@inproceedings {INPROC-2010-40,
   author = {Julia Moehrmann and Xin Wang and Gunther Heidemann},
   title = {{Motion based situation recognition in group meetings}},
   booktitle = {Proceedings of SPIE Conference on Image Processing: Machine Vision Applications III},
   editor = {David Fofi and Kurt S. Niel},
   address = {San Jose, California, USA},
   publisher = {SPIE},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Proceedings of SPIE},
   volume = {7538},
   pages = {75380--75380},
   type = {Conference Paper},
   month = {January},
   year = {2010},
   doi = {10.1117/12.838963},
   keywords = {Group meeting recognition; situation recognition; Hidden Markov Model; Motion features},
   language = {English},
   cr-category = {I.5.4 Pattern Recognition Applications,     I.4.6 Image Processing and Computer Vision Segmentation,     I.4.9 Image Processing and Computer Vision Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-40/INPROC-2010-40.pdf},
   contact = {julia.moehrmann@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {We present an unobtrusive vision-based system for the recognition of situations in group meetings. The system uses a three-stage architecture, consisting of one video processing stage and two classification stages. The video processing stage detects motion in the videos and extracts up to 12 features from this data. The classification stage uses Hidden Markov Models to first identify the activity of every participant in the meeting and afterwards recognize the situation as a whole. The feature extraction uses position information of both hands and the face to extract motion features like speed, acceleration and motion frequency, as well as distance-based features. We investigate the discriminative ability of these features and their applicability to the task of interaction recognition. A two-stage Hidden Markov Model classifier is applied to perform the recognition task. The developed system correctly classifies the situation in 94\% of all frames in our video test set; 3\% of the test data is misclassified due to contradictory behavior of the participants. The results show that unimodal data can be sufficient to recognize complex situations.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-40&engl=0}
}
@inproceedings {INPROC-2010-39,
   author = {Julia Moehrmann and Gunther Heidemann},
   title = {{Automatic trajectory clustering for generating ground truth data sets}},
   booktitle = {Proceedings of SPIE Conference on Image Processing: Machine Vision Applications III},
   editor = {David Fofi and Kurt S. Niel},
   address = {San Jose, California, USA},
   publisher = {SPIE},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Proceedings of SPIE},
   volume = {7538},
   pages = {753808--753808},
   type = {Conference Paper},
   month = {January},
   year = {2010},
   doi = {10.1117/12.838954},
   keywords = {Hidden Markov Model; HMM based representation; clustering; ground truth data},
   language = {English},
   cr-category = {I.2.10 Vision and Scene Understanding,     I.4.8 Image Processing and Computer Vision Scene Analysis,     I.5.3 Pattern Recognition Clustering,     I.5.4 Pattern Recognition Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-39/INPROC-2010-39.pdf},
   contact = {julia.moehrmann@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {We present a novel approach towards the creation of vision-based recognition tasks. Many domain-specific recognition systems have been presented in the past which make use of the large amounts of available video data. The creation of ground truth data sets for the training of these systems remains difficult and tiresome. We present a system which automatically creates clusters of 2D trajectories. The results of this clustering can then be used to perform the actual labeling of the data, or rather the selection of events or features of interest by the user. The selected clusters can be used as positive training data for a user-defined recognition task, without the need to adapt the system. The proposed technique reduces the necessary user interaction and allows the creation of application-independent ground truth data sets with minimal effort. In order to achieve the automatic clustering we have developed a distance metric based on the Hidden Markov Model representations of three sequences (movement, speed, and orientation) derived from the initial trajectory. The proposed system yields promising results and could prove to be an important step towards mining very large data sets.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-39&engl=0}
}
@inproceedings {INPROC-2010-35,
   author = {Julia M{\"o}hrmann and Gunther Heidemann and Oliver Siemoneit and Christoph Hubig and Uwe-Philipp K{\"a}ppeler and Paul Levi},
   title = {{Context Generation with Image Based Sensors: An Interdisciplinary Enquiry on Technical and Social Issues and their Implications for System Design}},
   booktitle = {Proceedings of World Academy of Science, Engineering and Technology},
   address = {Capetown},
   publisher = {WASET},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {262--268},
   type = {Conference Paper},
   month = {January},
   year = {2010},
   keywords = {Context-aware computing; ethical and social issues; image recognition; requirements in system design},
   language = {English},
   cr-category = {I.4.1 Digitization and Image Capture,     I.4.8 Image Processing and Computer Vision Scene Analysis},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Philosophie, Abteilung f{\"u}r Wissenschaftstheorie und Technikphilosophie (IP/WTTP);     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Bildverstehen;     Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Image data holds a large amount of different context information. However, as of today, these resources remain largely untouched. It is thus the aim of this paper to present a basic technical framework which allows for a quick and easy exploitation of context information from image data, especially by non-expert users. Furthermore, the proposed framework is discussed in detail concerning important social and ethical issues which demand special requirements in system design. Finally, a first sensor prototype is presented which meets the identified requirements. Additionally, necessary implications for the software and hardware design of the system are discussed, rendering a sensor system which can be regarded as a good, acceptable, and justifiable technical solution, thereby enabling the extraction of context information from image data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-35&engl=0}
}
@inproceedings {INPROC-2010-19,
   author = {Carlos L{\"u}bbe and Andreas Brodt and Nazario Cipriani and Harald Sanftmann},
   title = {{NexusVIS: A Distributed Visualization Toolkit for Mobile Applications (Demonstration)}},
   booktitle = {Proceedings of the 8th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '10); Mannheim, Germany, March 2010},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--3},
   type = {Conference Paper},
   month = {March},
   year = {2010},
   isbn = {978-1-4244-5328-3},
   keywords = {Visualization, Data Stream Processing},
   language = {English},
   cr-category = {H.2.4 Database Management Systems},
   contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Many mobile pervasive applications need to visualize information about the user's geographic surroundings combined with data from sensors, which determine the user's context. We demonstrate NexusVIS, a distributed visualization toolkit for mobile applications. By building upon an existing data stream processing system we enable applications to define distributed visualization processes as continuous queries. This allows applications to define visualization semantics descriptively. Moreover, NexusVIS is capable of adapting the visual query at runtime, and thus allows navigating the visualized scene both automatically and manually through user control.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-19&engl=0}
}
@inproceedings {INPROC-2009-93,
   author = {Andreas Hub and Joachim Kizler},
   title = {{Integration of Voice-Operated Route Planning and Route Guidance into a Portable Navigation and Object Recognition System for the Blind and Visually Impaired}},
   booktitle = {Proceedings of the 2009 Biennial South Pacific Educators in Vision Impairment Conference (SPEVI 2009); January 5-9, Adelaide, Australia},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--7},
   type = {Conference Paper},
   month = {January},
   year = {2009},
   keywords = {Blind Navigation; Object Recognition},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-93/INPROC-2009-93.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {This study describes the development of a multi-functional assistant system for the blind which combines localisation, real and virtual navigation within modelled environments and the identification and tracking of fixed and movable objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-93&engl=0}
}
@inproceedings {INPROC-2009-92,
   author = {Oliver Siemoneit and Christoph Hubig and Bernhard Schmitz and Thomas Ertl},
   title = {{Mobiquitous Devices and Perception of Reality. A Philosophical Enquiry into Mobile and Ubiquitous Computing Devices that Alter Perception Using the Example of TANIA - A Tactile Acoustical Indoor and Outdoor Navigation and Information Assistant for the Blind, Deafblind, and Visually-impaired Users.}},
   booktitle = {Proceedings of the 5th Asia-Pacific Computing and Philosophy Conference (APCAP 2009)},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {123--130},
   type = {Conference Paper},
   month = {October},
   year = {2009},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues,     H.5.2 Information Interfaces and Presentation User Interfaces,     H.1.2 User/Machine Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-92/INPROC-2009-92.pdf,     http://bentham.k2.t.u-tokyo.ac.jp/ap-cap09/proceedings.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Philosophie, Abteilung f{\"u}r Wissenschaftstheorie und Technikphilosophie (IP/WTTP);     Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Philosophical research on mobiquitous devices still lacks important topics, in particular how mobiquitous devices change us and our perception of the world. It is the aim of this paper to make a basic contribution to this debate and to shed some light on fundamental questions. Therefore, we introduce the TANIA system, a tactile acoustical navigation and information assistant for the blind, which uses, among other concepts, 1) a vibrating Wii remote mounted to a cane so as to indicate the orientation of the blind user towards a certain destination and 2) a stereo camera integrated into a bicycle helmet so as to compensate for the loss of vision. Alteration of perception is discussed in detail by distinguishing between perception enhancement, perception substitution, perception constriction and perception determination. Moreover, we elaborate upon basic system design issues, thereby also justifying why we designed the TANIA system the way it is. Finally, it is shown that technology itself has never been anything but an extension of man, and that technology, since it is not only a means but also a medium, has always altered, still alters, and will always alter our perception of the world.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-92&engl=0}
}
@inproceedings {INPROC-2009-91,
   author = {Andreas Hub and Bernhard Schmitz},
   title = {{Addition of RFID-Based Initialization and Object Recognition to the Navigation System TANIA}},
   booktitle = {Proceedings of the California State University, Northridge Center on Disabilities' 24th Annual International Technology and Persons with Disabilities Conference (CSUN 2009)},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--3},
   type = {Conference Paper},
   month = {March},
   year = {2009},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-91/INPROC-2009-91.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The TANIA indoor and outdoor blind navigation system has been augmented with RFID technology, providing automatic initialization and recognition of tagged objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-91&engl=0}
}
@inproceedings {INPROC-2009-89,
   author = {Bernhard Schmitz and Andreas Hub},
   title = {{Combination of the Navigation System TANIA with RFID-Based Initialization and Object Recognition}},
   booktitle = {Proceedings from 7th European Conference of ICEVI},
   address = {Dublin},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--2},
   type = {Conference Paper},
   month = {July},
   year = {2009},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-89/INPROC-2009-89.pdf,     http://www.icevi-europe.org/dublin2009/index.html},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {In order to initialize the user’s location more accurately, the TANIA indoor and outdoor blind navigation system has been extended with an RFID reader. The system can also be used for the recognition of tagged objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-89&engl=0}
}
@inproceedings {INPROC-2009-84,
   author = {M. Eissele and D. Weiskopf and T. Ertl},
   title = {{Interactive Context-Aware Visualization for Mobile Devices}},
   booktitle = {SG '09: Proceedings of Smart Graphics},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {167--178},
   type = {Conference Paper},
   month = {January},
   year = {2009},
   language = {English},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Utilizing context information (e.g. location, user aspects, or hardware capabilities) enables the presented generic framework to automatically control the selection and configuration of visualization techniques and therewith provide interactive illustrations, displayed on small mobile devices. For context-annotated data, provided by an underlying context-aware world model, the proposed system determines adequate visualization methods out of a database. Based on a novel analysis of a hierarchical data format definition and an evaluation of relevant context attributes, visualization templates are selected, configured, and instanced. This automatic, interactive process enables visualizations that smartly reconfigure according to changed context aspects. In addition to the generic concept, we present real-world applications that make use of this framework.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-84&engl=0}
}
@inproceedings {INPROC-2009-83,
   author = {H. Sanftmann and A. Blessing and H. Sch{\"u}tze and D. Weiskopf},
   title = {{Visual Exploration of Classifiers for Hybrid Textual and Geospatial Matching}},
   booktitle = {Proceedings of Vision, Modeling, and Visualization VMV '09},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Conference Paper},
   month = {November},
   year = {2009},
   language = {English},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Maschinelle Sprachverarbeitung;     Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The availability of large geospatial data from different sources has dramatically increased, but for the usage of such data in geo-mashup or context-aware systems, a data fusion component is necessary. To solve the integration issue, classifiers are obtained by supervised training, with feature vectors derived from textual and geospatial attributes. In an application example, a coherent part of Germany was annotated by humans and used for supervised learning. Annotation by humans is not free of errors, which decreases the performance of the classifier. We show how visual analytics techniques can be used to efficiently detect such false annotations. Especially the textual features introduce high-dimensional feature vectors, where visual analytics becomes important and helps to understand and improve the trained classifiers. Particular technical components used in our systems are scatterplots, multiple coordinated views, and interactive data drill-down.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-83&engl=0}
}
@inproceedings {INPROC-2008-48,
   author = {Mike Eissele and Matthias Kreiser and Thomas Ertl},
   title = {{Context-Controlled Flow Visualization in Augmented Reality}},
   booktitle = {ACM International Conference Proceeding Series},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {89--96},
   type = {Conference Paper},
   month = {June},
   year = {2008},
   keywords = {Context-aware; augmented reality; visualization; flow},
   language = {English},
   cr-category = {I.3.7 Three-Dimensional Graphics and Realism,     I.3.6 Computer Graphics Methodology and Techniques},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {A major challenge of novel scientific visualization using Augmented Reality is the accuracy of the user/camera position tracking. Many alternative techniques have been proposed, but still there is no general solution. Therefore, this paper presents a system that copes with different conditions and makes use of context information, e.g. available tracking quality, to select adequate Augmented Reality visualization methods. This way, users will automatically benefit from high-quality visualizations if the system can estimate the pose of the real-world camera accurately enough. Otherwise, specially-designed alternative visualization techniques which require a less accurate positioning are used for the augmentation of real-world views. The proposed system makes use of multiple tracking systems and a simple estimation of the currently available overall accuracy of the pose estimation, used as context information to control the resulting visualization. Results of a prototypical implementation for visualization of 3D scientific flow data are presented to show the practicality.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-48&engl=0}
}
@inproceedings {INPROC-2008-130,
   author = {Andreas Hub},
   title = {{Precise Indoor and Outdoor Navigation for the Blind and Visually Impaired Using Augmented Maps and the TANIA System}},
   booktitle = {Proceedings of the 9th International Conference on Low Vision (Vision 2008); July 7-11, Montreal, Canada, 2008},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {July},
   year = {2008},
   keywords = {Blind Navigation; Blind Users; Impaired Vision},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-130/INPROC-2008-130.pdf,     http://www.opto.umontreal.ca/vision2008/},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The use of a small, portable Tactile Acoustical Navigation and Information Assistant (TANIA) by ambulatory blind people in complex environments is presented. TANIA utilizes an inertial sensor, tablet computer, and enhanced mapping to provide precise navigation with up to one-step accuracy. Its operation is relatively simple, even for elderly people with no computer experience. Previously-installed beacon or tag infrastructure is not required, which expands environmental access for blind users to any area where adequate digital mapping has been done. Current development in pilot locations is described, including examples of how maps are augmented with specific, location-based information. Such data can be presented to the user acoustically or in Braille. Given the ever-increasing availability of global positioning and information services, systems such as TANIA suggest the potential for independent and precise worldwide navigation by blind people.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-130&engl=0}
}
@inproceedings {INPROC-2008-129,
   author = {Andreas Hub},
   title = {{Map Requirements and Attainable Public Policy for an Installation-free Worldwide Navigation and Information System for the Blind}},
   booktitle = {Proceedings of the 9th International Conference on Low Vision (Vision 2008); July 7-11, Montreal, Canada, 2008},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {July},
   year = {2008},
   keywords = {Blind Navigation; Blind Users; Impaired Vision},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-129/INPROC-2008-129.pdf,     http://www.opto.umontreal.ca/vision2008/},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The aim of this work is to demonstrate that safe and independent mobility is feasible for blind pedestrians worldwide, requiring only adequate mapping and attainable public policy. A basic map format and an installation-free guidance and information system are presented. The developed map format can be used as the basis for a worldwide navigation system for the blind. In order to achieve such expanded accessibility, however, certain public policy changes are necessary. National and local organizations of blind and deafblind people must educate themselves and their supporters about the need for, and benefits of, detailed mapping of buildings and cities. They must take the lead in raising public awareness and in lobbying institutions and cities to offer maps in formats which support safe and independent navigation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-129&engl=0}
}
@inproceedings {INPROC-2008-128,
   author = {Andreas Hub},
   title = {{Integration of Active Tactile Control Braille Technology into Portable Navigation and Object Recognition Systems for the Blind and Deafblind}},
   booktitle = {Proceedings of the 9th International Conference on Low Vision (Vision 2008); July 7-11, Montreal, Canada, 2008},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--3},
   type = {Conference Paper},
   month = {July},
   year = {2008},
   keywords = {Blind Navigation, Blind Users, Impaired Vision},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-128/INPROC-2008-128.pdf,     http://www.opto.umontreal.ca/vision2008/},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {A small Braille display with Active Tactile Control (ATC) Braille technology was developed in association with the Handy Tech Company. With ATC technology text information can be read without pressing a scrolling button, as the system automatically shifts to the next line when the reading finger is detected at the end of the last word. This Braille display was connected to two compatible systems. The first is the TANIA (Tactile Acoustical Navigation and Information Assistant) navigation system, based on detailed maps, a movement sensor, and the Global Positioning System (GPS). The second is an object recognition system, which uses 3D environmental models, a movement sensor and stereo camera. Either system, or both in combination, provide information acoustically or in Braille. Blind and deafblind users report that the use of the portable ATC display technology combined with these systems represents an additional step toward increasing independent orientation and mobility.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-128&engl=0}
}
@inproceedings {INPROC-2008-127,
   author = {Andreas Hub},
   title = {{Guiding Grids in Augmented Maps for Precise Installation-Free Worldwide Blind Navigation}},
   booktitle = {Conference Proceedings of the California State University, Northridge Center on Disabilities' 23rd Annual International Technology and Persons with Disabilities Conference (CSUN 2008); March 19-24; Los Angeles, CA, USA},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {March},
   year = {2008},
   keywords = {Blind Navigation; Blind Users; Impaired Vision},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-127/INPROC-2008-127.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Guiding grids were integrated into the TANIA system’s maps. Precise navigation support is provided indoors and outdoors even in large open spaces without physical guidelines.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-127&engl=0}
}
@inproceedings {INPROC-2008-125,
   author = {M. Moser and D. Weiskopf},
   title = {{Interactive Volume Rendering on Mobile Devices}},
   booktitle = {Vision, Modeling, and Visualization VMV '08 Conference Proceedings},
   publisher = {Akademische Verlagsgesellschaft AKA GmbH},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {217--226},
   type = {Conference Paper},
   month = {December},
   year = {2008},
   language = {English},
   cr-category = {I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {A rendering technique for interactive direct volume visualization on mobile devices is presented. Utilizing basic graphics hardware functionality such as rasterization and 2D texture mapping, native volume rendering is possible by adopting and extending the 2D texture-slicing approach. Limitations of mobile graphics devices are discussed, in particular concerning graphics performance and available functionality. These limitations lead to modifications of the traditional texture-based volume-rendering algorithm: we propose a screen-adaptive hybrid low/high-resolution rendering technique that achieves a good compromise between image quality and interactivity; furthermore, compressed and paletted texture formats are explored for fast texture update, for example, during interactive specification of the transfer function. The implementation of the rendering algorithm is based on OpenGL ES 1.0 and was tested on the Dell Axim X50v/X51v PDA. Performance characteristics are documented in the form of several kinds of performance measurements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-125&engl=0}
}
@inproceedings {INPROC-2007-72,
   author = {Mike Eissele and Thomas Ertl},
   title = {{Mobile Navigation and Augmentation utilizing Real-World Text}},
   booktitle = {Proceedings of Mensch und Computer 2007, Workshop on Nomadic and Wearable Computing 2007},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Conference Paper},
   month = {November},
   year = {2007},
   language = {English},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Mobile, smart devices are already available in everyday life. Most of them offer a rich feature set, including camera, GPS antenna, or even WIFI modules. Navigation software utilizing GPS to acquire location data has become very popular on these mobile clients. However, due to hardware restrictions GPS positioning cannot be used for indoor scenarios. In contrast, this paper focuses on navigation for indoor environments, targeting a low-cost solution without the requirement of additional hardware installations or markers inside buildings. A server-based optical character recognition service is used to map images containing unique text passages, acquired with a mobile client, to additional meta data, e.g. locations or orientation. We also show that augmented-reality methods can be used to provide an intuitive presentation and interaction system by overlaying real-world images with additional information like navigation symbols or internet hyperlinks.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-72&engl=0}
}
@inproceedings {INPROC-2007-71,
   author = {Martin Rotard and Mike Eissele and Raoul van Putten and Thomas Ertl},
   title = {{Zoomable User Interfaces in Scalable Vector Graphics}},
   booktitle = {Proceedings of Open SVG 2007},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Conference Paper},
   month = {November},
   year = {2007},
   language = {English},
   cr-category = {I.3.3 Picture/Image Generation},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Zoomable user interfaces are an evolutionary outgrowth of graphical user interfaces. In this paper we propose a zoomable user interface based on Scalable Vector Graphics. Three-level zooming is proposed as a new paradigm to combine different zooming functionalities in a common interface and support zooming within the window manager. This helps to unify zooming techniques of different applications. To meet the demand of efficient and easy navigation on a user interface, several novel interaction techniques are shown that further support the integration of three-level zooming within the underlying presentation system. For mobile small-screen devices, where the benefit of zooming user interfaces is even higher, the proposed system can be operated with simple pen tap or tap-and-drag operations. We also present a prototypical implementation, which demonstrates how applications based on the SPARK toolkit can transparently benefit from the proposed technology.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-71&engl=0}
}
@inproceedings {INPROC-2007-70,
   author = {Martin Kraus and Mike Eissele and Magnus Strengert},
   title = {{GPU-Based Edge-Directed Image Interpolation}},
   booktitle = {Image Analysis (Proceedings of SCIA 2007)},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   volume = {4522},
   pages = {532--541},
   type = {Conference Paper},
   month = {November},
   year = {2007},
   keywords = {Zoom, GPU, HDTV},
   language = {English},
   cr-category = {I.3.3 Picture/Image Generation},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The rendering of lower resolution image data on higher resolution displays has become a very common task, in particular because of the increasing popularity of webcams, camera phones, and low-bandwidth video streaming. Thus, there is a strong demand for real-time, high-quality image magnification. In this work, we suggest exploiting the high performance of programmable graphics processing units (GPUs) for an adaptive image magnification method. To this end, we propose a GPU-friendly algorithm for image up-sampling by edge-directed image interpolation, which avoids ringing artifacts, excessive blurring, and staircasing of oblique edges. At the same time it features gray-scale invariance, is applicable to color images, and allows for real-time processing of full-screen images on today's GPUs.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-70&engl=0}
}
@inproceedings {INPROC-2007-124,
   author = {Andreas Hub and Stefan Kombrink and Klaus Bosse and Thomas Ertl},
   title = {{Conference Navigation and Communication Assistant for the Deafblind based on Tactile and Acoustically Amplified Augmented Map Information for the 14th Deafblind International World Conference}},
   booktitle = {Proceedings of the 14th Deafblind International World Conference (DbI 2007); Perth, Australia, September 25-30, 2007},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--2},
   type = {Conference Paper},
   month = {September},
   year = {2007},
   keywords = {Blind Navigation; Blind Users; Impaired Vision},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-124/INPROC-2007-124.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {We have developed a portable electronic navigation assistant for the deafblind that facilitates independent navigation even in new and complex environments, such as large conference sites. The device includes a keyboard, loudspeaker and small Braille display, allowing deafblind users to communicate with anyone capable of typing.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-124&engl=0}
}
@inproceedings {INPROC-2007-123,
   author = {Andreas Hub and Stefan Kombrink and Klaus Bosse and Thomas Ertl},
   title = {{TANIA – A Tactile-Acoustical Navigation and Information Assistant for the 2007 CSUN Conference}},
   booktitle = {Proceedings of the California State University, Northridge Center on Disabilities' 22nd Annual International Technology and Persons with Disabilities Conference (CSUN 2007); March 19-24; Los Angeles,CA, USA},
   address = {Los Angeles},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {March},
   year = {2007},
   keywords = {Indoor Navigation; Blind Users; Impaired Vision; Mobile Computing},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-123/INPROC-2007-123.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {A navigation assistant based on a tactile-acoustical interface and augmented map information is presented, affording blind people real and virtual explorations of the 2007 CSUN Conference environment. By tapping on a touch screen, hotel layout and conference-related data are provided.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-123&engl=0}
}
@inproceedings {INPROC-2007-122,
   author = {M. Kraus and M. Eissele and M. Strengert},
   title = {{GPU-Based Edge Directed Image Interpolation}},
   booktitle = {Image Analysis (Proceedings of SCIA 2007)},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   pages = {532--541},
   type = {Conference Paper},
   month = {January},
   year = {2007},
   language = {English},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The rendering of lower resolution image data on higher resolution displays has become a very common task, in particular because of the increasing popularity of webcams, camera phones, and low-bandwidth video streaming. Thus, there is a strong demand for real-time, high-quality image magnification. In this work, we suggest exploiting the high performance of programmable graphics processing units (GPUs) for an adaptive image magnification method. To this end, we propose a GPU-friendly algorithm for image up-sampling by edge-directed image interpolation, which avoids ringing artifacts, excessive blurring, and staircasing of oblique edges. At the same time it features gray-scale invariance, is applicable to color images, and allows for real-time processing of full-screen images on today's GPUs.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-122&engl=0}
}
@inproceedings {INPROC-2006-65,
   author = {Andreas Hub and Tim Hartter and Thomas Ertl},
   title = {{Interactive Tracking of Movable Objects for the Blind on the Basis of Environment Models and Perception-Oriented Object Recognition Methods}},
   booktitle = {Proceedings of the 8th ACM SIGACCESS Conference on Computers and Accessibility: ASSETS 2006; Portland, OR, USA, October 23-25, 2006},
   address = {New York},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {111--118},
   type = {Conference Paper},
   month = {October},
   year = {2006},
   isbn = {1-59593-290-9},
   keywords = {Indoor Navigation, Blind Users, Impaired Vision, Mobile Computing},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     I.4.6 Image Processing and Computer Vision Segmentation,     I.4.7 Image Processing and Computer Vision Feature Measurement,     I.4.8 Image Processing and Computer Vision Scene Analysis,     K.3.1 Computer Uses in Education,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-65/INPROC-2006-65.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de, tim.hartter@gmx.de, thomas.ertl@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {In previous work we have presented a prototype of an assistant system for the blind that can be used for self-localization and interactive object identification of static objects stored within 3D environment models. In this paper we present a new method for interactive tracking of various types of movable objects. The state of fixed movable objects, like doors, can be recognized by comparing the distance between sensor data and a 3D model. For the identification and model-based tracking of free movable objects, like chairs, we have developed an algorithm that is similar to human perception, based on shape and color comparisons to trained objects. Further, using a common face detection algorithm, our assistant system informs the user of the presence of people, and enables the localization of a real person based on interactive tracking of virtual models of humans.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-65&engl=0}
}
@inproceedings {INPROC-2006-64,
   author = {Andreas Hub and Stefan Kombrink and Thomas Ertl},
   title = {{Tactile-Acoustical Navigation Assistant for Real and Virtual Explorations of the Environment.}},
   booktitle = {Proceedings of the 1st Multi-disciplinary Vision Rehabilitation \& Research Conference: ENVISION 06, 2006 September 21-24, Kansas City, MO, USA.},
   editor = {Linda Merill},
   address = {Kansas City},
   publisher = {Envision},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {48--48},
   type = {Conference Paper},
   month = {September},
   year = {2006},
   keywords = {Indoor Navigation, Blind Users, Impaired Vision, Mobile Computing},
   language = {English},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-64/INPROC-2006-64.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de, kombrisn@studi.informatik.uni-stuttgart.de, thomas.ertl@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {We present a tactile-acoustical navigation assistant for real and virtual explorations of the environment.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-64&engl=0}
}
@article {ART-2009-22,
   author = {J. Chuang and D. Weiskopf and T. M{\"o}ller},
   title = {{Energy Aware Color Sets}},
   journal = {Computer Graphics Forum},
   publisher = {Wiley},
   volume = {28},
   number = {2},
   pages = {203--211},
   type = {Journal Article},
   month = {January},
   year = {2009},
   language = {English},
   cr-category = {I.3.3 Picture/Image Generation,     I.3.6 Computer Graphics Methodology and Techniques,     I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {We present a design technique for colors with the purpose of lowering the energy consumption of the display device. Our approach is based on a screen space variant energy model. The result of our design is a set of distinguishable iso-lightness colors guided by perceptual principles. We present two variations of our approach. One is based on a set of discrete user-named (categorical) colors, which are analyzed according to their energy consumption. The second is based on the constrained continuous optimization of color energy in the perceptually uniform CIELAB color space. We quantitatively compare our two approaches with a traditional choice of colors, demonstrating that we typically save approximately 40 percent of the energy. The color sets are applied to examples from the 2D visualization of nominal data and volume rendering of 3D scalar fields.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-22&engl=0}
}
@article {ART-2009-17,
   author = {H. Sanftmann and D. Weiskopf},
   title = {{Illuminated 3D Scatterplots}},
   journal = {Computer Graphics Forum (Proceedings of EuroVis 2009)},
   publisher = {Wiley},
   volume = {28},
   number = {3},
   pages = {751--758},
   type = {Journal Article},
   month = {June},
   year = {2009},
   language = {English},
   cr-category = {I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {In contrast to 2D scatterplots, the existing 3D variants have the advantage of showing one additional data dimension, but suffer from inadequate spatial and shape perception and therefore are not well suited to display structures of the underlying data. We improve shape perception by applying a new illumination technique to the pointcloud representation of 3D scatterplots. Points are classified as locally linear, planar, and volumetric structures, according to the eigenvalues of the inverse distance-weighted covariance matrix at each data element. Based on this classification, different lighting models are applied: codimension-2 illumination, surface illumination, and emissive volumetric illumination. Our technique lends itself to efficient GPU point rendering and can be combined with existing methods like semi-transparent rendering, halos, and depth- or attribute-based color coding. The user can interactively navigate in the dataset and manipulate the classification and other visualization parameters. We demonstrate our visualization technique by showing examples of multi-dimensional data and of generic pointcloud data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-17&engl=0}
}
@article {ART-2009-16,
   author = {M. Eissele and H. Sanftmann and T. Ertl},
   title = {{Interactively Refining Object-Recognition System}},
   journal = {Journal of WSCG},
   publisher = {Online},
   volume = {17},
   number = {1},
   pages = {1--8},
   type = {Journal Article},
   month = {June},
   year = {2009},
   issn = {1213-6972},
   language = {English},
   cr-category = {I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The availability of large geospatial data from different sources has dramatically increased, but for the usage of such data in geo-mashup or context-aware systems, a data fusion component is necessary. To solve the integration issue, classifiers are obtained by supervised training, with feature vectors derived from textual and geospatial attributes. In an application example, a coherent part of Germany was annotated by humans and used for supervised learning. Annotation by humans is not free of errors, which decreases the performance of the classifier. We show how visual analytics techniques can be used to efficiently detect such false annotations. Especially the textual features introduce high-dimensional feature vectors, where visual analytics becomes important and helps to understand and improve the trained classifiers. Particular technical components used in our systems are scatterplots, multiple coordinated views, and interactive data drill-down.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-16&engl=0}
}
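The detection idea in the abstract above (train on the human annotations, then let the trained classifier point at the training samples it contradicts most confidently) can be sketched in a few lines. The synthetic two-class data and the use of scikit-learn's logistic regression are assumptions for illustration; the paper works on textual and geospatial feature vectors and inspects suspects in coordinated scatterplot views rather than printing them.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(1)
# Synthetic stand-in for "textual + geospatial" features of two classes.
X = np.vstack([rng.normal(0.0, 1.0, (100, 2)),
               rng.normal(3.0, 1.0, (100, 2))])
y = np.array([0] * 100 + [1] * 100)
y[:5] = 1                                    # inject five false annotations

clf = LogisticRegression().fit(X, y)
# Probability the model assigns to each sample's *annotated* class;
# low values flag annotations the model disagrees with.
proba_of_label = clf.predict_proba(X)[np.arange(len(y)), y]
suspects = np.argsort(proba_of_label)[:10]
print("likely mis-annotated sample indices:", suspects)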
@article {ART-2007-20,
   author = {Andreas Hub and Tim Hartter and Stefan Kombrink and Thomas Ertl},
   title = {{Real and virtual explorations of the environment and interactive tracking of movable objects for the blind on the basis of tactile-acoustical maps and 3D environment models}},
   journal = {Disability and Rehabilitation: Assistive Technology},
   address = {London},
   publisher = {Informa Healthcare},
   volume = {3},
   number = {1},
   pages = {57--68},
   type = {Artikel in Zeitschrift},
   month = {Mai},
   year = {2007},
   doi = {10.1080/17483100701275677},
   language = {Englisch},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   ee = {http://dx.doi.org/10.1080/17483100701275677},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {This study describes the development of a multi-functional assistant system for the blind which combines localisation, real and virtual navigation within modelled environments and the identification and tracking of fixed and movable objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2007-20&engl=0}
}
@article {ART-2006-13,
   author = {Kurt Rothermel and Thomas Ertl and Dieter Fritsch and Paul J. K{\"u}hn and Bernhard Mitschang and Engelbert Westk{\"a}mper and Christian Becker and Dominique Dudkowski and Andreas Gutscher and Christian Hauser and Lamine Jendoubi and Daniela Nicklas and Steffen Volz and Matthias Wieland},
   title = {{SFB 627 – Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme}},
   journal = {Informatik - Forschung und Entwicklung},
   publisher = {Springer-Verlag},
   volume = {21},
   number = {1-2},
   pages = {105--113},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2006},
   language = {Deutsch},
   cr-category = {C.2.4 Distributed Systems,     H.2.4 Database Management Systems,     H.2.8 Database Applications,     H.3 Information Storage and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2006-13/ART-2006-13.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Industrielle Fertigung und Fabrikbetrieb (IFF);     Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp);     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Kommunikationsnetze und Rechnersysteme (IKR)},
   abstract = {Computer systems as we know them today typically do not adapt to the user and the user's situation. Early examples of systems that take the user's context into account through their relation to the real world are navigation systems, which can give directions based on a user's position and the current traffic situation. To enable innovative context-aware applications, the context, i.e., the state of the real world, must be captured by sensors, transmitted into the computer system, and made available there to applications in the form of dynamic environment models.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2006-13&engl=0}
}
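The pipeline named in the abstract's last sentence (sensors capture the state of the real world, the system keeps it in dynamic environment models, applications query those models) can be caricatured in a few lines. Every class and attribute name in this Python sketch is hypothetical and stands in no relation to the actual SFB 627 systems.

from dataclasses import dataclass, field
import time

@dataclass
class ContextObservation:
    entity: str          # e.g. a user or a vehicle
    attribute: str       # e.g. "position"
    value: object
    timestamp: float = field(default_factory=time.time)

class EnvironmentModel:
    """Keeps the latest sensed observation per (entity, attribute) pair."""
    def __init__(self):
        self._state = {}

    def update(self, obs: ContextObservation):
        self._state[(obs.entity, obs.attribute)] = obs

    def query(self, entity: str, attribute: str):
        return self._state.get((entity, attribute))

# A navigation application consuming sensed context:
model = EnvironmentModel()
model.update(ContextObservation("user42", "position", (48.7758, 9.1829)))
pos = model.query("user42", "position")
print("route from", pos.value)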
@inbook {INBOOK-2009-05,
   author = {D. Weiskopf},
   title = {{Geo-spatial context-aware visualization}},
   series = {Eurographics 2009 Areas Papers Proceedings},
   publisher = {-},
   pages = {1--2},
   type = {Beitrag in Buch},
   month = {Juni},
   year = {2009},
   language = {Englisch},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Mobile computer systems equipped with wireless communication and sensor technology - such as mobile phones with cameras - have become widely available. Context information, for example the user's current location or their physical environment, plays an increasingly important role in simplifying the interaction between users and such mobile information systems. A generic framework for federating heterogeneous spatial context models is briefly described. The federated information serves as basis for the visualization of spatially referenced data. Visualization challenges include efficient rendering on mobile devices, automatic adaptation of visualization techniques to context information, as well as consideration of the quality of context in the form of uncertainty visualization.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-05&engl=0}
}
@inbook {INBOOK-2008-12,
   author = {Andreas Hub},
   title = {{Taktil-Akustische Navigationsunterst{\"u}tzung f{\"u}r {\"a}ltere blinde und sehbehinderte Menschen auf der Basis erweiterter Umgebungsmodelle}},
   series = {UDay VI – Seniorengerechte Schnittstellen zur Technik},
   address = {Lengerich},
   publisher = {Pabst Science Publishers},
   pages = {104--107},
   type = {Beitrag in Buch},
   month = {April},
   year = {2008},
   isbn = {978-3-89967-467-5},
   keywords = {Navigation; Blind},
   language = {Deutsch},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,     K.4.2 Computers and Society Social Issues},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Usability tests were carried out under everyday conditions with elderly blind and severely visually impaired persons using the current prototype of a portable Tactile-Acoustical Navigation and Information Assistant (TANIA). First results and user reactions indicate that elderly persons with sensory disabilities can also learn to operate innovative assistive systems and benefit from their use.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2008-12&engl=0}
}
@book {BOOK-1998-01,
   author = {Kerstin Schneider (Ed.) and Hiltrud Betz and Cora Burger and Volker Claus and Waltraud Schweikhardt},
   title = {{Die Informatik AG - Telekooperation.}},
   address = {Stuttgart},
   publisher = {B.G. Teubner-Verlag},
   pages = {158},
   type = {Buch},
   month = {Januar},
   year = {1998},
   isbn = {3-519-12194-8},
   keywords = {Studieninformation; Frauenf{\"o}rderung; gender; computer science},
   language = {Deutsch},
   cr-category = {K.3 Computers and Education},
   ee = {http://www.informatik.uni-stuttgart.de/fakultaet/frauen/frauen.html,     http://medoc.informatik.uni-stuttgart.de/~medoc/},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware (Prof. Reuter);     Universit{\"a}t Stuttgart, Institut f{\"u}r Informatik, Formale Konzepte;     Universit{\"a}t Stuttgart, Institut f{\"u}r Informatik, Programmiersprachen und {\"U}bersetzerbau;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Verteilte Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Informatik, Visualisierung und Interaktive Systeme},
   abstract = {(not available)},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-1998-01&engl=0}
}