Department Visualisation and Interactive Systems: Publications

Bibliography 2010 BibTeX

@inproceedings {INPROC-2010-66,
   author = {Bernhard Schmitz},
   title = {{The ViibraCane - A White Cane for Tactile Navigation Guidance}},
   booktitle = {Proceedings of the California State University, Northridge Center on Disabilities' 25th Annual International Technology and Persons with Disabilities Conference (CSUN 2010)},
   address = {San Diego},
   publisher = {Online},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   type = {Conference Paper},
   month = {March},
   year = {2010},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-66/INPROC-2010-66.pdf},
   department = {University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
   abstract = {The ViibraCane, an extension to the navigation system TANIA, is a white cane that can be used as a tactile guiding device for navigation tasks.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-66&engl=1}
}
@inproceedings {INPROC-2010-64,
   author = {Bernhard Schmitz and Thomas Ertl},
   title = {{Making Digital Maps Accessible Using Vibrations}},
   booktitle = {Computers Helping People with Special Needs. 12th International Conference, ICCHP 2010, Proceedings},
   address = {Heidelberg},
   publisher = {Springer Berlin Heidelberg},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   series = {Lecture Notes in Computer Science},
   volume = {6179},
   pages = {100--107},
   type = {Conference Paper},
   month = {July},
   year = {2010},
   doi = {10.1007/978-3-642-14097-6_18},
   isbn = {978-3-642-14096-9},
   keywords = {Haptic Maps; Vibrating Maps; Blind Users},
   language = {English},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-64/INPROC-2010-64.pdf},
   department = {University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
   abstract = {In order to allow blind and deafblind people to use and explore electronically available maps, we have developed a system that displays maps in a tactile way using a standard rumble gamepad. The system is intended for both on-site and off-site use and therefore includes mechanisms for getting overviews of larger regions as well as for the exploration of small areas.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-64&engl=1}
}
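
The interaction scheme summarized in the abstract of INPROC-2010-64, map features rendered as vibration feedback on a standard rumble gamepad, can be illustrated with a minimal sketch. Everything here is an illustrative assumption rather than the authors' implementation: the rasterized map, the feature classes, the intensity table, and the set_rumble() stand-in for a real gamepad rumble API.

# Minimal sketch of tactile map exploration with a rumble gamepad
# (cf. INPROC-2010-64). All names and values are illustrative; the
# set_rumble() stub stands in for a real gamepad API.
FEATURE_INTENSITY = {
    "street": 0.3,    # weak vibration
    "building": 0.7,  # strong vibration
    "water": 1.0,     # strongest vibration
}

def feature_at(raster, x, y):
    """Return the feature class stored at raster cell (x, y), or None."""
    if 0 <= y < len(raster) and 0 <= x < len(raster[0]):
        return raster[y][x]
    return None

def set_rumble(intensity):
    """Hypothetical hardware call; replace with a real gamepad API."""
    print(f"rumble intensity: {intensity:.1f}")

def explore(raster, cursor_path):
    """Move the map cursor along cursor_path, emitting tactile feedback."""
    for x, y in cursor_path:
        set_rumble(FEATURE_INTENSITY.get(feature_at(raster, x, y), 0.0))

# Example: a 3x3 raster with a street running past a building and water.
raster = [
    [None, "street", None],
    ["building", "street", "water"],
    [None, "street", None],
]
explore(raster, [(0, 0), (0, 1), (1, 1), (2, 1)])
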
@inproceedings {INPROC-2010-40,
   author = {Julia Moehrmann and Xin Wang and Gunther Heidemann},
   title = {{Motion based situation recognition in group meetings}},
   booktitle = {Proceedings of SPIE Conference on Image Processing: Machine Vision Applications III},
   editor = {David Fofi and Kurt S. Niel},
   address = {San Jose, California, USA},
   publisher = {SPIE},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   series = {Proceedings of SPIE},
   volume = {7538},
   pages = {75380--75380},
   type = {Conference Paper},
   month = {January},
   year = {2010},
   doi = {10.1117/12.838963},
   keywords = {Group meeting recognition; situation recognition; Hidden Markov Model; Motion features},
   language = {English},
   cr-category = {I.5.4 Pattern Recognition Applications,     I.4.6 Image Processing and Computer Vision Segmentation,     I.4.9 Image Processing and Computer Vision Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-40/INPROC-2010-40.pdf},
   contact = {julia.moehrmann@vis.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
   abstract = {We present an unobtrusive vision based system for the recognition of situations in group meetings. The system uses a three-stage architecture, consisting of one video processing stage and two classification stages. The video processing stage detects motion in the videos and extracts up to 12 features from this data. The classification stage uses Hidden Markov Models to first identify the activity of every participant in the meeting and afterwards recognize the situation as a whole. The feature extraction uses position information of both hands and the face to extract motion features like speed, acceleration and motion frequency, as well as distance based features. We investigate the discriminative ability of these features and their applicability to the task of interaction recognition. A two-stage Hidden Markov Model classifier is applied to perform the recognition task. The developed system classifies the situation in 94\% of all frames in our video test set correctly, where 3\% of the test data is misclassified due to contradictory behavior of the participants. The results show that unimodal data can be sufficient to recognize complex situations.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-40&engl=1}
}
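
The two-stage architecture summarized in the abstract of INPROC-2010-40 invites a compact sketch of its first stage: one Hidden Markov Model per activity class, trained on motion-feature sequences, with an unseen sequence assigned to the class whose model scores it highest. The sketch below is an assumption-laden illustration, not the paper's implementation: it uses the hmmlearn package, random placeholder data, and invented activity names and model sizes.

# Sketch of stage one of a two-stage HMM classifier (cf. INPROC-2010-40):
# one GaussianHMM per activity class, maximum-likelihood classification.
# Data, activity names, and model sizes are illustrative placeholders.
import numpy as np
from hmmlearn.hmm import GaussianHMM

rng = np.random.default_rng(0)
N_FEATURES = 12  # e.g. hand/face speed, acceleration, motion frequency

def train_activity_models(sequences_by_class, n_states=3):
    """Fit one HMM per activity class on its training sequences."""
    models = {}
    for activity, seqs in sequences_by_class.items():
        X = np.vstack(seqs)                # stack sequences for hmmlearn
        lengths = [len(s) for s in seqs]   # per-sequence lengths
        m = GaussianHMM(n_components=n_states, covariance_type="diag", n_iter=20)
        m.fit(X, lengths)
        models[activity] = m
    return models

def classify(models, seq):
    """Assign seq to the activity whose HMM gives the highest log-likelihood."""
    return max(models, key=lambda a: models[a].score(seq))

# Placeholder training data: random feature sequences per activity.
train = {
    "speaking": [rng.normal(1.0, 0.5, (40, N_FEATURES)) for _ in range(5)],
    "writing": [rng.normal(-1.0, 0.5, (40, N_FEATURES)) for _ in range(5)],
}
models = train_activity_models(train)
print(classify(models, rng.normal(1.0, 0.5, (30, N_FEATURES))))
# Stage two (not shown) would run a second HMM over the per-participant
# activity labels to recognize the group situation as a whole.
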
@inproceedings {INPROC-2010-39,
   author = {Julia Moehrmann and Gunther Heidemann},
   title = {{Automatic trajectory clustering for generating ground truth data sets}},
   booktitle = {Proceedings of SPIE Conference on Image Processing: Machine Vision Applications III},
   editor = {David Fofi and Kurt S. Niel},
   address = {San Jose, California, USA},
   publisher = {SPIE},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   series = {Proceedings of SPIE},
   volume = {7538},
   pages = {753808--753808},
   type = {Conference Paper},
   month = {January},
   year = {2010},
   doi = {10.1117/12.838954},
   keywords = {Hidden Markov Model; HMM based representation; clustering; ground truth data},
   language = {English},
   cr-category = {I.2.10 Vision and Scene Understanding,     I.4.8 Image Processing and Computer Vision Scene Analysis,     I.5.3 Pattern Recognition Clustering,     I.5.4 Pattern Recognition Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-39/INPROC-2010-39.pdf},
   contact = {julia.moehrmann@vis.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
   abstract = {We present a novel approach towards the creation of vision based recognition tasks. Many domain specific recognition systems have been presented in the past which make use of the large amounts of available video data. The creation of ground truth data sets for the training of these systems remains difficult and tedious. We present a system which automatically creates clusters of 2D trajectories. The results of this clustering can then be used to perform the actual labeling of the data, or rather the selection of events or features of interest by the user. The selected clusters can be used as positive training data for a user defined recognition task -- without the need to adapt the system. The proposed technique reduces the necessary user interaction and allows the creation of application independent ground truth data sets with minimal effort. In order to achieve the automatic clustering we have developed a distance metric based on the Hidden Markov Model representations of three sequences -- movement, speed and orientation -- derived from the initial trajectory. The proposed system yields promising results and could prove to be an important step towards mining very large data sets.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-39&engl=1}
}
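
One plausible reading of the HMM-based clustering in INPROC-2010-39 is sketched below: fit one HMM per trajectory, define a symmetric cross-likelihood dissimilarity between trajectories, and cluster the resulting distance matrix hierarchically. This is a hedged approximation: the paper derives three sequences per trajectory (movement, speed and orientation), which this sketch collapses into a single placeholder feature sequence, and it uses hmmlearn and scipy rather than the authors' tooling.

# Sketch of HMM-based trajectory clustering (cf. INPROC-2010-39).
# One HMM per trajectory; dissimilarity from cross log-likelihoods.
import numpy as np
from hmmlearn.hmm import GaussianHMM
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform

rng = np.random.default_rng(1)

def fit_hmm(seq, n_states=3):
    """Fit a small Gaussian HMM to one trajectory's feature sequence."""
    m = GaussianHMM(n_components=n_states, covariance_type="diag", n_iter=20)
    m.fit(seq)
    return m

def dissimilarity(m_i, seq_i, m_j, seq_j):
    """Symmetric cross-likelihood dissimilarity between trajectory HMMs."""
    d_ij = (m_i.score(seq_i) - m_j.score(seq_i)) / len(seq_i)
    d_ji = (m_j.score(seq_j) - m_i.score(seq_j)) / len(seq_j)
    return max(0.0, 0.5 * (d_ij + d_ji))  # clamp sampling noise

# Placeholder trajectories: two motion patterns, three trajectories each.
trajs = [rng.normal(mu, 0.3, (50, 2)) for mu in (0.0, 0.0, 0.0, 2.0, 2.0, 2.0)]
models = [fit_hmm(t) for t in trajs]

n = len(trajs)
D = np.zeros((n, n))
for i in range(n):
    for j in range(i + 1, n):
        D[i, j] = D[j, i] = dissimilarity(models[i], trajs[i], models[j], trajs[j])

# Hierarchical clustering on the condensed distance matrix; the resulting
# clusters would then be labeled by the user as ground truth data.
labels = fcluster(linkage(squareform(D), method="average"), t=2, criterion="maxclust")
print(labels)
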
@inproceedings {INPROC-2010-35,
   author = {Julia M{\"o}hrmann and Gunther Heidemann and Oliver Siemoneit and Christoph Hubig and Uwe-Philipp K{\"a}ppeler and Paul Levi},
   title = {{Context Generation with Image Based Sensors: An Interdisciplinary Enquiry on Technical and Social Issues and their Implications for System Design}},
   booktitle = {Proceedings of World Academy of Science, Engineering and Technology},
   address = {Cape Town},
   publisher = {WASET},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {262--268},
   type = {Conference Paper},
   month = {January},
   year = {2010},
   keywords = {Context-aware computing; ethical and social issues; image recognition; requirements in system design},
   language = {English},
   cr-category = {I.4.1 Digitization and Image Capture,     I.4.8 Image Processing and Computer Vision Scene Analysis},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Philosophie, Abteilung f{\"u}r Wissenschaftstheorie und Technikphilosophie (IP/WTTP);     University of Stuttgart, Institute of Parallel and Distributed Systems, Image Understanding;     University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
   abstract = {Image data holds a large amount of different context information. However, as of today, these resources remain largely untouched. It is thus the aim of this paper to present a basic technical framework which allows for a quick and easy exploitation of context information from image data, especially by non-expert users. Furthermore, the proposed framework is discussed in detail concerning important social and ethical issues which demand special requirements in system design. Finally, a first sensor prototype is presented which meets the identified requirements. Additionally, necessary implications for the software and hardware design of the system are discussed, rendering a sensor system which could be regarded as a good, acceptable and justifiable technical solution, thereby enabling the extraction of context information from image data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-35&engl=1}
}
@inproceedings {INPROC-2010-19,
   author = {Carlos L{\"u}bbe and Andreas Brodt and Nazario Cipriani and Harald Sanftmann},
   title = {{NexusVIS: A Distributed Visualization Toolkit for Mobile Applications (Demonstration)}},
   booktitle = {Proceedings of the 8th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '10); Mannheim, Germany, March 2010},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--3},
   type = {Conference Paper},
   month = {March},
   year = {2010},
   isbn = {978-1-4244-5328-3},
   keywords = {Visualization; Data Stream Processing},
   language = {English},
   cr-category = {H.2.4 Database Management Systems},
   contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;     University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
   abstract = {Many mobile pervasive applications need to visualize information about the user's geographic surroundings combined with data from sensors, which determine the user's context. We demonstrate NexusVIS, a distributed visualization toolkit for mobile applications. By building upon an existing data stream processing system we enable applications to define distributed visualization processes as continuous queries. This allows applications to define visualization semantics descriptively. Moreover, NexusVIS is capable of adapting the visual query at runtime, and thus allows the user to navigate the visualized scene both automatically and manually.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-19&engl=1}
}
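
The continuous-query idea in the INPROC-2010-19 abstract, declaring a visualization pipeline as a composition of operators over a sensor stream, can be illustrated with a small generator-based sketch. The operator names, the simulated position stream, and the print-based render sink are illustrative assumptions, not the NexusVIS API.

# Sketch of a visualization pipeline declared as a continuous query over
# a (simulated) position stream (cf. NexusVIS, INPROC-2010-19).
# All operator names and the rendering step are illustrative stand-ins.
import random

def position_stream(n=20):
    """Simulated stream of (x, y) positions from a location sensor."""
    for _ in range(n):
        yield (random.uniform(0, 100), random.uniform(0, 100))

def within_region(stream, x0, y0, x1, y1):
    """Continuous filter: pass only positions inside the user's map window."""
    for x, y in stream:
        if x0 <= x <= x1 and y0 <= y <= y1:
            yield (x, y)

def to_screen(stream, scale=4.0):
    """Continuous map operator: project world coordinates to screen pixels."""
    for x, y in stream:
        yield (int(x * scale), int(y * scale))

def render(stream):
    """Sink: in a real system this would update the visualized scene."""
    for px, py in stream:
        print(f"draw marker at ({px}, {py})")

# The 'query' itself: composed declaratively, then run continuously.
render(to_screen(within_region(position_stream(), 25, 25, 75, 75)))
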