Institute for Parallel and Distributed Systems (IPVS)

Publications

An overview of publications of the Institute for Parallel and Distributed Systems.

Publications of the Distributed Systems (VS) department: Bibliography 2009 (BibTeX)

 
@inproceedings {INPROC-2009-86,
   author = {Matthias Grossmann and Nicola H{\"o}nle and Carlos L{\"u}bbe and Harald Weinschrott},
   title = {{An Abstract Processing Model for the Quality of Context Data}},
   booktitle = {Proceedings of the 1st International Workshop on Quality of Context},
   publisher = {Springer},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {132--143},
   type = {Workshop Paper},
   month = {June},
   year = {2009},
   keywords = {uncertainty; inconsistency; trust; processing model},
   language = {English},
   cr-category = {H.2.8 Database Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-86/INPROC-2009-86.pdf,     http://dx.doi.org/10.1007/978-3-642-04559-2_12},
   contact = {Matthias.Grossmann@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;     University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Data quality can be relevant to many applications. Especially applications coping with sensor data cannot take a single sensor value for granted. Because of technical and physical restrictions each sensor reading is associated with an uncertainty. To improve quality, an application can combine data values from different sensors or, more generally, data providers. But as different data providers may have diverse opinions about a certain real world phenomenon, another issue arises: inconsistency. When handling data from different data providers, the application needs to consider their trustworthiness. This naturally introduces a third aspect of quality: trust. In this paper we propose a novel processing model integrating the three aspects of quality: uncertainty, inconsistency and trust.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-86&engl=1}
}
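
The processing model described in the entry above combines uncertainty, inconsistency and trust. Purely as an illustration (this is not code from the paper; the fusion rule, function names, parameters and the 3-sigma threshold are assumptions made for this sketch), readings from several providers could be fused by precision- and trust-weighted averaging, flagging readings that deviate strongly from the fused value as inconsistent:

from math import sqrt

# Fuse uncertain readings (value, stddev, trust) from several context providers.
def fuse(readings):
    weights = [t / (s * s) for _, s, t in readings]   # weight by precision and trust
    total = sum(weights)
    mean = sum(w * v for w, (v, _, _) in zip(weights, readings)) / total
    stddev = sqrt(1.0 / total)                        # combined uncertainty
    # flag inconsistency if any reading deviates strongly from the fused value
    inconsistent = any(abs(v - mean) > 3 * s for v, s, _ in readings)
    return mean, stddev, inconsistent

# Example: two trusted, consistent providers and one less trusted outlier.
print(fuse([(21.0, 0.5, 0.9), (21.4, 0.8, 0.7), (25.0, 0.5, 0.4)]))
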
@inproceedings {INPROC-2009-79,
   author = {Hannes Wolf and Klaus Herrmann and Kurt Rothermel},
   title = {{Modeling Dynamic Context Awareness for Situated Workflows}},
   booktitle = {OTM 2009 Workshops},
   editor = {R. Meersman and P. Herrero and T. Dillon},
   address = {Berlin, Heidelberg},
   publisher = {Springer Verlag},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {5872},
   pages = {98--107},
   type = {Workshop Paper},
   month = {November},
   year = {2009},
   language = {English},
   cr-category = {H.4.1 Office Automation,     H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-79/INPROC-2009-79.pdf,     http://www.springerlink.com/content/r6783472331327u4},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {A major challenge for pervasive computing is to support continuous adaptation of applications to the behavior of the user. Recent research has adopted classical workflows as an alternative programming paradigm for pervasive applications, and approaches for context-aware workflow models have been presented. However, the current approaches suffer from the low flexibility of classical workflow models. We present a solution that allows attaching workflows to real-world objects and defining relevant context dynamically in relation to those objects. The benefits are a dynamic, yet simple modeling of context constraints and events in pervasive workflows and a greatly reduced amount of context information that must be provided to the workflow.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-79&engl=1}
}
@inproceedings {INPROC-2009-71,
   author = {Davide Frey and Rachid Guerraoui and Anne-Marie Kermarrec and Boris Koldehofe and Martin Mogensen and Maxime Monod and Vivien Qu{\'e}ma},
   title = {{Heterogeneous Gossip}},
   booktitle = {Proceedings of the 10th ACM/IFIP/USENIX International Conference on Middleware},
   publisher = {Springer},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {42--61},
   type = {Conference Paper},
   month = {November},
   year = {2009},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {http://delivery.acm.org/10.1145/1660000/1656984/a3-frey.pdf?key1=1656984&key2=2295269621&coll=GUIDE&dl=ACM&CFID=83635975&CFTOKEN=34871073},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Gossip-based information dissemination protocols are considered easy to deploy, scalable and resilient to network dynamics. Load-balancing is inherent in these protocols as the dissemination work is evenly spread among all nodes. Yet, large-scale distributed systems are usually heterogeneous with respect to network capabilities such as bandwidth. In practice, a blind load-balancing strategy might significantly hamper the performance of the gossip dissemination. This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynamically adapt their contribution to the gossip dissemination according to their bandwidth capabilities. Using a continuous, itself gossip-based, approximation of relative bandwidth capabilities, HEAP dynamically leverages the most capable nodes by increasing their fanout, while decreasing by the same proportion that of less capable nodes. HEAP preserves the simple and proactive (churn adaptation) nature of gossip, while significantly improving its effectiveness. We extensively evaluate HEAP in the context of a video streaming application on a testbed of 270 PlanetLab nodes. Our results show that HEAP significantly improves the quality of the streaming over standard homogeneous gossip protocols, especially when the stream rate is close to the average available bandwidth.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-71&engl=1}
}
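
As a rough illustration of the fanout adaptation described in the HEAP abstract above (this is not code from the paper; function names and the example numbers are invented), a node could scale a base gossip fanout by its bandwidth relative to the gossip-estimated system average:

# Illustrative sketch only: scale the gossip fanout by relative bandwidth so
# capable nodes relay to more peers while weak nodes relay to fewer, keeping
# the average fanout (and hence total dissemination load) roughly constant.
def adapted_fanout(base_fanout, own_bandwidth, estimated_avg_bandwidth):
    return max(1, round(base_fanout * own_bandwidth / estimated_avg_bandwidth))

# A 2 Mbit/s node in a system whose average bandwidth is 1 Mbit/s doubles its fanout.
print(adapted_fanout(base_fanout=7, own_bandwidth=2_000_000,
                     estimated_avg_bandwidth=1_000_000))  # -> 14
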
@inproceedings {INPROC-2009-66,
   author = {Lars Geiger and Ronald Schertle and Frank D{\"u}rr and Kurt Rothermel},
   title = {{Temporal Addressing for Mobile Context-Aware Communication}},
   booktitle = {Proceedings of the Sixth Annual International Conference on Mobile and Ubiquitous Systems: Computing, Networking and Services (MobiQuitous '09), Toronto, ON, Canada, July 13-16, 2009},
   publisher = {ICST},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--10},
   type = {Conference Paper},
   month = {June},
   year = {2009},
   language = {English},
   cr-category = {C.2.1 Network Architecture and Design,     C.2.2 Network Protocols,     C.2.4 Distributed Systems,     C.2.6 Internetworking},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-66/INPROC-2009-66.pdf,     http://www.ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5326393},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Mobile clients in context-aware systems benefit from the indirect addressing of users via their context (contextcast), such as addressing messages to all users in downtown Toronto whose age is below 35. There is, however, almost no support for a temporal decoupling in such a contextcast system, i.e., the addressing of users that were or will be in a certain context in the past or future, respectively. This could for instance be used to distribute the minutes of a meeting to all people who attended the meeting in room 1.138, 3 days ago, between 1 and 3 pm. To enable a context-aware communication system to address messages with temporal relations, especially those contexts in the past, the system needs to manage information about user context histories. This poses the risk that the system can be abused to profile users, which would most probably hinder acceptance. Therefore, privacy aspects need to be considered in the core design of such a system. We present an extension to our earlier work, which allows a temporal decoupling of messages and users and requires very little additional overhead to manage historic context information. The solution includes mechanisms to efficiently disseminate messages to both users with past and future contexts, while effectively preventing user profiling through the use of virtual identities.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-66&engl=1}
}
@inproceedings {INPROC-2009-51,
   author = {Ralph Lange and Harald Weinschrott and Lars Geiger and Andre Blessing and Frank D{\"u}rr and Kurt Rothermel and Hinrich Sch{\"u}tze},
   title = {{On a Generic Uncertainty Model for Position Information}},
   booktitle = {Proceedings of the 1st International Workshop on Quality of Context},
   publisher = {Springer},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--12},
   type = {Workshop Paper},
   month = {June},
   year = {2009},
   keywords = {data uncertainty; position information; position accuracy; spatial queries},
   language = {English},
   cr-category = {H.2.8 Database Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-51/INPROC-2009-51.pdf,     http://www.springerlink.com/content/85450711831008k0/},
   contact = {ralph.lange@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute for Natural Language Processing;     University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Position information of moving as well as stationary objects is generally subject to uncertainties due to inherent measuring errors of positioning technologies, explicit tolerances of position update protocols, and approximations by interpolation algorithms. There exist a variety of approaches for specifying these uncertainties by mathematical uncertainty models such as tolerance regions or the Dilution of Precision (DOP) values of GPS. In this paper we propose a principled generic uncertainty model that integrates the different approaches and derive a comprehensive query interface for processing spatial queries on uncertain position information of different sources based on this model. Finally, we show how to implement our approach with prevalent existing uncertainty models.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-51&engl=1}
}
@inproceedings {INPROC-2009-48,
   author = {Hanna Eberle and Stefan F{\"o}ll and Klaus Herrmann and Frank Leymann and Annapaola Marconi and Tobias Unger and Hannes Wolf},
   title = {{Enforcement from the Inside: Improving Quality of Business in Process Management}},
   booktitle = {2009 IEEE International Conference on Web Services (ICWS 2009)},
   address = {Los Alamitos, CA, USA},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {405--412},
   type = {Conference Paper},
   month = {July},
   year = {2009},
   doi = {10.1109/ICWS.2009.82},
   isbn = {978-0-7695-3709-2},
   keywords = {Business Process Management; BPM; BPEL; Enforcement},
   language = {English},
   cr-category = {H.4.1 Office Automation},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-48/INPROC-2009-48.pdf,     http://ieeexplore.ieee.org/search/wrapper.jsp?arnumber=5175850,     http://www2.computer.org/portal/web/csdl/doi/10.1109/ICWS.2009.82},
   contact = {unger@iaas.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Architecture of Application Systems;     University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {In this paper we introduce a new modeling tool for constraint handling in the area of workflow technology. The constraint handlers can be used to improve the quality of business processes without changing already existing business logic. Today's workflow languages provide no means to explicitly model constraints and the actions to take when constraints are violated. BPEL, however, provides fault and event handling mechanisms for reacting to events that are not expected during normal execution. Using BPEL as the workflow language, we exploit these fault and event handling mechanisms to extend BPEL with a constraint handling mechanism, without changing any existing semantics. By integrating this constraint handling extension into BPEL, we provide an approach for quality-driven process modeling with the BPEL language.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-48&engl=1}
}
@inproceedings {INPROC-2009-44,
   author = {Andreas Grau and Klaus Herrmann and Kurt Rothermel},
   title = {{Efficient and Scalable Network Emulation using Adaptive Virtual Time}},
   booktitle = {Proceedings of the 18th International Conference on Computer Communications and Networks (ICCCN 2009)},
   publisher = {IEEE Communications Society},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--6},
   type = {Conference Paper},
   month = {August},
   year = {2009},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-44/INPROC-2009-44.pdf,     http://dx.doi.org/10.1109/ICCCN.2009.5235306},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Performance analysis and functionality testing are major parts of developing distributed software systems. Since the number of communicating software instances heavily influences the behavior of distributed applications and communication protocols, evaluation scenarios have to consider a large number of nodes. Network emulation provides an infrastructure for running these experiments using real prototype implementations in a controllable and realistic environment. Large-scale experiments, however, have a high resource consumption which often exceeds available physical testbed resources. Time dilation allows for reducing the resource demands of a scenario at the expense of the experiment's runtime. However, current approaches only consider a constant time dilation factor, which wastes a lot of resources in case of scenarios with varying load. We propose a framework for adaptive time virtualization that significantly reduces the runtime of experiments by improving resource utilization in network emulation testbeds. In this framework, resource demands are monitored and the time dilation factor is dynamically adapted to the required level. Our evaluation shows that adaptive virtual time in combination with our lightweight node virtualization architecture allows us to increase the possible scenario sizes by more than an order of magnitude and, at the same time, ensure unbiased emulation results. This represents an important contribution to making network emulation systems highly scalable.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-44&engl=1}
}
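
To illustrate the idea of adapting the time dilation factor (TDF) to the current testbed load, here is a minimal sketch under assumptions of this edit (it is not the algorithm from the paper; the thresholds and names are invented):

# Illustrative sketch: grow the TDF (slow down virtual time) when the physical
# host is saturated, shrink it again when there is spare capacity, so that
# emulation fidelity is preserved without wasting runtime in light-load phases.
def adapt_tdf(current_tdf, cpu_utilization, low=0.5, high=0.9):
    if cpu_utilization > high:
        return current_tdf * 2           # overload: dilate time further
    if cpu_utilization < low and current_tdf > 1:
        return max(1, current_tdf // 2)  # underload: run closer to real time
    return current_tdf
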
@inproceedings {INPROC-2009-42,
   author = {Faraz Memon and Daniel Tiebler and Frank D{\"u}rr and Kurt Rothermel and Marco Tomsu and Peter Domschitz},
   title = {{Scalable Spatial Information Discovery over Distributed Hash Tables}},
   booktitle = {Proceedings of the 4th International Conference on COMmunication System softWAre and middlewaRE (COMSWARE'09), Dublin, Ireland, June 2009},
   publisher = {ACM},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--12},
   type = {Conference Paper},
   month = {June},
   year = {2009},
   language = {English},
   cr-category = {C.2.1 Network Architecture and Design,     H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-42/INPROC-2009-42.pdf,     http://doi.acm.org/10.1145/1621890.1621892},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {In this paper, we present a Peer-to-Peer (P2P) spatial information discovery system that enables spatial range queries over Distributed Hash Tables (DHTs). To represent the spatial information, our system utilizes a less-distorting octahedral map projection, in contrast to the quadrilateral projections used by the majority of previously proposed systems. We also introduce a Space-Filling Curve (SFC)-based data placement strategy that reduces the probability of data hot-spots in the network. Moreover, we show that our system achieves scalable resolution of location-based range queries by utilizing a tree-based query optimization algorithm. Compared to the basic query resolution algorithm, the query optimization algorithm reduces the average number of parallel messages used to resolve a query by 96\%.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-42&engl=1}
}
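
The placement strategy above maps spatial cells onto DHT keys via a space-filling curve. Purely for illustration (the paper uses an octahedral projection and its own SFC-based placement, not the code below), a Morton/Z-order key shows how two grid coordinates can be interleaved into a single key so that nearby cells tend to receive nearby keys:

# Illustrative Z-order (Morton) key: interleave the bits of x and y.
def morton_key(x, y, bits=16):
    key = 0
    for i in range(bits):
        key |= ((x >> i) & 1) << (2 * i)        # even bit positions from x
        key |= ((y >> i) & 1) << (2 * i + 1)    # odd bit positions from y
    return key

print(hex(morton_key(3, 5)))  # -> 0x27
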
@inproceedings {INPROC-2009-36,
   author = {S{\'e}rgio Alves and Boris Koldehofe and Hugo Miranda and Francois Taiani},
   title = {{Design of a Backup Network for Catastrophe Scenarios}},
   booktitle = {Proceedings of the ACM International Workshop on Advanced Topics in Mobile Computing for Emergency Management: Communication and Computing Platforms (MCEM)},
   publisher = {ACM Press},
   institution = {University of Stuttgart, Faculty of Computer Science, Germany},
   pages = {613--617},
   type = {Workshop Paper},
   month = {June},
   year = {2009},
   doi = {10.1145/1582379.1582512},
   language = {English},
   cr-category = {C.2.1 Network Architecture and Design,     C.2.4 Distributed Systems},
   department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Distributed Systems},
   abstract = {Communication networks play a fundamental role in the response to a massive catastrophe, like an earthquake or a large-scale terrorist attack to a major urban area. In such situations, command centres must be able to rely on a fully operational communication network, for example to learn about on-going situations and allocate and guide the rescue teams. Communication is bidirectional: once in the field, these teams will feed the command centre with a more accurate view of the situation, contributing to the efficient allocation of the resources. Failures in this network, even if localised to some of the regions affected by the catastrophe, can have costs both monetary and in human lives. In this position paper, we propose the creation of a redundant, best-effort, emergency communication network that could serve to mitigate localised failures using off-the-shelf widespread technology. We give an overview of an architecture for a backup network, highlight the possible advantage of such an architecture to disaster management and discuss challenges that need to be overcome in realizing it.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-36&engl=1}
}
@inproceedings {INPROC-2009-32,
   author = {Lars Geiger and Frank D{\"u}rr and Kurt Rothermel},
   title = {{On Contextcast: A Context-aware Communication Mechanism}},
   booktitle = {IEEE International Conference on Communications, 2009. ICC '09.},
   publisher = {IEEE Communications Society},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--6},
   type = {Conference Paper},
   month = {June},
   year = {2009},
   language = {English},
   cr-category = {C.2.1 Network Architecture and Design,     C.2.2 Network Protocols,     C.2.4 Distributed Systems,     C.2.6 Internetworking},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-32/INPROC-2009-32.pdf,     http://dx.doi.org/10.1109/ICC.2009.5199239},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {The dissemination of messages according to clients' contexts (i.e., location and other attributes) opens up new possibilities in context-aware systems. While geocast or content-based publish/subscribe forward messages according to client location or attributes, respectively, neither uses a combination of the two. In this paper, we present this new communication paradigm and the challenges it poses. We also extend concepts from publish/subscribe networks to efficiently deal with highly dynamic user location to lower update rates by approximating the user's location. This reduces update rates by between 25\% and 90\%, depending on the granularity of the approximation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-32&engl=1}
}
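
The update-rate reduction reported in the abstract above comes from approximating the user's location. As a simplified sketch of that general idea (not the paper's mechanism; the class, parameters and cell granularity are invented here), a client could report only the grid cell it is in and stay silent while it remains inside that cell:

# Illustrative sketch: propagate a location update only when the user leaves
# the currently announced grid cell.
class ApproximatedLocation:
    def __init__(self, cell_size_deg=0.01):
        self.cell_size = cell_size_deg
        self.current_cell = None

    def update(self, lat, lon):
        cell = (int(lat // self.cell_size), int(lon // self.cell_size))
        if cell != self.current_cell:
            self.current_cell = cell
            return cell      # send this coarse location to the contextcast system
        return None          # suppressed: still inside the announced cell
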
@inproceedings {INPROC-2009-24,
   author = {Harald Weinschrott and Frank D{\"u}rr and Kurt Rothermel},
   title = {{Efficient Capturing of Environmental Data with Mobile RFID Readers}},
   booktitle = {Proceedings of the 10th International Conference on Mobile Data Management (MDM'09); Taipei, Taiwan, May 18-20, 2009},
   address = {Taipei, Taiwan},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--10},
   type = {Conference Paper},
   month = {May},
   year = {2009},
   keywords = {ad-hoc; mobile; RFID-sensor; urban sensing},
   language = {English},
   cr-category = {C.2 Computer-Communication Networks},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-24/INPROC-2009-24.pdf,     http://doi.ieeecomputersociety.org/10.1109/MDM.2009.15,     http://www.comnsense.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {In this paper we introduce a novel scenario for environmental sensing based on the combination of simple and cheap RFID-based sensors and mobile devices such as mobile phones with integrated RFID readers. We envision a system where the mobile nodes cooperatively read sensors installed in the environment as they pass by and transmit the data to a server infrastructure. To meet quality requirements on the one hand and to achieve efficiency in terms of communication cost and energy consumption on the other, this paper presents several algorithms for coordinating update operations. Mobile nodes form an ad-hoc network for the cooperative management of requested update times to meet the desired update interval and to avoid redundant sensor readings and collisions during read operations. Besides this decentralized coordination algorithm, we also show a complementary algorithm that exploits infrastructure-based coordination. Extensive simulations show that our algorithms achieve a high quality of sensor updates, with nearly 100\% of the possible updates being performed. Moreover, the algorithms achieve very high energy efficiency, allowing for several hundred hours of operation on a typical mobile phone battery.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-24&engl=1}
}
@inproceedings {INPROC-2009-141,
   author = {Andreas Grau and Klaus Herrmann and Kurt Rothermel},
   title = {{Exploiting Emulation Testbeds for Security Experiments}},
   booktitle = {Proceedings of the Workshop on Experimental platforms for Internet resilience, security and stability research (invited paper)},
   address = {Brussels},
   publisher = {JRC European Commission},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--2},
   type = {Workshop Paper},
   month = {June},
   year = {2009},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-141/INPROC-2009-141.pdf},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Security, resilience and stability tests are an important step in protecting the information- and communication-based digital society. Experimental platforms allow researchers to evaluate today's and tomorrow's Internet architectures, protocols and applications. However, selecting the most adequate platform is difficult. In this paper we first present a commonly used approach based on real-world testbeds. We identify several problems arising with this approach and show how emulation-based testbeds can avoid them. Finally, we present a hybrid approach that combines the benefits of both systems to provide a scalable, repeatable, controllable, flexible and secure experimental platform.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-141&engl=1}
}
@inproceedings {INPROC-2009-07,
   author = {Ralph Lange and Nazario Cipriani and Lars Geiger and Matthias Gro{\ss}mann and Harald Weinschrott and Andreas Brodt and Matthias Wieland and Stamatia Rizou and Kurt Rothermel},
   title = {{Making the World Wide Space Happen: New Challenges for the Nexus Context Platform}},
   booktitle = {Proceedings of the 7th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '09). Galveston, TX, USA. March 2009},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {March},
   year = {2009},
   keywords = {Nexus; context; mobile context-aware applications; context-awareness; context management; World Wide Space; stream-processing; situation recognition; reasoning; workflows; quality of context},
   language = {English},
   cr-category = {H.2.8 Database Applications,     H.3.4 Information Storage and Retrieval Systems and Software,     H.3.5 Online Information Services},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-07/INPROC-2009-07.pdf,     http://www.nexus.uni-stuttgart.de/,     http://dx.doi.org/10.1109/PERCOM.2009.4912782},
   contact = {ralph.lange@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;     University of Stuttgart, Institute of Architecture of Application Systems;     University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Context-aware applications rely on models of the physical world. Within the Nexus project, we envision a World Wide Space which provides the conceptual and technological framework for integrating and sharing such context models in an open, global platform of context providers. In our ongoing research we tackle important challenges in such a platform including distributed processing of streamed context data, situation recognition by distributed reasoning, efficient management of context data histories, and quality of context information. In this paper we discuss our approach to cope with these challenges and present an extended Nexus architecture.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-07&engl=1}
}
@inproceedings {INPROC-2009-06,
   author = {Ralph Lange and Tobias Farrell and Frank D{\"u}rr and Kurt Rothermel},
   title = {{Remote Real-Time Trajectory Simplification}},
   booktitle = {Proceedings of the 7th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '09). Galveston, TX, USA. March 2009},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--10},
   type = {Conference Paper},
   month = {March},
   year = {2009},
   keywords = {Remote trajectory simplification; tracking; dead reckoning; moving objects database; MOD; line simplification},
   language = {English},
   cr-category = {H.2.8 Database Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-06/INPROC-2009-06.pdf,     http://dx.doi.org/10.1109/PERCOM.2009.4912767},
   contact = {ralph.lange@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Moving objects databases (MODs) have been proposed for managing trajectory data, an important kind of information for pervasive applications. To save storage capacity, a MOD generally stores simplified trajectories only. A simplified trajectory approximates the actual trajectory of the mobile object according to a certain accuracy bound. In order to minimize the costs of communicating position information between mobile object and MOD, the trajectory simplification should be performed by the mobile object. To assure that the MOD always has a valid simplified trajectory of the remote object, we propose the generic remote trajectory simplification protocol (GRTS) allowing for computing and managing a simplified trajectory in such a system in real-time. We show how to combine GRTS with existing line simplification algorithms for computing the simplified trajectory and analyze trade-offs between the different algorithms. Our evaluations show that GRTS outperforms the two existing approaches by a factor of two and more in terms of reduction efficiency. Moreover, on average, the reduction efficiency of GRTS is only 12\% worse compared to optimal offline simplification.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-06&engl=1}
}
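
A central building block of remote trajectory simplification is the mobile-side test of whether the current position still lies within the accuracy bound of the simplified trajectory. The following sketch illustrates only that deviation test (it is not GRTS itself; all names are invented for this example):

# Illustrative sketch: distance from a position to the current simplified
# segment; an update is sent to the MOD only when the bound is exceeded.
def point_segment_distance(p, a, b):
    (px, py), (ax, ay), (bx, by) = p, a, b
    dx, dy = bx - ax, by - ay
    if dx == 0 and dy == 0:
        return ((px - ax) ** 2 + (py - ay) ** 2) ** 0.5
    t = max(0.0, min(1.0, ((px - ax) * dx + (py - ay) * dy) / (dx * dx + dy * dy)))
    cx, cy = ax + t * dx, ay + t * dy
    return ((px - cx) ** 2 + (py - cy) ** 2) ** 0.5

def needs_update(position, segment_start, segment_end, accuracy_bound):
    return point_segment_distance(position, segment_start, segment_end) > accuracy_bound
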
@inproceedings {INPROC-2009-04,
   author = {Andreas Benzing and Klaus Herrmann and Boris Koldehofe and Kurt Rothermel},
   title = {{Identifying the Challenges in Reducing Latency in GSN using Predictors}},
   booktitle = {Workshops der Wissenschaftlichen Konferenz Kommunikation in Verteilten Systemen 2009 (WowKiVS 2009)},
   editor = {Tiziana Margaria and Julia Padberg and Gabriele Taentzer},
   address = {Kassel},
   publisher = {EASST},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   series = {Electronic Communications of the EASST},
   volume = {17},
   pages = {1--6},
   type = {Workshop Paper},
   month = {March},
   year = {2009},
   issn = {1863-2122},
   keywords = {Global Sensor Networks, Wireless Sensor Networks, Predictors},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-04/INPROC-2009-04.pdf},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Simulations based on real-time data continuously gathered from sensor networks all over the world have received growing attention due to the increasing availability of measured data. Furthermore, predictive techniques have been employed in the realm of such networks to reduce communication for energy-efficiency. However, research has focused on the high amounts of data transferred rather than latency requirements posed by the applications. We propose using predictors to supply data with low latency as required for accurate simulations. This paper investigates requirements for a successful combination of these concepts and discusses challenges that arise.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-04&engl=1}
}
@inproceedings {INPROC-2009-03,
   author = {Adnan Tariq and Boris Koldehofe and Gerald Koch and Kurt Rothermel},
   title = {{Providing Probabilistic Latency Bounds for Dynamic Publish/Subscribe Systems}},
   booktitle = {Proceedings of the 16th ITG/GI Conference on Kommunikation in Verteilten Systemen 2009 (KiVS 2009)},
   address = {Kassel, Germany},
   publisher = {Springer},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   type = {Conference Paper},
   month = {January},
   year = {2009},
   doi = {10.1007/978-3-540-92666-5_13},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-03/INPROC-2009-03.pdf,     http://dx.doi.org/10.1007/978-3-540-92666-5_13},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {In the context of large decentralized many-to-many communication systems it is impractical to provide realistic and hard bounds for certain QoS metrics including latency bounds. Nevertheless, many applications can yield better performance if such bounds hold with a given probability. In this paper we show how probabilistic latency bounds can be applied in the context of publish/subscribe. We present an algorithm for maintaining individual probabilistic latency bounds in a highly dynamic environment for a large number of subscribers. The algorithm consists of an adaptive dissemination algorithm as well as a cluster partitioning scheme. Together they ensure i) adaptation to the individual latency requirements of subscribers under dynamically changing system properties, and ii) scalability by determining appropriate clusters according to available publishers in the system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-03&engl=1}
}
@article {ART-2009-31,
   author = {Andreas Lachenmann and Klaus Herrmann and Kurt Rothermel and Pedro Jos{\'e} Marr{\'o}n},
   title = {{On Meeting Lifetime Goals and Providing Constant Application Quality}},
   journal = {Transactions on Sensor Networks},
   publisher = {ACM},
   volume = {5},
   number = {4},
   pages = {1--36},
   type = {Article in Journal},
   month = {November},
   year = {2009},
   doi = {10.1145/1614379.1614388},
   keywords = {Wireless sensor network; Coordination; Energy; Lifetime goal; Programming abstraction},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2009-31/ART-2009-31.pdf,     http://portal.acm.org/citation.cfm?id=1614388},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Most work in sensor networks tries to maximize network lifetime. However, for many applications the required lifetime is known in advance. Therefore, application quality should rather be maximized for that given time. Levels, the approach presented in this article, is a programming abstraction for energy-aware sensor network applications that helps to meet a user-defined lifetime goal by deactivating optional functionality. With this programming abstraction, the application developer defines so-called energy levels. Functionality in energy levels can be deactivated if the required lifetime cannot be met otherwise. The runtime system uses data about the energy consumption of different levels to compute an optimal level assignment that maximizes each node's quality for the time remaining. In addition, Levels includes a completely distributed coordination algorithm that balances energy level assignments and keeps the application quality of the network roughly constant over time. In this approach, each node computes its schedule based on those of its neighbors. As the evaluation shows, applications using Levels can accurately meet given lifetime goals with only small fluctuations in application quality. In addition, the runtime overhead both for computation and for communication is negligible.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-31&engl=1}
}
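
The abstract above describes energy levels whose assignment is optimized against a lifetime goal. As a heavily simplified sketch of the underlying budget check only (the paper's runtime system computes an optimal assignment over time and coordinates with neighbors; this code does neither, and all names and numbers are invented):

# Illustrative sketch: pick the highest energy level whose average power draw
# still fits the remaining energy budget for the remaining lifetime.
def choose_level(level_power_watts, battery_joules, remaining_seconds):
    budget_watts = battery_joules / remaining_seconds
    best = 0
    for i, power in enumerate(level_power_watts):   # levels sorted by power draw
        if power <= budget_watts:
            best = i
    return best

# 10 kJ left and 24 h to go gives a budget of about 0.116 W, so level 1 is chosen.
print(choose_level([0.05, 0.10, 0.20], battery_joules=10_000,
                   remaining_seconds=24 * 3600))
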
@article {ART-2009-27,
   author = {Matthias Gauger and Pedro Jos{\'e} Marr{\'o}n and Daniel Kauker and Kurt Rothermel},
   title = {{Low Overhead Assignment of Symbolic Coordinates in Sensor Networks}},
   journal = {Telecommunication Systems},
   publisher = {Springer},
   volume = {40},
   number = {3-4},
   pages = {117--128},
   type = {Article in Journal},
   month = {April},
   year = {2009},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2009-27/ART-2009-27.pdf,     http://www.springerlink.com/content/b651j055q0m40003/},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Approximate information on the location of nodes in a sensor network is essential to many types of sensor network applications and algorithms. In many cases, using symbolic coordinates is an attractive alternative to the use of geographic coordinates due to lower costs and lower requirements on the available location information during coordinate assignment. In this paper, we investigate different possible methods of assigning symbolic coordinates to sensor nodes. We present a method based on broadcasting coordinate messages and filtering them using sensor events. We show in the evaluation that this method allows a reliable assignment of symbolic coordinates while generating only low overhead.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-27&engl=1}
}
@article {ART-2009-26,
   author = {Sven Schulz and Wolfgang Blochinger and Hannes Hannak},
   title = {{Capability-Aware Information Aggregation in Peer-to-Peer Grids}},
   journal = {Journal of Grid Computing},
   address = {Heidelberg},
   publisher = {Springer-Verlag},
   volume = {7},
   number = {2},
   pages = {135--167},
   type = {Article in Journal},
   month = {January},
   year = {2009},
   doi = {10.1007/s10723-008-9114-z},
   language = {English},
   cr-category = {C.2.1 Network Architecture and Design,     C.2.2 Network Protocols,     C.2.4 Distributed Systems,     C.2.6 Internetworking},
   department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Distributed Systems},
   abstract = {Information aggregation is the process of summarizing information across the nodes of a distributed system. We present a hierarchical information aggregation system tailored for Peer-to-Peer Grids which typically exhibit a high degree of volatility and heterogeneity of resources. Aggregation is performed in a scalable yet efficient way by merging data along the edges of a logical self-healing tree with each inner node providing a summary view of the information delivered by the nodes of the corresponding subtree. We describe different tree management methods suitable for high-efficiency and high-scalability scenarios that take host capability and stability diversity into account to attenuate the impact of slow and/or unstable hosts. We propose an architecture covering all three phases of the aggregation process: Data gathering through a highly extensible sensing framework, data aggregation using reusable, fully isolated reduction networks, and application-sensitive data delivery using a broad range of propagation strategies. Our solution combines the advantages of approaches based on Distributed Hash Tables (DHTs) (i.e., load balancing and self-maintenance) and hierarchical approaches (i.e., respecting administrative boundaries and resource limitations). Our approach is integrated into our Peer-to-Peer Grid platform Cohesion. We substantiate its effectiveness through performance measurements and demonstrate its applicability through a graphical monitoring solution leveraging our aggregation system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-26&engl=1}
}
@article {ART-2009-15,
   author = {Jorge A. Briones and Boris Koldehofe and Kurt Rothermel},
   title = {{SPINE : Adaptive Publish/Subscribe for Wireless Mesh Networks}},
   journal = {Studia Informatika Universalis},
   publisher = {Hermann},
   volume = {7},
   number = {3},
   pages = {320--353},
   type = {Article in Journal},
   month = {October},
   year = {2009},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2009-15/ART-2009-15.pdf},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {Application deployment on Wireless Mesh Networks (WMNs) is a challenging issue. First, it requires communication abstractions that allow for interoperation with Internet applications, and second, the offered solution should be sensitive to the available resources in the underlying network. Loosely coupled communication abstractions, like publish/subscribe, promote interoperability, but unfortunately are typically implemented at the application layer without considering the available resources in the underlay, imposing a significant degradation of application performance in the setting of Wireless Mesh Networks. In this paper we present SPINE, a content-based publish/subscribe system, which considers the particular challenges of deploying application-level services in Wireless Mesh Networks. SPINE is designed to reduce the overhead which stems from both publications and reconfigurations, to cope with the inherent capacity limitations on communication links as well as with the mobility of the wireless mesh-clients. We demonstrate the effectiveness of SPINE by comparison with traditional approaches in implementing content-based publish/subscribe.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-15&engl=1}
}
@article {ART-2009-14,
   author = {Bj{\"o}rn Schilling and Udo Pletat and Kurt Rothermel},
   title = {{Event Correlation in Heterogeneous Environments}},
   journal = {it --- Information Technology -- Complex Event Processing},
   publisher = {Oldenbourg Wissenschaftsverlag GmbH},
   pages = {270--275},
   type = {Article in Journal},
   month = {October},
   year = {2009},
   language = {English},
   cr-category = {C.2.1 Network Architecture and Design,     C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2009-14/ART-2009-14.pdf,     http://www.oldenbourg-wissenschaftsverlag.de/olb/de/1.c.1495462.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {The condition and location of a business item are of central interest in many business applications such as supply chain management, manufacturing, or ensuring safety and security for people and goods. Recent advances in sensor technology allow condition and location information about goods, materials and people to be transmitted to enterprise software systems in real time. In this context, complex event processing is an emerging software technology for detecting business-relevant situations in streams of events and for providing these detected situations to various business processes. While currently complex event processing systems are mostly deployed within a single business domain at a limited scale, the cooperative nature of business applications gives reason to expect that complex event processing will soon address multiple business domains and involve an increasingly large number of business events. In order to ensure interoperability as well as efficient utilization of processing and network capability, we motivate the need for heterogeneous correlation technology in the context of business applications. In this article we give an overview of the project Distributed Heterogeneous Event Processing (DHEP), involving the IBM B{\"o}blingen lab and the Universit{\"a}t Stuttgart. In particular, we highlight how business applications can benefit from using event correlation technology in heterogeneous environments. The key aspects of the project address the deployment of collections of event correlation rules to a network of heterogeneous event correlation engines. We give an overview of challenges and possible solutions for the dynamic configuration of such environments and present our architecture which supports network-wide cooperation between different correlation engines.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-14&engl=1}
}
@inbook {INBOOK-2009-03,
   author = {Stephan Schuhmann and Klaus Herrmann and Kurt Rothermel},
   title = {{Selbstkonfiguration adaptiver Anwendungen in ubiquit{\"a}ren Systemen}},
   series = {Basissoftware f{\"u}r drahtlose Ad-hoc- und Sensornetze},
   address = {Karlsruhe},
   publisher = {Universit{\"a}tsverlag Karlsruhe},
   pages = {67--84},
   type = {Article in Book},
   month = {March},
   year = {2009},
   language = {German},
   cr-category = {C.2.4 Distributed Systems},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
   abstract = {The research field of pervasive computing has gained increasing importance in recent years. Its main goal is to integrate computer technology into everyday objects and to use the resulting electronically enriched user environment, its devices and its services for the execution of applications. To this end, applications must be enabled to adapt dynamically to changing environments, for example by relocating their functionality between devices. Owing to dynamic environments, user mobility and wireless communication technologies, the development of applications for pervasive computing environments is highly complex. In this project we therefore developed fundamental concepts and algorithms to enable automated user support in such environments. The focus was on algorithms for the self-configuration of applications by means of automated composition and adaptation. In addition to dynamic, homogeneous ad hoc environments, heterogeneous environments that additionally contain resource-rich infrastructure devices were considered, in order to enable the efficient execution of configurations and adaptations in such environments as well.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-03&engl=1}
}
@proceedings {PROC-2009-02,
   editor = {Kurt Rothermel and Dieter Fritsch and Wolfgang Blochinger and Frank D{\"u}rr},
   title = {{Quality of Context -- Proceedings of the First International Workshop on Quality of Context (QuaCon 2009)}},
   address = {Stuttgart, Germany},
   publisher = {Springer},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   series = {Lecture Notes in Computer Science},
   volume = {5786},
   pages = {184},
   type = {Proceedings},
   month = {June},
   year = {2009},
   isbn = {978-3-642-04558-5},
   keywords = {context-aware systems; location-based services; quality; context},
   language = {English},
   cr-category = {H.2.8 Database Applications,     H.3.3 Information Search and Retrieval,     H.3.5 Online Information Services},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;     Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp);     University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;     University of Stuttgart, Institute for Natural Language Processing},
   abstract = {Advances in sensor technology, wireless communication, and mobile devices are leading to the proliferation of sensors in our physical environment. At the same time, detailed digital models of buildings, towns, or even the globe are becoming widely available. Integrating the huge amount of sensor data into spatial models results in highly dynamic models of the real world, often called context models. A wide range of applications can substantially benefit from context models. However, context data are inherently associated with uncertainty. In general, quality of context information has to be taken into account by both context management and applications. For example, the accuracy, completeness, and trustworthiness of spatial context information such as street or building data are very important for navigation and guidance systems. QuaCon 2009 was the first international scientific meeting that specifically focused on the different aspects of quality of context data. Research in context management and, in particular, context quality, requires an interdisciplinary approach. Therefore, the QuaCon workshop aimed to bring together researchers from various fields to discuss approaches to context quality and to make a consolidated contribution toward an integrated way of treating this topic. We received 19 high-quality paper submissions by researchers from Europe, the USA, and Asia. The International Program Committee selected 11 papers for presentation at the workshop. Additionally, five invited contributions by internationally renowned experts in the field were included in the workshop program. The presentations at the workshop showed many facets of quality of context from different research fields including context data management, spatial models, context reasoning, privacy, and system frameworks. The lively discussions underlined the great interest in this topic and in particular led to a deeper understanding of the relations between the various aspects of quality of context. The success of QuaCon 2009 was the result of a team effort. We are grateful to the members of the Program Committee and the external reviewers for their thorough and timely reviews as well as to the authors for their high-quality submissions and interesting talks. We would like to extend special thanks to our invited speakers for their excellent and inspiring keynotes. Finally, we wish to thank all persons involved in the organization of the QuaCon 2009 workshop, who really did a great job.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2009-02&engl=1}
}
@book {BOOK-2009-02,
   editor = {Alejandro Buchmann and Boris Koldehofe},
   title = {{IT-Information Technology}},
   publisher = {Oldenbourg Verlag},
   volume = {51},
   number = {2009},
   pages = {80},
   type = {Book},
   month = {October},
   year = {2009},
   doi = {10.1524/itit.2009.9058},
   language = {English},
   cr-category = {C.2.4 Distributed Systems},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;     University of Stuttgart, Institute of Parallel and Distributed Systems},
   abstract = {This special issue on Complex Event Processing (CEP) intends to provide insight into the applications and principles as well as the evolution of CEP. The selected articles from international and German researchers illustrate current trends and challenges in designing powerful, scalable and secure event processing systems.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2009-02&engl=1}
}
 