Institut für Parallele und Verteilte Systeme (IPVS)

Publications

An overview of the publications of the Institut für Parallele und Verteilte Systeme

Publications AS: Bibliography 2008 (BibTeX)

 
@inproceedings {INPROC-2008-91,
   author = {Jing Lu and Bernhard Mitschang},
   title = {{An XQuery-based Trigger Service to Bring Consistency Management to Data Integration Systems}},
   booktitle = {10th International Conference on Information Integration and Web-based Applications \& Services (iiWAS2008). Linz, Austria, November 24 - 26, 2008.},
   address = {Linz, Austria},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2008},
   isbn = {978-1-60558-349-5},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays XML-based data integration systems are accepted as data service providers on the web. In order to make such a data integration system fully equipped with data manipulation capabilities, programming frameworks which support update at the integration level are being developed. When the user is permitted to submit updates, it is necessary to establish the best possible data consistency in the whole data integration system. To that extent, we present an approach based on an XQuery trigger service. We define an XQuery trigger model together with its semantics. We report on the integration of the XQuery trigger service into the overall architecture and discuss details of the execution model. Experiments show that data consistency is enforced easily, efficiently and conveniently at the global level.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-91&engl=0}
}
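
The entry above describes consistency management via triggers defined at the integration level. As a rough illustration of that idea only -- not the paper's XQuery trigger model or syntax -- the following Python sketch shows a hypothetical global trigger registry whose conditions are checked after each update to the integrated view; all names (Trigger, TriggerService, apply_update) are invented for this example.

# Hypothetical sketch of a global-level trigger registry (not the paper's XQuery model).
from dataclasses import dataclass, field
from typing import Callable, Dict, List

Document = Dict[str, object]               # stand-in for an integrated (XML) view

@dataclass
class Trigger:
    name: str
    condition: Callable[[Document], bool]  # checked after each global update
    action: Callable[[Document], None]     # repairs consistency if the condition holds

@dataclass
class TriggerService:
    triggers: List[Trigger] = field(default_factory=list)

    def register(self, trigger: Trigger) -> None:
        self.triggers.append(trigger)

    def apply_update(self, doc: Document, update: Callable[[Document], None]) -> None:
        update(doc)                        # the user's update at the integration level
        for t in self.triggers:            # fire all triggers whose condition holds
            if t.condition(doc):
                t.action(doc)

# Example: keep a derived "total" element consistent with its item list.
service = TriggerService()
service.register(Trigger(
    name="recompute-total",
    condition=lambda d: d.get("total") != sum(d.get("items", [])),
    action=lambda d: d.update(total=sum(d.get("items", []))),
))

doc = {"items": [1, 2, 3], "total": 6}
service.apply_update(doc, lambda d: d["items"].append(4))
print(doc["total"])                        # 10 -- consistency restored by the trigger
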
@inproceedings {INPROC-2008-86,
   author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{An Annotation Approach for the Matching of Process Variables and Operational Business Data Models}},
   booktitle = {Proc. of the 21st International Conference on Computer Applications in Industry and Engineering (CAINE 2008)},
   address = {Honolulu, USA},
   publisher = {The International Society for Computers and Their Applications},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2008},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.12 Software Engineering Interoperability,     H.4.1 Office Automation,     H.5.2 Information Interfaces and Presentation User Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Efficient adaptation to new situations of a company's business and its business processes plays an important role for achieving advantages in competition with other companies. For an optimization of business processes, a profound analysis of all relevant business data in the company is necessary. Analyses typically focus either on process analysis or on data warehousing of operational business data. However, to achieve a significantly more detailed analysis in order to fully optimize a company's business, a consolidation of all major business data sources is indispensable. This paper introduces an approach that allows consolidating process variables and operational data models in a semi-automatic manner. In order to do this, a semantic annotation is applied. In this paper, we focus on an ontology-based annotation of the operational data in data warehouses, show how it is realized in a tool and discuss its general usability in other areas.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-86&engl=0}
}
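
The annotation approach sketched in the abstract above pairs process variables with operational data elements via a shared ontology. The following toy Python sketch (all identifiers and concept names are invented; this is not the tool described in the paper) shows the basic matching step: two elements are consolidation candidates if their annotations point to the same ontology concept.

# Hypothetical sketch of ontology-based matching of process variables and warehouse columns.
process_annotations = {           # process variable -> ontology concept
    "orderId": "ontology:Order",
    "custNo":  "ontology:Customer",
}
warehouse_annotations = {         # warehouse column -> ontology concept
    "FACT_SALES.ORDER_KEY":     "ontology:Order",
    "DIM_CUSTOMER.CUSTOMER_ID": "ontology:Customer",
    "DIM_PRODUCT.PRODUCT_ID":   "ontology:Product",
}

def match(process_ann, warehouse_ann):
    """Return (process variable, warehouse column) pairs annotated with the same concept."""
    by_concept = {}
    for column, concept in warehouse_ann.items():
        by_concept.setdefault(concept, []).append(column)
    return [(var, col)
            for var, concept in process_ann.items()
            for col in by_concept.get(concept, [])]

print(match(process_annotations, warehouse_annotations))
# [('orderId', 'FACT_SALES.ORDER_KEY'), ('custNo', 'DIM_CUSTOMER.CUSTOMER_ID')]
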
@inproceedings {INPROC-2008-82,
   author = {Frank Wagner and Kathleen Krebs and Cataldo Mega and Bernhard Mitschang and Norbert Ritter},
   title = {{Email Archiving and Discovery as a Service}},
   booktitle = {Intelligent Distributed Computing, Systems and Applications; Proceedings of the 2nd International Symposium on Intelligent Distributed Computing: IDC 2008; Catania, Italy},
   editor = {Costin Badica and Giuseppe Mangioni and Vincenza Carchiolo and Dumitru Dan Burdescu},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Studies in Computational Intelligence},
   volume = {162},
   pages = {197--206},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   isbn = {978-3-540-85256-8},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.3.2 Information Storage,     H.3.4 Information Storage and Retrieval Systems and Software},
   contact = {Frank Wagner frank.wagner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Corporate governance and legislative regulations are forcing companies to extend their IT infrastructure with Email Archive and Discovery (EAD) systems for compliance reasons. Practice shows that every installation is different from another; not only in terms of the execution infrastructure, but also in terms of e.g. document and archiving procedures that map a company's own business rules. As a consequence, EAD systems have to be highly customizable to their intended usages. For this purpose, we propose a service-oriented approach at various levels of detail that, on one hand, allows for describing EAD properties at the abstract (service) level and, on the other hand, supports the appropriate mapping of these services to the existing execution infrastructure. In this paper, we focus on the development and (architectural) design of an EAD system, which is well suited to fulfill these requirements. In the long run, we consider this solution as an important step on the way to an effective distributed and scalable approach, which, as we think, can be achieved by appropriate mechanisms of automatic workload management and dynamic provisioning of EAD services based on e.g. grid technology.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-82&engl=0}
}
@inproceedings {INPROC-2008-81,
   author = {Frank Wagner and Kathleen Krebs and Cataldo Mega and Bernhard Mitschang and Norbert Ritter},
   title = {{Towards the Design of a Scalable Email Archiving and Discovery Solution}},
   booktitle = {Proceedings of the 12th East-European Conference on Advances in Databases and Information Systems},
   editor = {Paolo Atzeni and Albertas Caplinskas and Hannu Jaakkola},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science},
   pages = {305--320},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.3.2 Information Storage,     H.3.4 Information Storage and Retrieval Systems and Software},
   contact = {Frank Wagner frank.wagner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this paper we propose a novel approach to specialize a general-purpose Enterprise Content Management (ECM) System into an Email Archiving and Discovery (EAD) System. The magnitude and range of compliance risks associated with the management of EAD are driving investment in the development of more effective and efficient approaches to support regulatory compliance, legal discovery and content life-cycle needs. Companies must recognize and address requirements like legal compliance, electronic discovery, and document retention management. What is needed today are EAD systems capable of processing very high message ingest rates, supporting distributed full text indexing, and allowing forensic search in support of litigation cases. All this must be provided at lowest cost with respect to archive management and administration. In our approach we introduce a virtualized ECM repository interface where the key content repository components are wrapped into a set of tightly coupled Grid service entities, so as to achieve scale-out on a cluster of commodity blade hardware that is automatically configured and dynamically provisioned. By doing so, we believe we can leverage the strength of Relational Database Management Systems and Full Text Indexes in a managed clustered environment with minimal operational overhead.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-81&engl=0}
}
@inproceedings {INPROC-2008-51,
   author = {Nicola H{\"o}nle and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Preprocessing Position Data of Mobile Objects}},
   booktitle = {Proceedings of the 9th International Conference on Mobile Data Management (MDM'08); Beijing, China, April 27-30, 2008.},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2008},
   isbn = {978-0-7695-3154-0},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     G.1.2 Numerical Analysis Approximation},
   contact = {nicola.hoenle@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {We present the design and implementation of a component for the preprocessing of position data taken from moving objects. The movement of mobile objects is represented by piecewise functions over time that approximate the real object movement and significantly reduce the initial data volume such that efficient storage and analysis of object trajectories can be achieved. The maximal acceptable deviation of the approximations---an input parameter of our algorithms---also includes the uncertainty of the position sensor measurements. We analyze and compare five different lossy preprocessing methods. Our results clearly indicate that even with simple approaches, a more than sufficient overall performance can be achieved.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-51&engl=0}
}
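
To make the idea of lossy preprocessing with a maximal acceptable deviation concrete, here is a minimal Python sketch of one very simple strategy (a piecewise-constant approximation); it is an assumption-laden stand-in, not one of the five methods evaluated in the paper.

# Store a position sample only when it deviates from the last stored position by more
# than a maximal acceptable deviation eps, which also absorbs sensor uncertainty.
from math import hypot

def reduce_trajectory(samples, eps):
    """samples: list of (t, x, y); returns the subset that is actually stored."""
    if not samples:
        return []
    kept = [samples[0]]
    for t, x, y in samples[1:]:
        _, kx, ky = kept[-1]
        if hypot(x - kx, y - ky) > eps:    # deviation exceeds the tolerance
            kept.append((t, x, y))
    return kept

raw = [(0, 0.0, 0.0), (1, 0.4, 0.1), (2, 0.9, 0.2), (3, 3.0, 0.3), (4, 3.1, 0.2)]
print(reduce_trajectory(raw, eps=1.0))
# [(0, 0.0, 0.0), (3, 3.0, 0.3)] -- the data volume shrinks, while every dropped
# sample lies within eps of its stored predecessor
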
@inproceedings {INPROC-2008-50,
   author = {Andreas Brodt and Daniela Nicklas and Sailesh Sathish and Bernhard Mitschang},
   title = {{Context-Aware Mashups for Mobile Devices}},
   booktitle = {Web Information Systems Engineering – WISE 2008 9th International Conference on Web Information Systems Engineering, Auckland, New Zealand, September 1-3, 2008, Proceedings},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   keywords = {mashup, location-based services, Delivery Context Client Interfaces, DCCI, AJAX, context provisioning},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.12 Software Engineering Interoperability,     H.5.1 Multimedia Information Systems,     H.5.4 Hypertext/Hypermedia,     H.2.5 Heterogeneous Databases},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-50/INPROC-2008-50.pdf},
   contact = {andreas.brodt@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {With the Web 2.0 trend and its participation of end-users, more and more data and information services are accessible online, such as web sites, Wikis, or web services. So-called mashups---web applications that integrate data from more than one source into an integrated service---can be easily realized using scripting languages. Also, mobile devices are increasingly powerful, have ubiquitous access to the Web, and feature local sensors such as GPS. Thus, mobile applications can adapt to the mobile user's current situation. We examine how context-aware mashups can be created. One challenge is the provisioning of context data to the mobile application. For this, we discuss different ways to integrate context data, such as the user's position, into web applications. Moreover, we assess different data formats and the overall performance. Finally, we present the Telar Mashup Platform, a client-server solution for location-based mashups for mobile devices such as the Nokia N810 Internet Tablet.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-50&engl=0}
}
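
As a hypothetical illustration of what adapting a mashup to the user's position can mean in the simplest case -- not the Telar Mashup Platform's actual API and not the DCCI interface -- the following Python sketch combines a faked sensor reading with a faked remote data source and filters the result by distance.

from math import hypot

def read_position():
    # stand-in for a local sensor / context source on the device
    return {"lat": 48.745, "lon": 9.107}

def fetch_points_of_interest():
    # stand-in for a wrapped web data source
    return [
        {"name": "Cafe",   "lat": 48.746, "lon": 9.108},
        {"name": "Museum", "lat": 48.800, "lon": 9.200},
    ]

def mashup(max_dist=0.01):
    """Return only the points of interest close to the current position."""
    pos = read_position()
    return [p for p in fetch_points_of_interest()
            if hypot(p["lat"] - pos["lat"], p["lon"] - pos["lon"]) <= max_dist]

print(mashup())    # [{'name': 'Cafe', ...}] -- the result adapts to the user's location
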
@inproceedings {INPROC-2008-49,
   author = {Matthias Grossmann and Nicola H{\"o}nle and Daniela Nicklas and Bernhard Mitschang},
   title = {{Reference Management in a Loosely Coupled, Distributed Information System}},
   booktitle = {Proceedings of the 12th East-European Conference on Advances in Databases and Information Systems},
   editor = {Paolo Atzeni and Albertas Caplinskas and Hannu Jaakkola},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   language = {Englisch},
   cr-category = {E.2 Data Storage Representations,     H.2.2 Database Management Physical Design,     H.2.4 Database Management Systems},
   contact = {Matthias Grossmann matthias.grossmann@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {References between objects in loosely coupled distributed information systems pose a problem. On the one hand, one tries to avoid referential inconsistencies like, e.g., dangling links in the WWW. On the other hand, using strict constraints as in databases may restrict the data providers severely. We present the solution to this problem that we developed for the Nexus system. The approach tolerates referential inconsistencies in the data while providing consistent query answers to users. For traversing references, we present a concept based on return references. This concept is especially suitable for infrequent object migrations and provides a good query performance. For scenarios where object migrations are frequent, we developed an alternative concept based on a distributed hash table.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-49&engl=0}
}
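
The return-reference concept mentioned above can be pictured with a small, self-contained Python sketch (all "servers" are plain objects in one process; the protocol, error handling and the DHT alternative are omitted): the home of a referenced object records who points at it, so a migration can rewrite those references.

class Server:
    def __init__(self, name):
        self.name = name
        self.objects = {}       # object id -> {"refs": [(target server, target id), ...]}
        self.return_refs = {}   # object id -> [(referrer server, referrer id), ...]

    def add_object(self, oid):
        self.objects[oid] = {"refs": []}

    def add_reference(self, oid, target_server, target_oid):
        # store the forward reference locally ...
        self.objects[oid]["refs"].append((target_server, target_oid))
        # ... and a return reference at the target's current home
        target_server.return_refs.setdefault(target_oid, []).append((self, oid))

    def migrate(self, oid, new_server):
        new_server.objects[oid] = self.objects.pop(oid)
        referrers = self.return_refs.pop(oid, [])
        new_server.return_refs[oid] = referrers       # return references move along
        for ref_server, ref_oid in referrers:         # rewrite stale forward references
            refs = ref_server.objects[ref_oid]["refs"]
            ref_server.objects[ref_oid]["refs"] = [
                (new_server, t) if (s is self and t == oid) else (s, t) for s, t in refs
            ]

s1, s2, s3 = Server("s1"), Server("s2"), Server("s3")
s1.add_object("a"); s2.add_object("b")
s1.add_reference("a", s2, "b")    # a -> b; s2 records the return reference
s2.migrate("b", s3)               # b moves to s3; the reference in a is rewritten
print([(srv.name, t) for srv, t in s1.objects["a"]["refs"]])   # [('s3', 'b')]
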
@inproceedings {INPROC-2008-46,
   author = {Thorsten Scheibler and Ralph Mietzner and Frank Leymann},
   title = {{EAI as a Service - Combining the Power of Executable EAI Patterns and SaaS}},
   booktitle = {International EDOC Conference (EDOC 2008)},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {One of the predominant problems IT companies are facing today is Enterprise Application Integration (EAI). Most of the infrastructures built to tackle integration issues are proprietary because no standards exist for how to model, develop, and actually execute integration scenarios. Moreover, those systems are built on top of a Pipes-and-Filters architecture that offers only limited capabilities for productive environments. As service-oriented architectures (SOA) can be seen as the de-facto standard for building enterprise systems today, including integration systems, there is a need to utilize these systems for executing integration scenarios. Business processes in an SOA environment can be used to integrate various applications to form an integration solution. Thus, the application domain of BPM is significantly extended. In this paper, we show how integration solutions can be executed on BPM infrastructures. To demonstrate this, we introduce a tool that supports integration architects in designing integration scenarios and executing these solutions automatically.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-46&engl=0}
}
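
To hint at what executable EAI patterns mean in spirit -- this is a made-up miniature, not the paper's tool chain or its mapping to a process engine -- the sketch below captures a content-based router pattern declaratively and interprets it for individual messages.

# Declarative description of a content-based router pattern (illustrative only).
router = {
    "pattern": "content-based-router",
    "routes": [
        {"when": lambda msg: msg["type"] == "order",   "to": "OrderService"},
        {"when": lambda msg: msg["type"] == "invoice", "to": "BillingService"},
    ],
    "otherwise": "DeadLetterQueue",
}

def execute(pattern, message):
    """Interpret the pattern description for one message."""
    for route in pattern["routes"]:
        if route["when"](message):
            return route["to"]
    return pattern["otherwise"]

print(execute(router, {"type": "order", "id": 42}))    # OrderService
print(execute(router, {"type": "unknown"}))            # DeadLetterQueue
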
@inproceedings {INPROC-2008-32,
   author = {Sylvia Radesch{\"u}tz and Florian Niedermann and Bernhard Mitschang},
   title = {{Ein Annotationsansatz zur Unterst{\"u}tzung einer ganzheitlichen Gesch{\"a}ftsanalyse}},
   booktitle = {Proc. of the 5th Conference on Data Warehousing: Synergien durch Integration und Informationslogistik. (DW2008); St. Gallen, 27.-28. Oktober, 2008},
   publisher = {Lecture Notes in Informatics (LNI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--19},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2008},
   language = {Deutsch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.12 Software Engineering Interoperability,     H.4.1 Office Automation,     H.5.2 Information Interfaces and Presentation User Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Die Verbesserung der Gesch{\"a}ftsprozesse in einem Unternehmen spielt eine immer wichtigere Rolle, um Wettbewerbsvorteile gegen{\"u}ber der Konkurrenz zu erlangen. Daf{\"u}r ist eine umfassende Analyse n{\"o}tig {\"u}ber alle verf{\"u}gbaren Informationen in diesem Unternehmen. Aktuelle Verfahren konzentrieren sich entweder auf die Analyse von Prozessdaten oder die Analyse von operativen Anwendungsdaten, die typischerweise in einem Data Warehouse vorliegen. F{\"u}r die Ausf{\"u}hrung einer tiefergehenden Analyse ist es jedoch notwendig, Prozessdaten und operative Daten zu verkn{\"u}pfen. Dieser Beitrag stellt zwei Ans{\"a}tze vor, welche es erm{\"o}glichen, diese Daten effektiv und flexibel zusammenzuf{\"u}hren. Der erste Ansatz stellt eine direkte Verkn{\"u}pfung von Entit{\"a}ten aus den Prozessdaten mit Entit{\"a}ten aus den operativen Daten her. Die Verkn{\"u}pfung im zweiten Ansatz beruht hingegen auf der semantischen Beschreibung der Daten. Beide Methoden sind in einem Werkzeug realisiert.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-32&engl=0}
}
@inproceedings {INPROC-2008-21,
   author = {Andreas Brodt and Daniela Nicklas},
   title = {{The TELAR mobile mashup platform for Nokia Internet Tablets}},
   booktitle = {EDBT '08: Proceedings of the 11th international conference on Extending database technology; Nantes, France, March 25-29, 2008},
   editor = {ACM},
   publisher = {ACM New York, NY, USA},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {ACM International Conference Proceeding Series},
   volume = {261},
   pages = {700--704},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2008},
   doi = {10.1145/1353343.1353429},
   isbn = {978-1-59593-926-5},
   keywords = {location-based services, GPS, DCCI, mashup},
   language = {Englisch},
   cr-category = {H.2.5 Heterogeneous Databases,     H.5.1 Multimedia Information Systems,     H.5.4 Hypertext/Hypermedia},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-21/INPROC-2008-21.pdf},
   contact = {andreas.brodt@ipvs.uni-stuttgart.de, daniela.nicklas@uni-oldenburg.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {With the Web 2.0 trend and its participation of end-users, more and more data and information services are accessible online, such as web sites, Wikis, or web services. The integration of this plethora of information is taken over by the community: so-called Mashups---web applications that combine data from more than one source into an integrated service---spring up like mushrooms, because they can be easily realized using script languages and web development platforms. Another trend is that mobile devices, which are becoming more and more powerful, have ubiquitous access to the Web. Local sensors (such as GPS) can easily be connected to these devices. Thus, mobile applications can adapt to the current situation of the user, which can change frequently because of his or her mobility. In this demonstration, we present the Telar Mashup platform, a client-server solution that facilitates the creation of adaptive Mashups for mobile devices such as the Nokia Internet Tablets. On the server side, wrappers allow the integration of data from web-based services. On the client side, a simple implementation of the DCCI specification is used to integrate context information of local sensors into the mobile Web browser, which adapts the Mashup to the user's current location. We show an adaptive, mobile Mashup on the Nokia N810 Internet Tablet.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-21&engl=0}
}
@inproceedings {INPROC-2008-18,
   author = {Daniela Nicklas and Matthias Grossmann and Jorge Minguez and Matthias Wieland},
   title = {{Adding High-level Reasoning to Efficient Low-level Context Management: a Hybrid Approach}},
   booktitle = {Proceedings of the Sixth Annual IEEE Conference on Pervasive Computing and Communications : PerCom'08 Workshops, in 5th IEEE PerCom Workshop on Context Modeling and Reasoning; Hongkong, 17.-21. March 2008},
   address = {Los Alamitos - California, Washington, Tokyo},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {IEEE Computer Society},
   volume = {Order Number E3113},
   pages = {447--452},
   type = {Workshop-Beitrag},
   month = {M{\"a}rz},
   year = {2008},
   isbn = {0-7695-3113-X},
   isbn = {978-0-7695-3113-7},
   keywords = {higher level context; pervasive computing; ubiquitous computing; context-aware applications},
   language = {Englisch},
   cr-category = {D.1.6 Logic Programming,     D.2.11 Software Engineering Software Architectures,     H.2.8 Database Applications},
   ee = {http://www.nexus.uni-stuttgart.de/COMOREA,     http://www.nexus.uni-stuttgart.de},
   contact = {Daniela Nicklas dnicklas@acm.org},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Rule-based context reasoning is an expressive way to define situations, which are crucial for the implementation of many context-aware applications. Using the scenario of the Conference Guard application, we show how this reasoning can be done both by leveraging an efficient context management (realized by the Nexus platform) and by a generic rule-based service. We present the architecture of the Nexus semantic service, which uses the underlying definition of a low-level context model (the Nexus Augmented World Model) to carry out rules given in first-order logic. We realize this service in a straightforward manner by using state-of-the-art software components (the Jena 2 framework) and evaluate the number of instances this approach can handle. Our first experiences show that a pre-selection of instances is necessary if the semantic service is to work on a large-scale context model.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-18&engl=0}
}
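
The hybrid approach above combines efficient low-level context management with rule-based reasoning. The following toy Python sketch (naive and hand-written; the paper evaluates first-order rules with the Jena 2 framework over the Augmented World Model) shows how a higher-level situation can be derived from low-level context facts by a single rule.

facts = {
    ("alice", "locatedIn", "room1"),
    ("room1", "type", "ConferenceRoom"),
    ("talk42", "takesPlaceIn", "room1"),
}

def rule_attends(facts):
    """IF ?p locatedIn ?r AND ?t takesPlaceIn ?r THEN ?p attends ?t."""
    derived = set()
    for person, p1, room in facts:
        if p1 != "locatedIn":
            continue
        for talk, p2, room2 in facts:
            if p2 == "takesPlaceIn" and room2 == room:
                derived.add((person, "attends", talk))
    return derived

print(rule_attends(facts))   # {('alice', 'attends', 'talk42')} -- a derived situation
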
@inproceedings {INPROC-2008-150,
   author = {Laura Kassner and Vivi Nastase and Michael Strube},
   title = {{Acquiring a Taxonomy from the German Wikipedia}},
   booktitle = {Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)},
   editor = {Nicoletta Calzolari},
   publisher = {European Language Resources Association (ELRA)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--4},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2008},
   isbn = {2-9517408-4-0},
   keywords = {taxonomy; ontology; taxonomy generation; ontology generation; semantic network; Wikipedia; WordNet; GermaNet; multilinguality},
   language = {Englisch},
   cr-category = {I.2.4 Knowledge Representation Formalisms and Methods,     I.2.7 Natural Language Processing},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-150/INPROC-2008-150.pdf,     http://www.lrec-conf.org/proceedings/lrec2008/},
   contact = {laura.kassner@gsame.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {This paper presents the process of acquiring a large, domain-independent taxonomy from the German Wikipedia. We build upon a previously implemented platform that extracts a semantic network and taxonomy from the English version of Wikipedia. We describe two accomplishments of our work: the semantic network for the German language in which isa links are identified and annotated, and an expansion of the platform for easy adaptation for a new language. We identify the platform's strengths and shortcomings, which stem from the scarcity of free processing resources for languages other than English. We show that the taxonomy induction process is highly reliable: evaluated against GermaNet, the German version of WordNet, the resource obtained shows an accuracy of 83.34\%.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-150&engl=0}
}
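
As a rough, assumption-heavy illustration of how isa links can be derived from category labels -- work in this line relies on proper linguistic head extraction, which is considerably harder for German compounds -- the following Python sketch labels an article-category link as isa when the crudely extracted head of the category label is a known class term. The example data and class set are invented.

def head(label):
    # crude head extraction: the token before "of" if present, otherwise the last token
    tokens = label.split()
    if "of" in tokens:
        return tokens[tokens.index("of") - 1].lower()
    return tokens[-1].lower()

def classify_links(article_categories, known_classes):
    """Label (article, category) links as isa or merely related."""
    links = []
    for article, categories in article_categories.items():
        for category in categories:
            relation = "isa" if head(category) in known_classes else "related"
            links.append((article, relation, category))
    return links

article_categories = {"Neckar": ["Rivers of Germany", "Baden-Württemberg"]}
known_classes = {"rivers"}
print(classify_links(article_categories, known_classes))
# [('Neckar', 'isa', 'Rivers of Germany'), ('Neckar', 'related', 'Baden-Württemberg')]
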
@inproceedings {INPROC-2008-112,
   author = {Steffen Volz and Daniela Nicklas and Matthias Grossmann and Matthias Wieland},
   title = {{On creating a spatial integration schema for global, context-aware applications}},
   booktitle = {Proceedings of the X Brazilian Symposium on GeoInformatics (GeoInfo2008)},
   publisher = {INPE},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2008},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The world of spatial data is split into individual data source islands that have different thematic or spatial focuses. When attempting to integrate those data sources, severe challenges arise, since for most GIS application domains a spatial integration schema does not exist. This is also true for the newly emerging domain of mobile, context-aware applications. Since the users of these systems are mobile, transborder access to spatial data or context models is crucial for global deployment. The basis for this work is the Nexus Augmented World Schema, a conceptual schema that serves as an integration standard for autonomous spatial context servers. This paper analyzes some major spatial data standards, especially with respect to the requirements of a spatial integration schema for context-aware applications and illustrates the Nexus approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-112&engl=0}
}
@inproceedings {INPROC-2008-05,
   author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang and Frank Leymann},
   title = {{Matching of Process Data and Operational Data for a Deep Business Analysis}},
   booktitle = {Proc. of the 4th International Conference on Interoperability for Enterprise Software and Applications (I-ESA 2008), Berlin, M{\"a}rz 26-28, 2008.},
   address = {London},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {171--182},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2008},
   doi = {10.1007/978-1-84800-221-0_14},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   ee = {http://www.aidima.es/iesa2008/},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Efficient adaptation to new situations of a company's business and its business processes plays an important role for achieving advantages in competition with other companies. For an optimization of processes, a profound analysis of all relevant information in the company is necessary. Analyses typically focus either on process analysis or on data warehousing of operational data. A consolidation of business data is needed, i.e. of internal process execution data and external operational data, in order to allow for interoperability between these major business data sources to analyze and optimize processes in a much more comprehensive scope. This paper introduces a framework that offers various data descriptions to reach an efficient matching of process data and operational data, and shows its benefits compared to separate analyses and other matching approaches.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-05&engl=0}
}
@inproceedings {INPROC-2008-04,
   author = {Jing Lu and Bernhard Mitschang},
   title = {{A Constraint-Aware Query Optimizer for Web-based Data Integration}},
   booktitle = {Proceedings of the Fourth International Conference on Web Information Systems and Technologies, May 4-7, 2008.},
   address = {Funchal, Madeira, Portugal},
   publisher = {Conference Proceedings},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2008},
   language = {Englisch},
   cr-category = {H.3.5 Online Information Services},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Web has brought forth opportunities to connect information sources across all types of boundaries. The information sources include databases, XML documents, and other unstructured sources. Data integration combines data residing at different sources and provides the user with a unified view of these data. Currently, users expect more efficient services from such data integration systems. Indeed, querying multiple data sources scattered across the web faces many barriers to efficiency due to the heterogeneity and autonomy of the information sources. This paper describes a query optimizer, which uses constraints to semantically optimize the queries. The optimizer first translates constraints from data sources into constraints expressed at the global level, e.g., in the common schema, and stores them in the constraint repository, again, at the global level. Then the optimizer can use semantic query optimization techniques including detection of empty results, join elimination, and predicate elimination to generate a more efficient but semantically equivalent query for the user. The optimizer is published as a web service and can be invoked by many data integration systems. We carry out experiments using our semantic query optimizer and first results show that performance can be greatly improved.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-04&engl=0}
}
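
Two of the semantic rewrites named in the abstract above -- empty-result detection and elimination of redundant predicates -- can be illustrated for simple numeric range predicates. The sketch below is a hypothetical simplification, not the published optimizer, and the constraint format is invented.

# Global-level constraint on an attribute, e.g. 0 <= price <= 100.
constraint = {"attr": "price", "low": 0, "high": 100}

def optimize(predicates, constraint):
    """predicates: list of (attr, op, value) with op in {'<', '>'}.
    Returns ('empty', []) if the query can be answered without contacting any
    source, otherwise ('rewritten', remaining_predicates)."""
    remaining = []
    for attr, op, value in predicates:
        if attr != constraint["attr"]:
            remaining.append((attr, op, value))
        elif op == ">" and value >= constraint["high"]:
            return "empty", []                # contradicts the constraint
        elif op == "<" and value <= constraint["low"]:
            return "empty", []
        elif op == ">" and value < constraint["low"]:
            continue                          # implied by the constraint, drop it
        elif op == "<" and value > constraint["high"]:
            continue
        else:
            remaining.append((attr, op, value))
    return "rewritten", remaining

print(optimize([("price", ">", 150)], constraint))                   # ('empty', [])
print(optimize([("price", "<", 500), ("cat", ">", 3)], constraint))
# ('rewritten', [('cat', '>', 3)]) -- the redundant price predicate is dropped
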
@inproceedings {INPROC-2008-02,
   author = {Marko Vrhovnik and Holger Schwarz and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{An Overview of SQL Support in Workflow Products}},
   booktitle = {Proc. of the 24th International Conference on Data Engineering (ICDE 2008), Canc{\'u}n, M{\'e}xico, April 7-12, 2008},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2008},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Over the last years, data management products as well as workflow products have established themselves as indispensable building blocks for advanced IT systems in almost all application areas. Recently, many vendors have created innovative product extensions that combine service-oriented frameworks with powerful workflow and data management capabilities. In this paper, we discuss several workflow products from different vendors with a specific focus on their SQL support. We provide a comparison based on a set of important data management patterns and illustrate the characteristics of various approaches by means of a running example.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-02&engl=0}
}
@inproceedings {INPROC-2008-01,
   author = {Marko Vrhovnik and Holger Schwarz and Stephan Ewen and Oliver Suhre},
   title = {{PGM/F: A Framework for the Optimization of Data Processing in Business Processes}},
   booktitle = {Proc. of the 24th International Conference on Data Engineering (ICDE 2008), Canc{\'u}n, M{\'e}xico, April 7-12, 2008},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--4},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2008},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Business process execution languages like BPEL are broadly adopted by industry to integrate the heterogeneous applications and data stores of an enterprise. Leading vendors provide extensions to BPEL that allow for a tight integration of data processing capabilities into the process logic. Business processes exploiting these capabilities show a remarkable potential for optimization. In this demonstration, we present PGM/F, a framework for the optimization of data processing in such business processes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-01&engl=0}
}
@article {ART-2008-16,
   author = {Stefanie Springer and Severin Beucker and Daniel Heubach and Fabian Kaiser and Dierk-Oliver Kiehne and Mih{\'a}ly Jakob},
   title = {{Mit Softwaretools zu nachhaltigen Produkt- und Serviceinnovationen}},
   journal = {{\"O}kologisches Wirtschaften},
   address = {M{\"u}nchen},
   publisher = {oekom verlag},
   pages = {43--46},
   type = {Artikel in Zeitschrift},
   month = {August},
   year = {2008},
   issn = {1430-8800},
   language = {Deutsch},
   cr-category = {H.3.3 Information Search and Retrieval,     D.2.3 Software Engineering Coding Tools and Techniques},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Wie k{\"o}nnen Unternehmen durch Produktinnovationen einen Beitrag dazu leisten, dass das Leitbild einer nachhaltigen Entwicklung umgesetzt wird, und welche Potenziale bietet dabei der Einsatz des Internet? Das waren die zentralen Fragen, die in dem von 2003 bis 2007 vom BMBF gef{\"o}rderten Projekt nova-net gestellt wurden.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2008-16&engl=0}
}
@article {ART-2008-06,
   author = {Daniel Heubach and Severin Beucker and Fabian Kaiser and Mih{\'a}ly Jakob and Dierk-Oliver Kiehne},
   title = {{Fr{\"u}he Innovationsphasen - Informationsgewinnung durch delphigest{\"u}tztes Szenariomanagement und Expertensuche}},
   journal = {ZWF Zeitschrift f{\"u}r wirtschaftlichen Fabrikbetrieb},
   publisher = {Hanser-Verlag},
   pages = {260--264},
   type = {Artikel in Zeitschrift},
   month = {April},
   year = {2008},
   issn = {0947-0085},
   keywords = {fr{\"u}he Innovationsphasen; delphigest{\"u}tztes Szenario-Management; Expertensuche; semantische Modelle; Internet; Technologiemanagement},
   language = {Deutsch},
   cr-category = {H.3 Information Storage and Retrieval,     H.3.3 Information Search and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Fr{\"u}he Innovationsphasen stellen f{\"u}r Unternehmen eine besondere Herausforderung dar, da in ihnen Orientierungswissen geschaffen und strukturiert werden muss, das bei einer Entscheidungsfindung {\"u}ber den Einstieg in ein Technologie- oder Marktfeld oder auch ein bestimmtes Produktsegment unterst{\"u}tzt. Daf{\"u}r k{\"o}nnen verschiedene Informationsquellen wie z.B. das Internet und externe Experten genutzt werden. Hier setzt das Forschungsprojekt nova-net an. Es stellt die zwei Methoden des delphigest{\"u}tzten Szenario-Management und der Expertensuche mit den dazugeh{\"o}rigen Softwaretools SEMAFOR und EXPOSE zur Verf{\"u}gung, die den Arbeitsaufwand der Informationsbeschaffung und Strukturierung deutlich reduzieren und die in ihrer Kombination eine gute Unterst{\"u}tzung bei der Entscheidungsfindung in fr{\"u}hen Innovationsphasen liefern.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2008-06&engl=0}
}
@inbook {INBOOK-2008-16,
   author = {Anne-Sophie Br{\"u}ggen and Sarah Jessen and Laura Kassner and Thorsten Liebelt and Yvonne Schweizer and Annika Weschler},
   title = {{Imagination}},
   series = {Kognition und Verhalten: Theory of Mind, Zeit, Imagination, Vergessen, Altruismus},
   address = {M{\"u}nster},
   publisher = {LIT-Verlag},
   series = {Interdisziplin{\"a}re Forschungsarbeiten am FORUM SCIENTIARUM},
   volume = {1},
   pages = {85--128},
   type = {Beitrag in Buch},
   month = {Januar},
   year = {2008},
   isbn = {978-3-8258-1826-5},
   keywords = {Imagination; Interdisziplin{\"a}re Forschung; K{\"u}nstliche Intelligenz},
   language = {Deutsch},
   cr-category = {A.m General Literature, Miscellaneous},
   ee = {http://www.forum-scientiarum.uni-tuebingen.de/studium/studienkolleg/archiv/studienkolleg0607.html},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {'Die F{\"a}higkeit des Menschen zu denken ist Thema der Geisteswissenschaften, aber auch der Psychologie, Anthropologie und zunehmend der Neurowissenschaften. Dieser Sammelband, in dem die Abschlussarbeiten des ersten Jahrgangs des Studienkollegs am Forum Scientiarum der Universit{\"a}t T{\"u}bingen dokumentiert werden, besch{\"a}ftigt sich mit einigen ausgew{\"a}hlten Themen im Zusammenhang der biologischen und kulturellen Grundlagen menschlichen Denkens.' (Autorenreferat). Inhaltsverzeichnis: Judith Benz-Schwarzburg, Linda Braun, Alexander Ecker, Tobias Kobitzsch, Christian L{\"u}cking: Theory of Mind bei Mensch und Tier (I-50); Nina Baier, Christoph Paret, Sarah Wiethoff: Zeit und Zeitbewusstsein (51-84); Anne-Sophie Br{\"u}ggen, Sarah Jessen, Laura Kassner, Thorsten Liebelt, Yvonne Schweizer, Annika Weschler: Imagination (85-128); Rainer Engelken, Kathleen Hildebrand, Nikolaus Schmitz, Silke Wagenh{\"a}user: Vergessen als eine Grundlage menschlichen Denkens (129-176); Christian G{\"a}ssler, Ralf J. Geretshauser, Bilal Hawa, Steffen Kudella, Sebastian Sehr, Nora Umbach: Altruismus (177-211).},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2008-16&engl=0}
}
@inbook {INBOOK-2008-02,
   author = {Clemens Dorda and Uwe Heinkel and Bernhard Mitschang},
   title = {{A Concept for Applying Model-driven Engineering to Application Integration}},
   series = {Challenges In Information Technology Management},
   address = {Singapur},
   publisher = {World Scientific},
   pages = {168--174},
   type = {Beitrag in Buch},
   month = {Mai},
   year = {2008},
   isbn = {9789812819062},
   isbn = {9812819061},
   keywords = {Enterprise Application Integration, Model-Driven Engineering, Software Lifecycle, EAI, MDA, MDE, UML, Unified Modeling Language},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.13 Software Engineering Reusable Software,     I.6.5 Model Development},
   contact = {Clemens.Dorda@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Modern software for Enterprise Application Integration (EAI) provides tools for modeling integration scenarios. A drawback of these tools is the lack of functionality for exchanging or integrating models from different EAI products. Consequently, developers are only partially able to describe real heterogeneous IT environments. Our goal is to avoid the creation of these so-called ``integration islands''. For that purpose we present an approach which introduces an abstract view by technology-independent and multivendor-capable modeling for both development and maintenance. With this approach, we propose a toolset- and repository-based refinement of the abstract view to automate implementation with real products and deployment on real platforms.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2008-02&engl=0}
}
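
A minimal sketch of the refinement idea described above, under invented names and formats (this is not the authors' toolset or repository): a technology-independent channel model is turned into product-specific artifacts by looking up platform bindings in a repository.

# Technology-independent integration model (illustrative only).
abstract_model = {
    "channels": [
        {"name": "NewOrders", "source": "WebShop", "target": "ERP", "style": "queue"},
    ]
}

# Repository of platform bindings; the platform names and output formats are made up.
repository = {
    ("queue", "PlatformA"): lambda ch: "define-queue {name} from={source} to={target}".format(**ch),
    ("queue", "PlatformB"): lambda ch: '<queue name="{name}" source="{source}" target="{target}"/>'.format(**ch),
}

def refine(model, platform):
    """Turn the abstract model into deployable artifacts for one concrete platform."""
    return [repository[(ch["style"], platform)](ch) for ch in model["channels"]]

print(refine(abstract_model, "PlatformA"))
print(refine(abstract_model, "PlatformB"))   # same model, two concrete targets
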
 